| content (string, length 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def load(csv, sep=';'):
"""
Load data into dataframe
:param csv:
:param sep:
:return:
"""
data = pd.read_csv(csv, sep=sep)
return data
| 12,200
|
def get_docs(request):
"""
вернуть список
[
{doc_id, doc_name},
{doc_id, doc_name},
]
"""
doc_set_id = request.GET["doc_set_id"]
docs = Document.objects.filter(doc_set_id=doc_set_id)
response_data = []
for doc in docs:
filename, file_extension = os.path.splitext(doc.get_path())
doc_name, _ = os.path.splitext(doc.doc_name)
out_name = f"{filename}.pdf"
response_data.append(
{
"doc_id": doc.id,
"doc_name": f"{doc_name}.pdf",
"path": out_name.split("/static")[1],
}
)
return HttpResponse(json.dumps(response_data), content_type="application/json")
| 12,201
|
def count_lost_norm4(matrix):
"""calculate 4th lost points: Proportion of dark modules in entire symbol:
50 + (5 + k) or 50 - (5 + k), return k * 10
Args:
matrix ([type]): [description]
Returns:
[int]: [description]
"""
dark_sum = np.sum(matrix)
modules_num = matrix.size
dark_ratio = dark_sum / modules_num
k = abs((dark_ratio * 100 - 50)) / 5
return int(k) * 10
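# A quick worked check on a hypothetical input (sketch only): a 4x4 symbol with
# 10 of 16 dark modules has a dark ratio of 62.5%, so k = |62.5 - 50| / 5 = 2.5
# and the penalty is int(2.5) * 10 = 20.
import numpy as np
_example_matrix = np.array([
    [1, 1, 1, 0],
    [1, 1, 0, 1],
    [1, 0, 1, 1],
    [0, 1, 0, 0],
])
assert count_lost_norm4(_example_matrix) == 20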
| 12,202
|
def sumaDigits(s):
"""assumes s is a string and returns the sum of the
decimal digits in s. For example if s is 'a2b3c' it returns 5"""
suma = 0
for c in s:
try:
suma+=int(c)
except ValueError:
continue
return suma
| 12,203
|
def test_incorrectly_sized_pixel_ticks():
"""BinaryMaskCollection.from_label_array_and_ticks with 2D data and some pixel ticks provided,
albeit of the wrong cardinality."""
label_array, physical_ticks = label_array_2d()
pixel_ticks = {
Axes.X: [2, 3, 4, 5, 6, 7, 8],
}
with pytest.raises(ValueError):
BinaryMaskCollection.from_label_array_and_ticks(
label_array,
pixel_ticks,
physical_ticks,
None
)
| 12,204
|
def f5_list_policy_file_types_command(client: Client, policy_md5: str) -> CommandResults:
"""
Get a list of all policy file types.
Args:
client (Client): f5 client.
policy_md5 (str): MD5 hash of the policy.
"""
result = client.list_policy_file_types(policy_md5)
table_name = 'f5 data for listing policy file types:'
readable_output, printable_result = build_command_result(result, table_name)
command_results = CommandResults(
outputs_prefix='f5.FileType',
outputs_key_field='id',
readable_output=readable_output,
outputs=printable_result,
raw_response=result
)
return command_results
| 12,205
|
def recurrent_layer(input,
act=None,
bias_attr=None,
param_attr=None,
name=None,
reverse=False,
layer_attr=None):
"""
    Simple recurrent unit layer. It is just a fully connected layer applied across both
    time steps and the neural network.
For each sequence [start, end] it performs the following computation\:
.. math::
out_{i} = act(in_{i}) \\ \\ \\text{for} \\ i = start \\\\
out_{i} = act(in_{i} + out_{i-1} * W) \\ \\ \\text{for} \\ start < i <= end
    If reverse is True, the order is reversed\:
.. math::
out_{i} = act(in_{i}) \\ \\ \\text{for} \\ i = end \\\\
out_{i} = act(in_{i} + out_{i+1} * W) \\ \\ \\text{for} \\ start <= i < end
:param input: Input Layer
:type input: LayerOutput
:param act: activation.
:type act: BaseActivation
:param bias_attr: bias attribute.
:type bias_attr: ParameterAttribute
:param param_attr: parameter attribute.
    :type param_attr: ParameterAttribute
    :param reverse: whether to process each sequence in reverse order.
    :type reverse: bool
    :param name: name of the layer
    :type name: basestring
:param layer_attr: Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
:rtype: LayerOutput
"""
Layer(
name=name,
type=LayerType.RECURRENT_LAYER,
inputs=Input(input.name, **param_attr.attr),
active_type=act.name,
bias=ParamAttr.to_bias(bias_attr),
reversed=reverse,
**ExtraAttr.to_kwargs(layer_attr))
return LayerOutput(
name=name,
layer_type=LayerType.RECURRENT_LAYER,
parents=[input],
size=input.size,
activation=act,
reverse=reverse)
| 12,206
|
def add_replicas(rse, files, issuer, ignore_availability=False):
"""
Bulk add file replicas.
:param rse: The RSE name.
:param files: The list of files.
:param issuer: The issuer account.
:param ignore_availability: Ignore the RSE blacklisting.
    :returns: True if successful, False otherwise
"""
validate_schema(name='dids', obj=files)
kwargs = {'rse': rse}
if not permission.has_permission(issuer=issuer, action='add_replicas', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not add file replicas on %s' % (issuer, rse))
if not permission.has_permission(issuer=issuer, action='skip_availability_check', kwargs=kwargs):
ignore_availability = False
replica.add_replicas(rse=rse, files=files, account=issuer, ignore_availability=ignore_availability)
| 12,207
|
def mean_center(X):
"""
@param X: 2-dimensional matrix of number data
@type X: numpy array
@return: Mean centered X (always has same dimensions as X)
"""
(rows, cols) = shape(X)
new_X = zeros((rows, cols), float)
_averages = average(X, 0)
for row in range(rows):
new_X[row, 0:cols] = X[row, 0:cols] - _averages[0:cols]
return new_X
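# Minimal usage sketch, assuming the module does `from numpy import shape, zeros, average`
# (the names used inside mean_center). Columns of the result are centred on zero.
import numpy as np
_X = np.array([[1.0, 2.0],
               [3.0, 4.0],
               [5.0, 6.0]])
_Xc = mean_center(_X)          # subtracts the column means [3., 4.]
assert np.allclose(_Xc.mean(axis=0), 0.0)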
| 12,208
|
def inventory_report(products, show_all=False):
"""Prints a list of ACME objects into a report."""
print("\nACME CORPORATION OFFICIAL INVENTORY REPORT")
totalPrice = 0
totalWeight = 0
totalFlammability = 0
all_product_titles = []
    for product in products:
        # Print each generated product when show_all is requested
        if show_all:  # `if (show_all == True)` triggers PEP 8 E712; test truthiness directly
            print("\nDescription of", product.name, "[ID:", product.identifier, "]",
                  "\nPRICE:\t", product.price,
                  "\nWEIGHT:\t", product.weight,
                  "\nFLAMMABILITY:\t", product.flammability)
        totalPrice += product.price
        totalWeight += product.weight
        totalFlammability += product.flammability
        all_product_titles.append(product.name)
print("\nUnique product names:", len(set(all_product_titles)),
"\nAverage Price:", totalPrice/len(products),
"\nAverage Weight:", totalWeight/len(products),
"\nAverage Flammability:", totalFlammability/len(products))
| 12,209
|
def updateTransformMatrixFromArray(transformNode, narray, toWorld=False):
"""Set transformation matrix from a numpy array of size 4x4 (toParent).
    :param toWorld: if set to True then the transform will be set so that the transform-to-world
        matrix equals narray; otherwise the transform-to-parent matrix will be set to narray.
"""
import numpy as np
from vtk import vtkMatrix4x4
narrayshape = narray.shape
if narrayshape != (4, 4):
raise RuntimeError("Unsupported numpy array shape: " + str(narrayshape) + " expected (4,4)")
if toWorld and transformNode.GetParentTransformNode():
# thisToParent = worldToParent * thisToWorld = inv(parentToWorld) * toWorld
narrayParentToWorld = arrayFromTransformMatrix(transformNode.GetParentTransformNode())
thisToParent = np.dot(np.linalg.inv(narrayParentToWorld), narray)
updateTransformMatrixFromArray(transformNode, thisToParent, toWorld=False)
else:
vmatrix = vtkMatrix4x4()
updateVTKMatrixFromArray(vmatrix, narray)
transformNode.SetMatrixTransformToParent(vmatrix)
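# The matrix algebra used in the toWorld branch can be sanity-checked in plain
# numpy, with no Slicer objects involved (hypothetical matrices, sketch only):
import numpy as np
_parent_to_world = np.diag([2.0, 2.0, 2.0, 1.0])
_this_to_world = np.eye(4)
_this_to_world[:3, 3] = [10.0, 0.0, 0.0]
_this_to_parent = np.linalg.inv(_parent_to_world) @ _this_to_world
# Composing back recovers the requested to-world matrix.
assert np.allclose(_parent_to_world @ _this_to_parent, _this_to_world)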
| 12,210
|
def csv_to_json(order_sentence_file: str, order_comment_file: str, os_filter_file: str=None) -> dict:
"""
Conversion of CSV to dictionary/JSON for sequenced PowerPlans and clinical
category
    :param order_sentence_file: path to the order sentence CSV export
    :param order_comment_file: path to the order comment CSV export
    :param os_filter_file: optional path to a CSV used to filter order sentences
    :return: dictionary with 'domain' and 'powerplans' keys describing the sequenced PowerPlans
"""
output_dict = collections.defaultdict()
details_dict = create_os_details_dict(os_file=order_sentence_file,
comp_file=order_comment_file,
os_filter_file=os_filter_file)
with open(order_comment_file, "r") as f:
reader = csv.DictReader(f)
row = next(reader)
field_names = list(row.keys())
with open(order_comment_file, 'r') as f:
reader = csv.DictReader(f, fieldnames=field_names)
next(reader)
for row in reader:
powerplan = row['POWERPLAN_DESCRIPTION']
if not powerplan:
continue
phase = row['PHASE']
powerplan_display_method = row['PLAN_DISPLAY_METHOD']
phase_display_method = row['PHASE_DISPLAY_METHOD']
dcp_clin_cat = row['DCP_CLIN_CAT']
dcp_clin_sub_cat = row['DCP_CLIN_SUB_CAT']
sequence = int(row['SEQUENCE'].strip())
bgcolor_red = row['BGCOLOR_RED']
bgcolor_green = row['BGCOLOR_GREEN']
bgcolor_blue = row['BGCOLOR_BLUE']
synonym = row['COMPONENT']
iv_synonym = row.get("IV_COMPONENT")
orderable_type_flag = int(row.get("ORDERABLE_TYPE_FLAG"))
target_duration = row['TARGET_DURATION']
start_offset = row['START_OFFSET']
link_duration_to_phase = row['LINK_DURATION_TO_PHASE']
required_ind = row['REQUIRED_IND']
include_ind = row['INCLUDE_IND']
chemo_ind = row['CHEMO_IND']
chemo_related_ind = row['CHEMO_RELATED_IND']
persistent_ind = row['PERSISTENT_IND']
linking_rule = row.get("LINKING_RULE")
linking_rule_quantity = row.get("LINKING_RULE_QUANTITY")
linking_rule_flag = row.get("LINKING_RULE_FLAG")
linking_override_reason = row.get("LINKING_OVERRIDE_REASON")
assigned_dots = row.get("ASSIGNED_DOTS")
if row['ORDER_SENTENCE_ID'] is not None:
order_sentence_id = int(float(row['ORDER_SENTENCE_ID']))
else:
order_sentence_id = 0
if row['ORDER_SENTENCE_SEQ'] is not None and row['ORDER_SENTENCE_SEQ']:
sent_seq = int(row['ORDER_SENTENCE_SEQ'].strip())
else:
sent_seq = 0
if powerplan not in output_dict:
output_dict[powerplan] = {
'display_method': powerplan_display_method,
'phases': {}
}
phase_dict = output_dict.get(powerplan).get('phases')
if not phase:
phase = powerplan
phase_display_method = powerplan_display_method
if phase not in phase_dict:
phase_dict[phase] = {
'phase_display_method': phase_display_method,
'components': []
}
comp_dict = phase_dict.get(phase).get('components')
component_idx = find_key_val_idx_in_list(
lst=comp_dict, key='sequence', value=sequence
)
if component_idx is None:
comp_dict.append({
'synonym': synonym,
'orderable_type_flag': orderable_type_flag,
'dcp_clin_cat': dcp_clin_cat,
'dcp_clin_sub_cat': dcp_clin_sub_cat,
'sequence': sequence,
'target_duration': target_duration,
'start_offset': start_offset,
'link_duration_to_phase': link_duration_to_phase,
'required_ind': required_ind,
'include_ind': include_ind,
'chemo_ind': chemo_ind,
'chemo_related_ind': chemo_related_ind,
'persistent_ind': persistent_ind,
'linking_rule': linking_rule,
'linking_rule_quantity': linking_rule_quantity,
'linking_rule_flag': linking_rule_flag,
'linking_override_reason': linking_override_reason,
'assigned_dots': assigned_dots,
'bgcolor_red': bgcolor_red,
'bgcolor_green': bgcolor_green,
'bgcolor_blue': bgcolor_blue,
'order_sentences': []
})
component_idx = -1
sent_list = comp_dict[component_idx].get('order_sentences')
# sentence_idx = find_key_val_idx_in_list(
# lst=sent_list, key='sequence', value=sent_seq
# )
order_sentence_details = details_dict.get(order_sentence_id)
if order_sentence_id > 0:
sent_list.append({
'sequence': sent_seq,
'order_sentence_id': order_sentence_id,
'order_sentence_details': order_sentence_details,
'iv_synonym': iv_synonym
})
sentence_idx = -1
# TODO: Refactor this to have a domain key and a powerplans key that
# will hold the powerplans dictionary
    if 'b0783' in order_comment_file.lower():
        domain = 'b0783'
    elif 'p0783' in order_comment_file.lower():
        domain = 'p0783'
    else:
        domain = None
output = dict()
output['domain'] = domain
output['powerplans'] = output_dict
return output
| 12,211
|
def process_xlsform(xls, default_name):
"""
Process XLSForm file and return the survey dictionary for the XLSForm.
"""
# FLOW Results package is a JSON file.
file_object = None
if xls.name.endswith('csv'):
# a csv file gets closed in pyxform, make a copy
xls.seek(0)
file_object = BytesIO()
file_object.write(xls.read())
file_object.seek(0)
xls.seek(0)
try:
return parse_file_to_json(xls.name, file_object=file_object or xls)
except csv.Error as e:
if is_newline_error(e):
xls.seek(0)
file_object = StringIO(
u'\n'.join(xls.read().splitlines()))
return parse_file_to_json(
xls.name, default_name=default_name, file_object=file_object)
raise e
| 12,212
|
def unflatten(X: np.ndarray, Y: np.ndarray, shape: tuple):
""" Unflattens images with shape defined by list of tuples s
X is an array (1D), unflattened to 2D
Y is an array (1D) of flattened mask (flattened 2D label) array
Not that X and Y are not compatible dimensions
s denotes dimensions of the *INPUT* image
len(s) == 3 : reshape to 2D label image
len(s) == 2 : input is flattened image, ignore.
"""
# This need to be tested.
Yout = Y.copy()
Yout[Y!=LABEL_IGNORE] = X
Yout = np.reshape(Yout,(shape[0], shape[1]))
return Yout
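# A small sketch of the intended behaviour (comment only, since LABEL_IGNORE is a
# module-level constant of the original code; the value -1 below is hypothetical):
#   LABEL_IGNORE = -1
#   Y = np.array([0, -1, 1, -1, 2, 0])   # flattened 2x3 label image
#   X = np.array([9, 8, 7, 6])           # predictions for the 4 non-ignored pixels
#   unflatten(X, Y, shape=(2, 3))
#   # -> [[ 9, -1,  8],
#   #     [-1,  7,  6]]   ignored positions keep -1, the rest are filled from X in order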
| 12,213
|
def collate_features(model_config: Dict[str, Any], dummy_features: List[str]) -> List[str]:
"""Saves and returns final list of simple and dummy features."""
simple_features = list(model_config.get("simple_features", {}).keys())
features = simple_features + dummy_features
logging.info(
f"Model uses {len(simple_features)} simple features and"
+ f"{len(dummy_features)} dummy features"
+ f"for {len(features)} features total"
)
output_path = Path(utils.get_model_path(model_config), "features.txt")
logging.info(f"Saving list of features to {output_path}")
with open(output_path, "w") as f:
for feature in features:
f.write(feature)
f.write("\n")
return features
| 12,214
|
def test_get_event_consumer(local, provide_loop, topic, sub, consumer_config,
exp_topic, auth_client, exp_sub, subscriber_client,
emulator, monkeypatch, event_loop, metrics):
"""Happy path to initialize an Event Consumer client."""
success_chnl, error_chnl = asyncio.Queue(), asyncio.Queue()
if local:
monkeypatch.setenv('PUBSUB_EMULATOR_HOST', True)
consumer_config['topic'], consumer_config['subscription'] = topic, sub
kwargs = {
'config': consumer_config,
'success_channel': success_chnl,
'error_channel': error_chnl,
'metrics': metrics
}
if provide_loop:
kwargs['loop'] = event_loop
client = service.get_event_consumer(**kwargs)
creds = None
if not local:
creds = auth_client.return_value.creds
subscriber_client.assert_called_once_with(credentials=creds)
sub_inst = subscriber_client.return_value
sub_inst.create_subscription.assert_called_once_with(exp_sub, exp_topic)
assert client._validator
assert client._parser
assert client.success_channel is success_chnl
assert client.error_channel is error_chnl
assert client._subscriber
assert exp_sub == client._subscription
assert ['audit-log', 'event'] == sorted(client._message_schemas)
if provide_loop:
assert event_loop is client._loop
else:
assert event_loop is not client._loop
| 12,215
|
def new_dga(*, key_mo=None, pred=None, deg_diff=None) -> Type[DgaGb]:
"""Return a dynamically created subclass of GbDga.
When key_mo=None, use revlex ordering by default."""
class_name = f"GbDga_{DgaGb._index_subclass}"
DgaGb._index_subclass += 1
if deg_diff is not None:
deg_diff = Vector(deg_diff)
else:
raise BA.MyDegreeError("degree of differential not supplied")
dct = {
"gens": {},
"rels": {},
"_rels_buffer": {},
"key_mo": key_mo,
"pred": pred or pred_always_true,
"dim_grading": None,
"deg_diff": deg_diff,
}
return type(class_name, (DgaGb,), dct)
| 12,216
|
def is_xarray(array):
    Return True if array is an xarray.DataArray
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
    return isinstance(array, xr.DataArray)
| 12,217
|
def set_variable(type, value):
""" Setter for dynamic variable (a.k.a. dependency).
@param type: The type of the dynamic variable.
@type type: Str
    @param value: The value to assign to the dynamic variable.
    @type value: Str
@return: None
@rtype : None
"""
global object_creations
object_creations += 1
tlb[type] = value
# thread_id = threading.current_thread().ident
# print("Setting: {} / Value: {} ({})".format(type, value, thread_id))
# Keep track of all dynamic objects ever created.
if dyn_objects_cache_lock is not None:
dyn_objects_cache_lock.acquire()
if type not in dyn_objects_cache:
dyn_objects_cache[type] = []
dyn_objects_cache[type].append(value)
if dyn_objects_cache_lock is not None:
dyn_objects_cache_lock.release()
| 12,218
|
def _check_duplicate_gnames(block_id, block_dict, extra_args):
"""
    Return True if no duplicate group names exist in the /etc/group file, else return a string listing the duplicates
"""
gnames = _execute_shell_command("cat /etc/group | cut -f1 -d\":\"", python_shell=True).strip()
gnames = gnames.split('\n') if gnames != "" else []
duplicate_gnames = [k for k, v in Counter(gnames).items() if v > 1]
if duplicate_gnames is None or duplicate_gnames == []:
return True
return str(duplicate_gnames)
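# The core duplicate detection is plain collections.Counter and can be exercised
# without the shell call (sketch with made-up group names):
from collections import Counter
_gnames = ["root", "wheel", "staff", "wheel"]
_duplicate_gnames = [k for k, v in Counter(_gnames).items() if v > 1]
assert _duplicate_gnames == ["wheel"]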
| 12,219
|
def test_combine_same_shape(span):
"""Test _combine with same shape of cubes."""
len_data = 3
num_cubes = 5
cubes = []
for i in range(num_cubes):
cube = generate_cube_from_dates('monthly',
'360_day',
fill_val=i,
len_data=len_data)
cubes.append(cube)
result_cube = mm._combine(cubes)
dim_coord = result_cube.coord(mm.CONCAT_DIM)
assert dim_coord.var_name == mm.CONCAT_DIM
assert result_cube.shape == (num_cubes, len_data)
desired = np.linspace((0, ) * len_data,
num_cubes - 1,
num=num_cubes,
dtype=int)
np.testing.assert_equal(result_cube.data, desired)
| 12,220
|
def join_nonempty(l):
"""
Join all of the nonempty string with a plus sign.
>>> join_nonempty(('x1 + x2 + x1:x2', 'x3 + x4'))
'x1 + x2 + x1:x2 + x3 + x4'
>>> join_nonempty(('abc', '', '123', ''))
'abc + 123'
"""
return ' + '.join(s for s in l if s != '')
| 12,221
|
def accumulateProduct(src1, src2, dst, mask=None):
""" accumulateProduct(src1, src2, dst[, mask]) -> dst """
pass
| 12,222
|
def send_file_to_euler(username, local_file):
""" Send a local file to the ETHZ Euler cluster (home folder).
Parameters
----------
username (str): Username.
local_file (str): Path of local file to send.
Returns
-------
None
"""
ssh.send_file(username=username, local_file=local_file, server=server)
| 12,223
|
def correct_msa_restypes(protein):
"""Correct MSA restype to have the same order as rc."""
new_order_list = rc.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE
new_order = torch.tensor(
[new_order_list] * protein["msa"].shape[1],
device=protein["msa"].device,
).transpose(0, 1)
protein["msa"] = torch.gather(new_order, 0, protein["msa"])
perm_matrix = np.zeros((22, 22), dtype=np.float32)
perm_matrix[range(len(new_order_list)), new_order_list] = 1.0
for k in protein:
if "profile" in k:
            num_dim = protein[k].shape[-1]
assert num_dim in [
20,
21,
22,
], "num_dim for %s out of expected range: %s" % (k, num_dim)
            protein[k] = torch.matmul(
                protein[k], torch.tensor(perm_matrix[:num_dim, :num_dim], dtype=protein[k].dtype)
            )
return protein
| 12,224
|
def test_update_user_ensures_request_data_id_matches_resource_id(
client, auth, example_users
):
"""If request data contains an (optional) "id" then it has to match the resource id."""
auth.login("user1@example.com", "password1")
url_update_user = url_for("api.update_user", user_id=auth.id)
json = {
"id": auth.id,
"username": "??",
"email": "user1@example.com",
"password": "????",
}
r = client.put(url_update_user, headers=auth.headers, json=json)
assert r.status_code == 200
json = {"username": "??", "email": "user1@example.com", "password": "????"}
r = client.put(url_update_user, headers=auth.headers, json=json)
assert r.status_code == 200
json = {
"id": auth.id + 1,
"username": "??",
"email": "user1@example.com",
"password": "????",
}
r = client.put(url_update_user, headers=auth.headers, json=json)
assert r.status_code == 400
data = r.get_json()
assert "message" in data
assert data["message"] == "Request data id has to match resource id."
| 12,225
|
def results(event, update):
"""
Find all available results for a given event.
"""
server, repository = connect_gitlab()
events = gitlab.find_events(repository, milestone=config.get("olivaw", "milestone"), subset=[event], update=update, repo=False)
for event in events:
click.secho(f"{event.title}")
logger = logging.AsimovLogger(event=event.event_object)
for production in event.productions:
try:
for result, meta in production.results().items():
print(f"{production.event.name}/{production.name}/{result}, {production.results(result)}")
            except Exception:
                pass
# print(production.results())
| 12,226
|
def plot_bootstrap_lr_grp(dfboot, df, grp='grp', prm='premium', clm='claim',
title_add='', force_xlim=None):
""" Plot bootstrapped loss ratio, grouped by grp """
count_txt_h_kws, mean_txt_kws, pest_mean_point_kws, mean_point_kws = _get_kws_styling()
if dfboot[grp].dtypes != 'object':
dfboot = dfboot.copy()
dfboot[grp] = dfboot[grp].map(lambda x: f's{x}')
mn = dfboot.groupby(grp)['lr'].mean().tolist()
pest_mn = df.groupby(grp).apply(lambda g: np.nan_to_num(g[clm], 0).sum() / g[prm].sum()).values
f = plt.figure(figsize=(14, 2+(len(mn)*.25))) #, constrained_layout=True)
gs = gridspec.GridSpec(1, 2, width_ratios=[11, 1], figure=f)
ax0 = f.add_subplot(gs[0])
ax1 = f.add_subplot(gs[1], sharey=ax0)
_ = sns.violinplot(x='lr', y=grp, data=dfboot, kind='violin', cut=0,
scale='count', width=0.6, palette='cubehelix_r', ax=ax0)
_ = [ax0.plot(v, i%len(mn), **mean_point_kws) for i, v in enumerate(mn)]
_ = [ax0.annotate(f'{v:.1%}', xy=(v, i%len(mn)), **mean_txt_kws) for i, v in enumerate(mn)]
_ = [ax0.plot(v, i%len(pest_mn), **pest_mean_point_kws) for i, v in enumerate(pest_mn)]
elems = [Line2D([0],[0], label='population (bootstrap)', **mean_point_kws),
Line2D([0],[0], label='sample', **pest_mean_point_kws)]
_ = ax0.legend(handles=elems, title='Mean LRs') #loc='upper right',
if force_xlim is not None:
_ = ax0.set(xlim=force_xlim)
_ = sns.countplot(y=grp, data=df, ax=ax1, palette='cubehelix_r')
ct = df.groupby(grp).size().tolist()
_ = [ax1.annotate(f'{v}', xy=(v, i%len(ct)), **count_txt_h_kws) for i, v in enumerate(ct)]
ypos = 1.01
if title_add != '':
ypos = 1.03
title_add = f'\n{title_add}'
title = (f'Grouped Loss Ratios (Population Estimates via Bootstrapping)' +
f' - grouped by {grp}')
_ = f.suptitle(f'{title}{title_add}', y=ypos)
plt.tight_layout()
return gs
| 12,227
|
def ajax_user_search(request):
"""
returns the user search result. currently this is not used since search user feature changed to form post.
"""
    if request.method == 'POST':
        username = request.POST.get('username', '')
        users = User.objects.filter(username__contains=username)
        try:
            brand = int(request.POST['company'])
            users = users.filter(userprofile__work_for=brand)
        except (KeyError, ValueError):
            pass
    return render_to_response('ajax/user_search.html', {'users': users}, mimetype='text/html')
| 12,228
|
def shortest_path(start, end):
"""
Using 2-way BFS, finds the shortest path from start_position to
end_position. Returns a list of moves.
You can use the rubik.quarter_twists move set.
Each move can be applied using rubik.perm_apply
"""
if start == (7, 8, 6, 20, 18, 19, 3, 4, 5, 16, 17, 15, 0, 1, 2, 14, 12, 13, 10, 11, 9, 21, 22, 23):
return None
return bfs((start, None), end)
| 12,229
|
def pytest_unconfigure(config: pytest.Config) -> None:
"""
Called before test process is exited.
:param config: The pytest config object.
"""
with logger.contextualize(task="teardown".rjust(10, " ")):
logger.debug("Unregistering kiru plugins")
from sel4.core.plugins.directory_manager import DirectoryManagerPlugin
# pl_names = ['sel4.core.plugins.webdriver', DirectoryManagerPlugin.name, AssertionPlugin.name]
pl_names = ["sel4.core.plugins.webdriver", DirectoryManagerPlugin.name]
for pl in pl_names:
if config.pluginmanager.has_plugin(pl):
plugin = config.pluginmanager.get_plugin(pl)
name = config.pluginmanager.get_name(plugin)
logger.debug("Unregistering plugin: " "[wheat1]{name}[/]", name=name)
config.pluginmanager.unregister(plugin, pl)
| 12,230
|
def prep_param_lists(model, flat_master=False):
"""
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
"""
model_params = [param for param in model.parameters() if param.requires_grad]
if flat_master:
# Give the user some more useful error messages
try:
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors(
[param.data for param in model_params]
).float()
except BaseException:
print(
"Error in prep_param_lists: model may contain a mixture of parameters "
"of different types. Use flat_master=False, or use F16_Optimizer."
)
raise
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return model_params, [master_params]
else:
master_params = [param.clone().float().detach() for param in model_params]
for param in master_params:
param.requires_grad = True
return model_params, master_params
| 12,231
|
def to_matrix_vector(transform):
"""
Code from nilearn module, available at: https://github.com/nilearn/nilearn/blob/master/nilearn/image/resampling.py
Split an homogeneous transform into its matrix and vector components.
The transformation must be represented in homogeneous coordinates.
It is split into its linear transformation matrix and translation vector
components.
This function does not normalize the matrix. This means that for it to be
the inverse of from_matrix_vector, transform[-1, -1] must equal 1, and
transform[-1, :-1] must equal 0.
Parameters
----------
transform: numpy.ndarray
Homogeneous transform matrix. Example: a (4, 4) transform representing
linear transformation and translation in 3 dimensions.
Returns
-------
matrix, vector: numpy.ndarray
The matrix and vector components of the transform matrix. For
an (N, N) transform, matrix will be (N-1, N-1) and vector will be
a 1D array of shape (N-1,).
See Also
--------
from_matrix_vector
"""
ndimin = transform.shape[0] - 1
ndimout = transform.shape[1] - 1
matrix = transform[0:ndimin, 0:ndimout]
vector = transform[0:ndimin, ndimout]
return matrix, vector
| 12,232
|
def game(var, wrapper, message):
"""Vote for a game mode to be picked."""
if message:
vote_gamemode(var, wrapper, message.lower().split()[0], doreply=True)
else:
wrapper.pm(messages["no_mode_specified"].format(_get_gamemodes(var)))
| 12,233
|
def sieveEr(N):
"""
input: positive integer 'N' > 2
returns a list of prime numbers from 2 up to N.
This function implements the algorithm called
sieve of erathostenes.
"""
# precondition
assert isinstance(N,int) and (N > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    beginList = [x for x in range(2,N+1)]
    ans = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(beginList)):
for j in range(i+1,len(beginList)):
if (beginList[i] != 0) and \
(beginList[j] % beginList[i] == 0):
beginList[j] = 0
# filters actual prime numbers.
ans = [x for x in beginList if x != 0]
    # postcondition
assert isinstance(ans,list), "'ans' must been from type list"
return ans
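# Small sanity check of the sieve:
assert sieveEr(20) == [2, 3, 5, 7, 11, 13, 17, 19]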
| 12,234
|
def validate_fields(item, fields=None):
"""
Check that all requested fields were returned
:param item: comment or submission
:param fields: list[str]
:return: list[str]
"""
actual_fields = item.d_.keys()
if fields is None:
requested_fields = actual_fields
else:
requested_fields = fields
missing_fields = set(requested_fields).difference(actual_fields)
# drop extra fields returned from api
final_fields = set(requested_fields).intersection(actual_fields)
return final_fields, missing_fields
| 12,235
|
def chunks(l, n, cnt):
"""Yield successive n-sized chunks from l first cnt elements."""
for i in range(0, cnt, n):
if i + n > cnt:
yield l[i:cnt]
else:
yield l[i:i + n]
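# Usage sketch: split the first 7 elements of a 10-element list into chunks of 3.
assert list(chunks(list(range(10)), 3, 7)) == [[0, 1, 2], [3, 4, 5], [6]]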
| 12,236
|
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None, target_imbalance_ratio=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, y,
target_imbalance_ratio, verbose)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
| 12,237
|
def test_get_backend_invalid_custom_name():
"""Test that loading backend fails with specific error if name is invalid
"""
with pytest.raises(ValueError):
storage.get_storage('ckanext.asset_storage.storage.local.LocalStorage',
{'storage_path': '/tmp'})
with pytest.raises(ValueError):
storage.get_storage('ckanext.asset_storage.storage:local:LocalStorage',
{'storage_path': '/tmp'})
| 12,238
|
def remove_nones(sequence: Iterable) -> list:
"""Removes elements where bool(x) evaluates to False.
Examples
--------
Normal usage::
remove_nones(['m', '', 'l', 0, 42, False, True])
# ['m', 'l', 42, True]
"""
# Note this is redundant with it.chain
return [x for x in sequence if x]
| 12,239
|
def writeSConscript(dirpath, profile, pkeys):
""" Create a SConscript file in dirpath.
"""
# Activate modules
mods, defines = collectModules(dirpath, pkeys)
if validKey('CONFIG', pkeys) and isComplicated(pkeys['CONFIG'][0]):
return False
qrcname = ""
if not validKey('SOURCES', pkeys):
# No SOURCES specified, try to find CPP files
slist = glob.glob(os.path.join(dirpath,'*.cpp'))
if len(slist) == 0:
# Nothing to build here
return False
else:
# Scan for Q_INIT_RESOURCE
for s in slist:
qrcname = findQResourceName(s)
if qrcname:
break
allmods = True
for m in mods:
if m not in pkeys['qtmodules']:
print(" no module %s" % m)
allmods = False
if not allmods:
return False
sc = open(os.path.join(dirpath,'SConscript'),'w')
sc.write("""Import('qtEnv')
env = qtEnv.Clone()
""")
if len(mods):
sc.write('env.EnableQt5Modules([\n')
for m in mods[:-1]:
sc.write("'%s',\n" % m)
sc.write("'%s'\n" % mods[-1])
sc.write('])\n\n')
# Add CPPDEFINEs
if len(defines):
sc.write('env.AppendUnique(CPPDEFINES=[\n')
for d in defines[:-1]:
sc.write("'%s',\n" % d)
sc.write("'%s'\n" % defines[-1])
sc.write('])\n\n')
# Add LIBS
if validKey('LIBS', pkeys):
sc.write('env.AppendUnique(LIBS=[\n')
for d in pkeys['LIBS'][:-1]:
sc.write("'%s',\n" % d)
sc.write("'%s'\n" % pkeys['LIBS'][-1])
sc.write('])\n\n')
# Collect INCLUDEPATHs
incpaths = []
if validKey('INCLUDEPATH', pkeys):
incpaths = pkeys['INCLUDEPATH']
if validKey('FORMS', pkeys):
for s in pkeys['FORMS']:
head, tail = os.path.split(s)
if head and head not in incpaths:
incpaths.append(head)
if incpaths:
sc.write('env.Append(CPPPATH=[\n')
for d in incpaths[:-1]:
sc.write("'%s',\n" % relOrAbsPath(dirpath, d))
sc.write("'%s'\n" % relOrAbsPath(dirpath, incpaths[-1]))
sc.write('])\n\n')
# Add special environment flags
if len(qtenv_flags):
for key, value in list(qtenv_flags.items()):
sc.write("env['%s']=%s\n" % (key, value))
# Write source files
if validKey('SOURCES', pkeys):
sc.write('source_files = [\n')
for s in pkeys['SOURCES'][:-1]:
sc.write("'%s',\n" % relOrAbsPath(dirpath, s))
if not qrcname:
qrcname = findQResourceName(os.path.join(dirpath,s))
sc.write("'%s'\n" % relOrAbsPath(dirpath, pkeys['SOURCES'][-1]))
if not qrcname:
qrcname = findQResourceName(os.path.join(dirpath,pkeys['SOURCES'][-1]))
sc.write(']\n\n')
# Write .ui files
if validKey('FORMS', pkeys):
sc.write('ui_files = [\n')
for s in pkeys['FORMS'][:-1]:
sc.write("'%s',\n" % relOrAbsPath(dirpath, s))
sc.write("'%s'\n" % relOrAbsPath(dirpath, pkeys['FORMS'][-1]))
sc.write(']\n')
sc.write('env.Uic5(ui_files)\n\n')
# Write .qrc files
if validKey('RESOURCES', pkeys):
qrc_name = pkeys['RESOURCES'][0]
if qrcname:
if qrc_name.endswith('.qrc'):
qrc_name = qrc_name[:-4]
sc.write("qrc_out = env.Qrc5('%s')\nsource_files.append(qrc_out)\nenv['QT5_QRCFLAGS'] = ['-name', '%s']\n" % (qrc_name, qrcname))
else:
if not qrc_name.endswith('.qrc'):
qrc_name += '.qrc'
sc.write("source_files.append('%s')\n" % qrc_name)
# Select module
type = 'Program'
if validKey('TEMPLATE', pkeys):
if pkeys['TEMPLATE'][0] == 'lib':
type = 'StaticLibrary'
if pkeys['TEMPLATE'][0] == 'dll':
type = 'SharedLibrary'
# TARGET may be wrapped by qtLibraryTarget function...
target = profile
if validKey('TARGET', pkeys):
t = pkeys['TARGET'][0]
m = qtlib_re.search(t)
if m:
t = "Qt" + m.group(1)
target = t.replace("$$TARGET", profile)
# Create program/lib/dll
else:
if validKey('SOURCES', pkeys):
sc.write("env.%s('%s', source_files)\n\n" % (type, target))
else:
sc.write("env.%s('%s', Glob('*.cpp'))\n\n" % (type, target))
sc.close()
return True
| 12,240
|
def retrieve(passed: List[str]) -> List[str]:
"""
Retrieves all items that are able to be
converted, recursively, from the passed list.
Parameters
----------
passed: List[str]
The items to search.
Returns
-------
List[str]:
All found items.
"""
ret = []
for item in passed:
try:
path = pathlib.Path(item)
if path.is_file() and path.suffix == ".txt":
ret += retrieve(path.read_text().split("\n"))
elif path.is_file():
ret.append(str(path))
elif path.is_dir():
ret += retrieve([str(p) for p in path.iterdir()])
else:
ret.append(item)
except OSError:
ret.append(item)
return ret
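# Behaviour sketch: entries that are neither existing .txt lists, files, nor
# directories are passed through untouched, so with no matching paths on disk
# the call is effectively the identity (inputs below are hypothetical):
assert retrieve(["not-a-real-path", "another-token"]) == ["not-a-real-path", "another-token"]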
| 12,241
|
def test_response_parsing():
"""
Should have properly formed payload if working.
"""
eoo = EdxOrgOAuth2(strategy=load_strategy())
result = eoo.get_user_details(
{
"id": 5,
"username": "darth",
"email": "darth@deathst.ar",
"name": "Darth Vader",
}
)
assert {
"edx_id": "darth",
"username": "darth",
"fullname": "Darth Vader",
"email": "darth@deathst.ar",
"first_name": "",
"last_name": "",
} == result
| 12,242
|
def get_out_of_sample_best_point_acqf(
model: Model,
Xs: List[Tensor],
X_observed: Tensor,
objective_weights: Tensor,
mc_samples: int = 512,
fixed_features: Optional[Dict[int, float]] = None,
fidelity_features: Optional[List[int]] = None,
target_fidelities: Optional[Dict[int, float]] = None,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
seed_inner: Optional[int] = None,
qmc: bool = True,
**kwargs: Any,
) -> Tuple[AcquisitionFunction, Optional[List[int]]]:
"""Picks an appropriate acquisition function to find the best
out-of-sample (predicted by the given surrogate model) point
and instantiates it.
NOTE: Typically the appropriate function is the posterior mean,
but can differ to account for fidelities etc.
"""
# subset model only to the outcomes we need for the optimization
if kwargs.get(Keys.SUBSET_MODEL, True):
subset_model_results = subset_model(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
)
model = subset_model_results.model
objective_weights = subset_model_results.objective_weights
outcome_constraints = subset_model_results.outcome_constraints
fixed_features = fixed_features or {}
target_fidelities = target_fidelities or {}
if fidelity_features:
# we need to optimize at the target fidelities
if any(f in fidelity_features for f in fixed_features):
raise RuntimeError("Fixed features cannot also be fidelity features.")
elif set(fidelity_features) != set(target_fidelities):
raise RuntimeError(
"Must provide a target fidelity for every fidelity feature."
)
# make sure to not modify fixed_features in-place
fixed_features = {**fixed_features, **target_fidelities}
elif target_fidelities:
raise RuntimeError(
"Must specify fidelity_features in fit() when using target fidelities."
)
acqf_class, acqf_options = pick_best_out_of_sample_point_acqf_class(
outcome_constraints=outcome_constraints,
mc_samples=mc_samples,
qmc=qmc,
seed_inner=seed_inner,
)
objective, posterior_transform = get_botorch_objective_and_transform(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
X_observed=X_observed,
)
if objective is not None:
if not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
f"Unknown objective type: {objective.__class__}" # pragma: nocover
)
acqf_options = {"objective": objective, **acqf_options}
if posterior_transform is not None:
acqf_options = {"posterior_transform": posterior_transform, **acqf_options}
acqf = acqf_class(model=model, **acqf_options) # pyre-ignore [45]
if fixed_features:
acqf = FixedFeatureAcquisitionFunction(
acq_function=acqf,
d=X_observed.size(-1),
columns=list(fixed_features.keys()),
values=list(fixed_features.values()),
)
non_fixed_idcs = [i for i in range(Xs[0].size(-1)) if i not in fixed_features]
else:
non_fixed_idcs = None
return acqf, non_fixed_idcs
| 12,243
|
def removeBots(gdf, bot_list):
"""
A Function for removing Twitter bots.
Parameters
----------
gdf: <gpd.GeoDataFrame>
A GeoDataFrame from which Twitter bots should be removed.
bot_list: <list>
Input either 'home_unique_days' or 'home_unique_weeks'
Output
------
<gpd.GeoDataFrame>
A processed GeoDataFrame. Likely bots removed.
"""
copy = gdf
for index, row in gdf.iterrows():
userid = str(row['user']['id'])
for item in bot_list:
bot_id = item['userid']
if bot_id == userid:
gdf = gdf.drop(index)
print("A bot dropped: ID", userid, ". Length of GDF now: ", len(gdf))
print("Processing: ", index, "/", len(copy))
    return gdf
| 12,244
|
def getinput(prompt):
""">> getinput <prompt>
Get input, store it in '__input__'.
"""
local_dict = get_twill_glocals()[1]
inp = input(prompt)
local_dict['__input__'] = inp
return inp
| 12,245
|
def plot_qualitative_with_kde(
named_trainer,
dataset,
named_trainer_compare=None,
n_images=8,
percentiles=None, # if None uses uniform linspace from n_images
figsize=DFLT_FIGSIZE,
title=None,
seed=123,
height_ratios=[1, 3],
font_size=12,
h_pad=-3,
x_lim={},
is_smallest_xrange=False,
kdeplot_kwargs={},
n_samples=1,
upscale_factor=1,
**kwargs,
):
"""
Plot qualitative samples using `plot_posterior_samples` but select the samples and mask to plot
given the score at test time.
Parameters
----------
named_trainer : list [name, NeuralNet]
Trainer (model outputted of training) and the name under which it should be displayed.
dataset :
named_trainer_compare : list [name, NeuralNet], optional
Like `named_trainer` but for a model against which to compare.
n_images : int, optional
Number of images to plot (at uniform interval of log like). Only used if `percentiles` is None.
percentiles : list of float, optional
Percentiles of log likelihood of the main model for which to select an image. The length
of the list will correspond to the number fo images.
figsize : tuple, optional
title : str, optional
seed : int, optional
height_ratios : int iterable of length = nrows, optional
Height ratios of the rows.
font_size : int, optional
h_pad : int, optional
Padding between kde plot and images
x_lim : dict, optional
Dictionary containing one (or both) of "left", "right" correspomding to the x limit of kde plot.
is_smallest_xrange : bool, optional
Whether to rescale the x axis based on the range of percentils.
kdeplot_kwargs : dict, optional
Additional arguments to `sns.kdeplot`
upscale_factor : float, optional
Whether to upscale the image => extrapolation. Only if not uniform grid.
kwargs
!VERY DIRTY
"""
kwargs["n_samples"] = n_samples
kwargs["is_plot_std"] = False
kwargs["is_add_annot"] = False
if percentiles is not None:
n_images = len(percentiles)
plt.rcParams.update({"font.size": font_size})
fig, axes = plt.subplots(
2, 1, figsize=figsize, gridspec_kw={"height_ratios": height_ratios}
)
    # a dictionary that has "upscale_factor", which is needed for downscaling when
    # plotting (only used when the data is not gridded)
CntxtTrgtDictUpscale = partial(CntxtTrgtDict, upscale_factor=upscale_factor)
def _plot_kde_loglike(name, trainer):
chckpnt_dirname = dict(trainer.callbacks_)["Checkpoint"].dirname
test_eval_file = os.path.join(chckpnt_dirname, EVAL_FILENAME)
test_loglike = np.loadtxt(test_eval_file, delimiter=",")
sns.kdeplot(
test_loglike, ax=axes[0], shade=True, label=name, cut=0, **kdeplot_kwargs
)
sns.despine()
return test_loglike
def _grid_to_points(selected_data):
cntxt_trgt_getter = GridCntxtTrgtGetter(upscale_factor=upscale_factor)
for i in range(n_images):
X = selected_data["Y_cntxt"][i]
X_cntxt, Y_cntxt = cntxt_trgt_getter.select(
X, None, selected_data["X_cntxt"][i]
)
X_trgt, Y_trgt = cntxt_trgt_getter.select(
X, None, selected_data["X_trgt"][i]
)
yield CntxtTrgtDictUpscale(
X_cntxt=X_cntxt, Y_cntxt=Y_cntxt, X_trgt=X_trgt, Y_trgt=Y_trgt
)
def _plot_posterior_img_selected(name, trainer, selected_data, is_grided_trainer):
is_uniform_grid = isinstance(trainer.module_, GridConvCNP)
kwargs["img_indcs"] = []
kwargs["is_uniform_grid"] = is_uniform_grid
kwargs["is_return"] = True
if not is_uniform_grid:
if is_grided_trainer:
grids = [
plot_posterior_samples(
dataset, data, trainer.module_.cpu(), **kwargs
)
for i, data in enumerate(_grid_to_points(selected_data))
]
else:
grids = [
plot_posterior_samples(
dataset,
CntxtTrgtDictUpscale(
**{k: v[i] for k, v in selected_data.items()}
),
trainer.module_.cpu(),
**kwargs,
)
for i in range(n_images)
]
# images are padded by 2 pixels inbetween each but here you concatenate => will pad twice
# => remove all the rleft padding for each besides first
grids = [g[..., 2:] if i != 0 else g for i, g in enumerate(grids)]
return torch.cat(grids, axis=-1)
elif is_uniform_grid:
if not is_grided_trainer:
grids = []
for i in range(n_images):
_, X_cntxt = points_to_grid(
selected_data["X_cntxt"][i],
selected_data["Y_cntxt"][i],
dataset.shape[1:],
background=torch.tensor([0.0] * dataset.shape[0]),
)
Y_trgt, X_trgt = points_to_grid(
selected_data["X_trgt"][i],
selected_data["Y_trgt"][i],
dataset.shape[1:],
background=torch.tensor([0.0] * dataset.shape[0]),
)
grids.append(
plot_posterior_samples(
dataset,
dict(
X_cntxt=X_cntxt,
Y_cntxt=Y_trgt, # Y_trgt is all X because no masking for target (assumption)
X_trgt=X_trgt,
Y_trgt=Y_trgt,
),
trainer.module_.cpu(),
**kwargs,
)
)
grids = [g[..., 2:] if i != 0 else g for i, g in enumerate(grids)]
return torch.cat(grids, axis=-1)
else:
return plot_posterior_samples(
dataset,
{k: torch.cat(v, dim=0) for k, v in selected_data.items()},
trainer.module_.cpu(),
**kwargs,
)
name, trainer = named_trainer
test_loglike = _plot_kde_loglike(name, trainer)
if named_trainer_compare is not None:
left = axes[0].get_xlim()[0]
_ = _plot_kde_loglike(*named_trainer_compare)
axes[0].set_xlim(left=left) # left bound by first model to not look strange
if len(x_lim) != 0:
axes[0].set_xlim(**x_lim)
if percentiles is not None:
idcs = []
values = []
for i, p in enumerate(percentiles):
# value closest to percentile
percentile_val = np.percentile(test_loglike, p, interpolation="nearest")
idcs.append(np.argwhere(test_loglike == percentile_val).item())
values.append(percentile_val)
sorted_idcs = list(np.sort(idcs))[::-1]
if is_smallest_xrange:
axes[0].set_xlim(left=values[0] - 0.05, right=values[-1] + 0.05)
else:
# find indices such that same space between all
values = np.linspace(test_loglike.min(), test_loglike.max(), n_images)
idcs = [(np.abs(test_loglike - v)).argmin() for v in values]
sorted_idcs = list(np.sort(idcs))[::-1]
axes[0].set_ylabel("Density")
axes[0].set_xlabel("Test Log-Likelihood")
selected_data = []
set_seed(seed) # make sure same order and indices for cntxt and trgt
i = -1
saved_values = []
queue = sorted_idcs.copy()
next_idx = queue.pop()
for data in trainer.get_iterator(dataset, training=False):
Xi, yi = unpack_data(data)
for cur_idx in range(yi.size(0)):
i += 1
if next_idx != i:
continue
selected_data.append(
{k: v[cur_idx : cur_idx + 1, ...] for k, v in Xi.items()}
)
if len(queue) == 0:
break
else:
next_idx = queue.pop()
# puts back to non sorted array
selected_data = [selected_data[sorted_idcs[::-1].index(idx)] for idx in idcs]
selected_data = {k: v for k, v in tuple_cont_to_cont_tuple(selected_data).items()}
for v in values:
axes[0].axvline(v, linestyle=":", alpha=0.7, c="tab:green")
axes[0].legend(loc="upper left")
if title is not None:
axes[0].set_title(title, fontsize=18)
is_grided_trainer = isinstance(trainer.module_, GridConvCNP)
grid = _plot_posterior_img_selected(name, trainer, selected_data, is_grided_trainer)
middle_img = dataset.shape[1] // 2 + 1 # half height
y_ticks = [middle_img, middle_img * 3]
y_ticks_labels = ["Context", name]
if named_trainer_compare is not None:
grid_compare = _plot_posterior_img_selected(
*named_trainer_compare, selected_data, is_grided_trainer
)
grid = torch.cat(
(grid, grid_compare[:, grid_compare.size(1) // (n_samples + 1) + 1 :, :]),
dim=1,
)
y_ticks += [middle_img * (3 + 2 * n_samples)]
y_ticks_labels += [named_trainer_compare[0]]
axes[1].imshow(grid.permute(1, 2, 0).numpy())
axes[1].yaxis.set_major_locator(ticker.FixedLocator(y_ticks))
axes[1].set_yticklabels(y_ticks_labels, rotation="vertical", va="center")
remove_axis(axes[1])
if percentiles is not None:
axes[1].xaxis.set_major_locator(
ticker.FixedLocator(
[
(dataset.shape[2] // 2 + 1) * (i * 2 + 1)
for i, p in enumerate(percentiles)
]
)
)
axes[1].set_xticklabels(["{}%".format(p) for p in percentiles])
else:
axes[1].set_xticks([])
fig.tight_layout(h_pad=h_pad)
| 12,246
|
def test_df_multiple_aggfuncs():
"""Test output when ``aggfunc`` is more than one."""
df_frame = pd.DataFrame(
[
{"A": "foo", "B": "one", "C": "small", "D": 1, "E": 2},
{"A": "foo", "B": "one", "C": "large", "D": 2, "E": 4},
{"A": "foo", "B": "one", "C": "large", "D": 2, "E": 5},
{"A": "foo", "B": "one", "C": "small", "D": 3, "E": 5},
{"A": "foo", "B": "one", "C": "small", "D": 3, "E": 6},
{"A": "bar", "B": "one", "C": "large", "D": 4, "E": 6},
{"A": "bar", "B": "one", "C": "small", "D": 5, "E": 8},
{"A": "bar", "B": "one", "C": "small", "D": 6, "E": 9},
{"A": "bar", "B": "one", "C": "large", "D": 7, "E": 9},
]
)
expected = pd.DataFrame(
{
"A": ["bar", "bar", "foo", "foo"],
"C": ["large", "small", "large", "small"],
"one_D_mean": [5.5, 5.5, 2.0, 2.3333333333333335],
"one_D_sum": [11, 11, 4, 7],
"one_E_mean": [7.5, 8.5, 4.5, 4.333333333333333],
"one_E_sum": [15, 17, 9, 13],
}
)
result = df_frame.pivot_wider(
index=["A", "C"],
names_from=["B"],
values_from=["D", "E"],
aggfunc=["mean", "sum"],
flatten_levels=True,
)
assert_frame_equal(result, expected)
| 12,247
|
def get_git_doc_ref():
"""Return the revision used for linking to source code on GitHub."""
global _head_ref
if not _head_ref:
try:
branch = git_get_nearest_tracking_branch('.')
_head_ref = _run_git(['rev-parse', branch]).strip()
except subprocess.CalledProcessError:
_head_ref = None
return _head_ref
| 12,248
|
def test_rename_columns_bad_column_name():
"""Test column renamin with bad column name."""
cars = {
"Brand": ["Chevrolet Bel Air", "Lotus Esprit"],
"Price": [49995, 59950],
"Year": [1957, 1977],
"Sign": ["Rooster", "Snake"],
}
original_list = ["Brand", "Price", "Year", "fake_column"]
df = pd.DataFrame(cars, columns=["Brand", "Price", "Year", "Sign"])
renamed_list = ["Marque", "Cost", "Zodiac", "bogus_column"]
df1 = CleanDF.rename_columns(df, original_list, renamed_list)
assert df1.columns[0] is renamed_list[0]
assert df1.columns[1] is renamed_list[1]
assert df1.columns[2] is renamed_list[2]
assert df1.columns[3] is not renamed_list[3]
| 12,249
|
def check_call_demo():
"""
执行命令,返回结果和状态,正常为0,执行错误则抛出异常
"""
ret = subprocess.check_call(["lm", "l"])
print(ret)
| 12,250
|
def update_trails(force=False, offline=False):
"""
Update trails from feeds
"""
success = False
trails = {}
duplicates = {}
try:
if not os.path.isdir(USERS_DIR):
os.makedirs(USERS_DIR, 0755)
except Exception, ex:
exit("[!] something went wrong during creation of directory '%s' ('%s')" % (USERS_DIR, ex))
_chown(USERS_DIR)
if config.UPDATE_SERVER:
print "[i] retrieving trails from provided 'UPDATE_SERVER' server..."
content = retrieve_content(config.UPDATE_SERVER)
if not content or content.count(',') < 2:
print "[x] unable to retrieve data from '%s'" % config.UPDATE_SERVER
else:
with _fopen(TRAILS_FILE, "w+b") as f:
f.write(content)
trails = load_trails()
else:
trail_files = set()
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, "trails"))) :
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
if config.CUSTOM_TRAILS_DIR:
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, os.path.expanduser(config.CUSTOM_TRAILS_DIR)))) :
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
if not trails and (force or not os.path.isfile(TRAILS_FILE) or (time.time() - os.stat(TRAILS_FILE).st_mtime) >= config.UPDATE_PERIOD or os.stat(TRAILS_FILE).st_size == 0 or any(os.stat(_).st_mtime > os.stat(TRAILS_FILE).st_mtime for _ in trail_files)):
if not config.no_updates:
print "[i] updating trails (this might take a while)..."
else:
print "[i] checking trails..."
if not offline and (force or config.USE_FEED_UPDATES):
_ = os.path.abspath(os.path.join(ROOT_DIR, "trails", "feeds"))
if _ not in sys.path:
sys.path.append(_)
filenames = sorted(glob.glob(os.path.join(_, "*.py")))
else:
filenames = []
_ = os.path.abspath(os.path.join(ROOT_DIR, "trails"))
if _ not in sys.path:
sys.path.append(_)
filenames += [os.path.join(_, "static")]
filenames += [os.path.join(_, "custom")]
filenames = [_ for _ in filenames if "__init__.py" not in _]
if config.DISABLED_FEEDS:
filenames = [filename for filename in filenames if os.path.splitext(os.path.split(filename)[-1])[0] not in re.split(r"[^\w]+", config.DISABLED_FEEDS)]
for i in xrange(len(filenames)):
filename = filenames[i]
try:
module = __import__(os.path.basename(filename).split(".py")[0])
except (ImportError, SyntaxError), ex:
print "[x] something went wrong during import of feed file '%s' ('%s')" % (filename, ex)
continue
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "fetch":
print(" [o] '%s'%s" % (module.__url__, " " * 20 if len(module.__url__) < 20 else ""))
sys.stdout.write("[?] progress: %d/%d (%d%%)\r" % (i, len(filenames), i * 100 / len(filenames)))
sys.stdout.flush()
if config.DISABLED_TRAILS_INFO_REGEX and re.search(config.DISABLED_TRAILS_INFO_REGEX, getattr(module, "__info__", "")):
continue
try:
results = function()
for item in results.items():
if item[0].startswith("www.") and '/' not in item[0]:
item = [item[0][len("www."):], item[1]]
if item[0] in trails:
if item[0] not in duplicates:
duplicates[item[0]] = set((trails[item[0]][1],))
duplicates[item[0]].add(item[1][1])
if not (item[0] in trails and (any(_ in item[1][0] for _ in LOW_PRIORITY_INFO_KEYWORDS) or trails[item[0]][1] in HIGH_PRIORITY_REFERENCES)) or (item[1][1] in HIGH_PRIORITY_REFERENCES and "history" not in item[1][0]) or any(_ in item[1][0] for _ in HIGH_PRIORITY_INFO_KEYWORDS):
trails[item[0]] = item[1]
if not results and "abuse.ch" not in module.__url__:
print "[x] something went wrong during remote data retrieval ('%s')" % module.__url__
except Exception, ex:
print "[x] something went wrong during processing of feed file '%s' ('%s')" % (filename, ex)
try:
sys.modules.pop(module.__name__)
del module
except Exception:
pass
# custom trails from remote location
if config.CUSTOM_TRAILS_URL:
print(" [o] '(remote custom)'%s" % (" " * 20))
for url in re.split(r"[;,]", config.CUSTOM_TRAILS_URL):
url = url.strip()
if not url:
continue
url = ("http://%s" % url) if not "//" in url else url
content = retrieve_content(url)
if not content:
print "[x] unable to retrieve data (or empty response) from '%s'" % url
else:
__info__ = "blacklisted"
__reference__ = "(remote custom)" # urlparse.urlsplit(url).netloc
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#'):
continue
line = re.sub(r"\s*#.*", "", line)
if '://' in line:
line = re.search(r"://(.*)", line).group(1)
line = line.rstrip('/')
if line in trails and any(_ in trails[line][1] for _ in ("custom", "static")):
continue
if '/' in line:
trails[line] = (__info__, __reference__)
line = line.split('/')[0]
elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", line):
trails[line] = (__info__, __reference__)
else:
trails[line.strip('.')] = (__info__, __reference__)
for match in re.finditer(r"(\d+\.\d+\.\d+\.\d+)/(\d+)", content):
prefix, mask = match.groups()
mask = int(mask)
if mask > 32:
continue
start_int = addr_to_int(prefix) & make_mask(mask)
end_int = start_int | ((1 << 32 - mask) - 1)
if 0 <= end_int - start_int <= 1024:
address = start_int
while start_int <= address <= end_int:
trails[int_to_addr(address)] = (__info__, __reference__)
address += 1
# basic cleanup
for key in trails.keys():
if key not in trails:
continue
if config.DISABLED_TRAILS_INFO_REGEX:
if re.search(config.DISABLED_TRAILS_INFO_REGEX, trails[key][0]):
del trails[key]
continue
try:
_key = key.decode("utf8").encode("idna")
if _key != key: # for domains with non-ASCII letters (e.g. phishing)
trails[_key] = trails[key]
del trails[key]
key = _key
except:
pass
if not key or re.search(r"\A(?i)\.?[a-z]+\Z", key) and not any(_ in trails[key][1] for _ in ("custom", "static")):
del trails[key]
continue
if re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key):
if any(_ in trails[key][0] for _ in ("parking site", "sinkhole")) and key in duplicates:
del duplicates[key]
if trails[key][0] == "malware":
trails[key] = ("potential malware site", trails[key][1])
if trails[key][0] == "ransomware":
trails[key] = ("ransomware (malware)", trails[key][1])
if key.startswith("www.") and '/' not in key:
_ = trails[key]
del trails[key]
key = key[len("www."):]
if key:
trails[key] = _
if '?' in key:
_ = trails[key]
del trails[key]
key = key.split('?')[0]
if key:
trails[key] = _
if '//' in key:
_ = trails[key]
del trails[key]
key = key.replace('//', '/')
trails[key] = _
if key != key.lower():
_ = trails[key]
del trails[key]
key = key.lower()
trails[key] = _
if key in duplicates:
_ = trails[key]
others = sorted(duplicates[key] - set((_[1],)))
if others and " (+" not in _[1]:
trails[key] = (_[0], "%s (+%s)" % (_[1], ','.join(others)))
read_whitelist()
for key in trails.keys():
if check_whitelisted(key) or any(key.startswith(_) for _ in BAD_TRAIL_PREFIXES):
del trails[key]
elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key) and (bogon_ip(key) or cdn_ip(key)):
del trails[key]
else:
try:
key.decode("utf8")
trails[key][0].decode("utf8")
trails[key][1].decode("utf8")
except UnicodeDecodeError:
del trails[key]
try:
if trails:
with _fopen(TRAILS_FILE, "w+b") as f:
writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)
for trail in trails:
writer.writerow((trail, trails[trail][0], trails[trail][1]))
success = True
except Exception, ex:
print "[x] something went wrong during trails file write '%s' ('%s')" % (TRAILS_FILE, ex)
print "[i] update finished%s" % (40 * " ")
if success:
print "[i] trails stored to '%s'" % TRAILS_FILE
return trails
| 12,251
|
def bq_to_rows(rows):
"""Reformat BigQuery's output to regular pnguin LOD data
Reformat BigQuery's output format so we can put it into a DataFrame
Args:
        rows (list): A list of rows (mappings or objects exposing .items()) to be converted into a list of dicts
    Returns:
        list: A list of dictionaries based on the input rows
"""
def _reformat(x):
pairs = x.items()
row = {}
for pair in pairs:
key, value = pair
row[key] = value
return row
return [_reformat(x) for x in rows]
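# Minimal illustration with plain dicts standing in for BigQuery Row objects
# (both expose .items(), which is all _reformat relies on):
_rows = [{"name": "ada", "count": 3}, {"name": "grace", "count": 5}]
assert bq_to_rows(_rows) == [{"name": "ada", "count": 3}, {"name": "grace", "count": 5}]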
| 12,252
|
def _write_impropers(lmp_file: IO, openff_sys: Interchange):
"""Write the Impropers section of a LAMMPS data file."""
from openff.interchange.components.mdtraj import (
_iterate_impropers,
_store_bond_partners,
)
_store_bond_partners(openff_sys.topology.mdtop)
lmp_file.write("\nImpropers\n\n")
improper_handler = openff_sys["ImproperTorsions"]
improper_type_map = dict(enumerate(improper_handler.potentials))
improper_type_map_inv = dict({v: k for k, v in improper_type_map.items()})
for improper_idx, improper in enumerate(
_iterate_impropers(openff_sys.topology.mdtop)
):
# These are "topology indices"
indices = tuple(a.index for a in improper)
for top_key, pot_key in improper_handler.slot_map.items():
if indices == top_key.atom_indices:
improper_type_idx = improper_type_map_inv[pot_key]
lmp_file.write(
"{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n".format(
improper_idx + 1,
improper_type_idx + 1,
indices[0] + 1,
indices[1] + 1,
indices[2] + 1,
indices[3] + 1,
)
)
| 12,253
|
def draw_border(img, pt1, pt2, color, thickness, r, d, label='Unknown'):
"""Fancy box drawing function for detected faces."""
x1, y1 = pt1
x2, y2 = pt2
# Top left drawing
cv2.line(img, (x1 + r, y1), (x1 + r + d, y1), color, thickness)
cv2.line(img, (x1, y1 + r), (x1, y1 + r + d), color, thickness)
cv2.ellipse(img, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, thickness)
# Top right drawing
cv2.line(img, (x2 - r, y1), (x2 - r - d, y1), color, thickness)
cv2.line(img, (x2, y1 + r), (x2, y1 + r + d), color, thickness)
cv2.ellipse(img, (x2 - r, y1 + r), (r, r), 270, 0, 90, color, thickness)
# Bottom left drawing
cv2.line(img, (x1 + r, y2), (x1 + r + d, y2), color, thickness)
cv2.line(img, (x1, y2 - r), (x1, y2 - r - d), color, thickness)
cv2.ellipse(img, (x1 + r, y2 - r), (r, r), 90, 0, 90, color, thickness)
# Bottom right drawing
cv2.line(img, (x2 - r, y2), (x2 - r - d, y2), color, thickness)
cv2.line(img, (x2, y2 - r), (x2, y2 - r - d), color, thickness)
cv2.ellipse(img, (x2 - r, y2 - r), (r, r), 0, 0, 90, color, thickness)
# Writing image's label
cv2.putText(img=img, text=label, org=(x1 + r, y1 - 3*r), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=2, color=color, lineType=cv2.LINE_AA)
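# --- Usage sketch (assumes opencv-python and numpy are installed; not part of the original) ---
import cv2
import numpy as np

_canvas = np.zeros((480, 640, 3), dtype=np.uint8)           # blank black frame
draw_border(_canvas, (100, 100), (400, 400), color=(0, 255, 0),
            thickness=2, r=15, d=20, label='Alice')
cv2.imwrite('framed_face.png', _canvas)                      # hypothetical output path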
| 12,254
|
def network_config(session, args):
"""network config functions"""
cmd = pluginlib.exists(args, 'cmd')
if not isinstance(cmd, basestring):
msg = "invalid command '%s'" % str(cmd)
        raise pluginlib.PluginError(msg)
    if cmd not in ALLOWED_NETWORK_CMDS:
        msg = "Dom0 execution of '%s' is not permitted" % cmd
        raise pluginlib.PluginError(msg)
cmd_args = pluginlib.exists(args, 'args')
return ALLOWED_NETWORK_CMDS[cmd](cmd_args)
| 12,255
|
def count_pred_result(args, file_name, logit_id, class_num=2, max_seq_length=128):
"""
    Supports two methods to calculate the F1 score: if the dataset has two classes,
    BF1 is suggested; if it has more than two classes, MF1 is suggested.
Args:
args: param of config.
file_name: label file name.
logit_id: output tensor of infer.
        class_num: number of classes (CLUENER data default is 2).
        max_seq_length: input sentence length, default is 128.
global:
TP: pred == target == 1
FP: pred == 1 target == 0
FN: pred == 0 target == 1
TN: pred == target == 0
"""
file_name_index = file_name.split('_')[-1][:-4]
label_file = os.path.realpath(os.path.join(args.data_dir, args.dataset, 'label_ids.npy'))
real_label_index = int(file_name_index)
label_ids = np.load(label_file)[real_label_index]
print("real label is: ", label_ids)
# label_ids.reshape(max_seq_length, -1)
global TP, FP, FN, TN, NegNum, PosNum
if args.f1_method == "BF1":
pos_eva = np.isin(logit_id, [1]) # prediction
pos_label = np.isin(label_ids, [1]) # target
TP += np.sum(pos_eva & pos_label) # 1 1
FP += np.sum(pos_eva & (~pos_label)) # 1 0
FN += np.sum((~pos_eva) & pos_label) # 0 1
TN += np.sum((~pos_eva) & (~pos_label)) # 0 0
NegNum += np.sum(~pos_label)
PosNum += np.sum(pos_label)
print('TP= %d, FP= %d, FN= %d, TN= %d' % (TP, FP, FN, TN))
else:
target = np.zeros((len(label_ids), class_num), dtype=np.int32)
        # map() returns an iterator in Python 3, so len() cannot be taken of it;
        # size the prediction matrix directly from logit_id.
        pred = np.zeros((len(logit_id), class_num), dtype=np.int32)
for i, label in enumerate(label_ids):
if label > 0:
target[i][label] = 1
for i, label in enumerate(logit_id):
if label > 0:
pred[i][label] = 1
target = target.reshape(class_num, -1)
pred = pred.reshape(class_num, -1)
for i in range(0, class_num):
for j in range(0, max_seq_length):
if pred[i][j] == 1:
if target[i][j] == 1:
TP += 1
else:
FP += 1
if pred[i][j] == 0:
if target[i][j] == 0:
TN += 1
else:
FN += 1
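# --- Sketch (not part of the original script): turning the global counters into an F1 score ---
def f1_from_counts(tp, fp, fn):
    """Compute F1 from the TP/FP/FN counters accumulated by count_pred_result above."""
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    if precision + recall == 0.0:
        return 0.0
    return 2 * precision * recall / (precision + recall)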
| 12,256
|
def persistent_property(name,default_value=0.0):
"""A propery object to be used inside a class"""
def get(self):
class_name = getattr(self,"name",self.__class__.__name__)
if not "{name}" in name:
if class_name: dbname = class_name+"."+name
else: dbname = name
else: dbname = name.replace("{name}",class_name)
##debug("persistent_property.get: %s: %r, %r: %r" % (name,self,class_name,dbname))
from DB import dbget
t = dbget(dbname)
if type(default_value) == str and default_value.startswith("self."):
def_val = getattr(self,default_value[len("self."):])
else: def_val = default_value
dtype = type(def_val)
try: from numpy import nan,inf,array # for "eval"
except: pass
try: import wx # for "eval"
except: pass
try: t = dtype(eval(t))
except: t = def_val
return t
def set(self,value):
class_name = getattr(self,"name",self.__class__.__name__)
if not "{name}" in name:
if class_name: dbname = class_name+"."+name
else: dbname = name
else: dbname = name.replace("{name}",class_name)
##debug("persistent_property.set: %s: %r, %r: %r" % (name,self,class_name,dbname))
from DB import dbput
dbput(dbname,repr(value))
return property(get,set)
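# --- Usage sketch (hypothetical class; requires the DB module providing dbget/dbput) ---
class Motor(object):
    name = "motor1"                                    # used to build the DB key "motor1.speed"
    speed = persistent_property("speed", default_value=0.0)

# motor = Motor()
# motor.speed = 2.5    # stored via dbput("motor1.speed", "2.5")
# motor.speed          # read back via dbget("motor1.speed") and cast to float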
| 12,257
|
def album_id(items, sp_album):
"""Iterate through results to find correct Discogs album id."""
try:
artist = sp_album['artists'][0].lower().replace(" ", "")
except IndexError:
artist = ""
owners = -1
discogs_id = -1
similarity = 0
title = sp_album['name'].lower().replace(" ", "")
for album in items:
# title format: artist - title
index = album['title'].rfind(" - ")
disc_artist = album['title'][:index].lower().replace(" ", "")
disc_title = album['title'][index+3:].lower().replace(" ", "")
# calculate string similarity for artist spelling deviations
jw_similarity = jellyfish.jaro_winkler_similarity(artist, disc_artist)
# comparison for use of symbols in titles (& vs and)
if jellyfish.match_rating_comparison(disc_title, title):
# If they are basically the same, then match the best artist
if jellyfish.match_rating_comparison(artist, disc_artist):
if album['community']['have'] > owners:
owners = album['community']['have']
discogs_id = album['id']
similarity = jw_similarity
# If they are the same and this release is more popular
elif (jw_similarity == similarity and
album['community']['have'] > owners):
owners = album['community']['have']
discogs_id = album['id']
# If a better artist candidate is found
elif jw_similarity > similarity:
owners = album['community']['have']
discogs_id = album['id']
similarity = jw_similarity
    # we haven't found the artist if the name is not similar enough
if similarity < 0.85:
return -1
return discogs_id
| 12,258
|
def css_defaults(name, css_dict):
"""Находит первое значение по-умолчанию
background -> #FFF
color -> #FFF
content -> ""
"""
cur = css_dict.get(name) or css_dict.get(name[1:-1])
if cur is None:
return None
default = cur.get('default')
if default is not None:
return default
for v in cur['values']:
if v.startswith('<') and v.endswith('>'):
ret = css_defaults(v, css_dict)
if ret is not None:
return ret
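# --- Usage sketch (hypothetical css_dict; real data comes from the plugin's CSS tables) ---
_example_css = {
    "color": {"values": ["<paint>"]},
    "<paint>": {"values": [], "default": "#FFF"},
}
assert css_defaults("color", _example_css) == "#FFF"   # resolved via the <paint> reference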
| 12,259
|
def yn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn(x).
"""
return jnyn_zeros(n,nt)[2]
| 12,260
|
def get_chebi_parents(chebi_ent):
"""
Get parents of ChEBI entity
:param chebi_ent:
:return:
"""
if hasattr(chebi_ent, 'OntologyParents'):
return [ent.chebiId for ent in chebi_ent.OntologyParents if
(ent.type == 'is a')]
else:
return []
| 12,261
|
def test_validate_api_key_valid(client, mocker):
"""Tests the validate api key route with a valid form."""
mocked_api_key_form = mocker.patch('app.routes.ApiKeyForm')
mocked_api_key_form.return_value.validate_on_submit.return_value = True
mocked_api_key_form.return_value.organization.data = 1
with client.session_transaction() as sess:
sess['orgs_list'] = []
response = client.post('/validate-api-key')
assert response.status_code == 200
assert flask.session['org_id'] == 1
response_json = response.get_json()
assert response_json
| 12,262
|
def average_saccades_time(saccades_times):
"""
:param saccades_times: a list of tuples with (start_time_inclusive, end_time_exclusive)
:return: returns the average time of saccades
"""
return sum([saccade_time[1] - saccade_time[0] for saccade_time in saccades_times]) / len(saccades_times)
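# --- Quick worked example (not part of the original module) ---
# Two saccades lasting 10 and 6 time units -> average duration of 8.0.
assert average_saccades_time([(0, 10), (20, 26)]) == 8.0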
| 12,263
|
def set_api_key(api_key):
"""Sets an environment variable :code:`"DC_API_KEY"` to given :code:`api_key`.
Users may supply an API key to the Python API, which simply passes it on to
the REST API for handling. The API key can be provided to the API after
importing the library, or set as an environment variable
:code:`"DC_API_KEY"`.
For more details about how to get an API key and provide it to the Python
Client API, please visit :ref:`getting_started`.
Args:
api_key (:obj:`str`): The API key.
"""
os.environ[_ENV_VAR_API_KEY] = api_key
| 12,264
|
def solve_tsp_local_search(
distance_matrix: np.ndarray,
x0: Optional[List[int]] = None,
perturbation_scheme: str = "two_opt",
max_processing_time: Optional[float] = None,
log_file: Optional[str] = None,
) -> Tuple[List, float]:
"""Solve a TSP problem with a local search heuristic
Parameters
----------
distance_matrix
Distance matrix of shape (n x n) with the (i, j) entry indicating the
distance from node i to j
x0
Initial permutation. If not provided, it starts with a random path
perturbation_scheme {"ps1", "ps2", "ps3", "ps4", "ps5", "ps6", ["two_opt"]}
Mechanism used to generate new solutions. Defaults to "two_opt"
max_processing_time {None}
Maximum processing time in seconds. If not provided, the method stops
only when a local minimum is obtained
log_file
If not `None`, creates a log file with details about the whole
execution
Returns
-------
A permutation of nodes from 0 to n - 1 that produces the least total
distance obtained (not necessarily optimal).
The total distance the returned permutation produces.
Notes
-----
Here are the steps of the algorithm:
1. Let `x`, `fx` be a initial solution permutation and its objective
value;
2. Perform a neighborhood search in `x`:
2.1 For each `x'` neighbor of `x`, if `fx'` < `fx`, set `x` <- `x'`
and stop;
3. Repeat step 2 until all neighbors of `x` are tried and there is no
improvement. Return `x`, `fx` as solution.
"""
x, fx = setup(distance_matrix, x0)
max_processing_time = max_processing_time or np.inf
if log_file:
fh = logging.FileHandler(log_file)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
logger.setLevel(logging.INFO)
tic = default_timer()
stop_early = False
improvement = True
while improvement and (not stop_early):
improvement = False
for n_index, xn in enumerate(neighborhood_gen[perturbation_scheme](x)):
if default_timer() - tic > max_processing_time:
logger.warning("Stopping early due to time constraints")
stop_early = True
break
fn = compute_permutation_distance(distance_matrix, xn)
logger.info(f"Current value: {fx}; Neighbor: {n_index}")
if fn < fx:
improvement = True
x, fx = xn, fn
break # early stop due to first improvement local search
return x, fx
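# --- Usage sketch (assumes the module-level helpers used above -- setup,
# neighborhood_gen, compute_permutation_distance, logger -- are available) ---
if __name__ == "__main__":
    points = np.array([0.0, 1.0, 3.0, 6.0])
    dm = np.abs(points[:, None] - points[None, :])    # 4 points on a line
    permutation, distance = solve_tsp_local_search(dm)
    print(permutation, distance)                      # best permutation found and its total distance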
| 12,265
|
def split(self, split_size_or_sections, dim=0, copy=True):
"""Return the split chunks along the given dimension.
Parameters
----------
    split_size_or_sections : Union[int, Sequence[int]]
The number or size of chunks.
dim : int, optional, default=0
The dimension to split.
copy : bool, optional, default=True
Copy or create the views of input.
Returns
-------
Sequence[dragon.vm.torch.Tensor]
The output tensors.
See Also
--------
`torch.split(...)`_
"""
return array_ops.split(self, split_size_or_sections, dim, copy)
| 12,266
|
def HighFlowSingleInletTwoCompartmentGadoxetateModel(xData2DArray, Ve: float,
Kbh: float, Khe: float,
dummyVariable):
"""This function contains the algorithm for calculating how concentration varies with time
using the High Flow Single Inlet Two Compartment Gadoxetate Model model.
Input Parameters
----------------
xData2DArray - time and AIF concentration 1D arrays stacked into one 2D array.
Ve - Plasma Volume Fraction (decimal fraction)
Khe - Hepatocyte Uptake Rate (mL/min/mL)
    Kbh - Biliary Efflux Rate (mL/min/mL)
Returns
-------
modelConcs - list of calculated concentrations at each of the
time points in array 'time'.
"""
try:
# Logging and exception handling function.
exceptionHandler.modelFunctionInfoLogger()
# In order to use lmfit curve fitting, time and concentration must be
# combined into one function input parameter, a 2D array, then separated into individual
# 1 D arrays
times = xData2DArray[:,0]
AIFconcentrations = xData2DArray[:,1]
Th = (1-Ve)/Kbh
modelConcs = []
modelConcs = (Ve*AIFconcentrations + Khe*Th*tools.expconv(Th, times, AIFconcentrations, 'HighFlowSingleInletTwoCompartmentGadoxetateModel'))
return(modelConcs)
# Exception handling and logging code.
except ZeroDivisionError as zde:
exceptionHandler.handleDivByZeroException(zde)
except Exception as e:
exceptionHandler.handleGeneralException(e)
| 12,267
|
def create_db_directories(db_path: str = DB_PATH, not_a_book: str = NO_BOOK_NAME) -> None:
"""create DB if not existing"""
db_directory = full_db_path(db_path)
if not os.path.exists(db_directory):
print('Make directory: ' + db_directory)
os.makedirs(db_directory)
db_not_a_book_directory = os.path.join(db_directory, not_a_book)
if not os.path.exists(db_not_a_book_directory):
print('Make directory: ' + db_not_a_book_directory)
os.makedirs(db_not_a_book_directory)
| 12,268
|
def read_cz_lsminfo(fh, byteorder, dtype, count, offsetsize):
"""Read CZ_LSMINFO tag from file and return as dict."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError('invalid CZ_LSMINFO structure')
fh.seek(-8, 1)
if structure_size < numpy.dtype(TIFF.CZ_LSMINFO).itemsize:
# adjust structure according to structure_size
lsminfo = []
size = 0
for name, dtype in TIFF.CZ_LSMINFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
lsminfo.append((name, dtype))
else:
lsminfo = TIFF.CZ_LSMINFO
lsminfo = fh.read_record(lsminfo, byteorder=byteorder)
lsminfo = recarray2dict(lsminfo)
# read LSM info subrecords at offsets
for name, reader in TIFF.CZ_LSMINFO_READERS.items():
if reader is None:
continue
offset = lsminfo.get('Offset' + name, 0)
if offset < 8:
continue
fh.seek(offset)
try:
lsminfo[name] = reader(fh)
except ValueError:
pass
return lsminfo
| 12,269
|
def mi_alignment(
alignment,
mi_calculator=mi,
null_value=DEFAULT_NULL_VALUE,
excludes=DEFAULT_EXCLUDES,
exclude_handler=None,
):
"""Calc mi over all position pairs in an alignment
alignment: the full alignment object
mi_calculator: a function which calculated MI from two entropies and
their joint entropy -- see mi and normalized_mi for examples
null_value: the value to be returned if mi cannot be calculated (e.g.,
if mi_calculator == normalized_mi and joint_h = 0.0)
excludes: iterable objects containing characters that require special
handling -- by default, if a position contains an exclude, null_value
will be returned. For non-default handling, pass an exclude_handler
exclude_handler: a function which takes a position and returns it
with exclude characters processed in someway.
"""
aln_length = len(alignment)
# Create result matrix
result = zeros((aln_length, aln_length), float)
    # Compile positional entropies for each position in the alignment
# I believe I started using this rather than alignment.uncertainties
# b/c the latter relies on converting a ArrayAlignment to an Alignment --
# need to check into this.
positional_entropies = alignment.entropy_per_pos()
# Calculate pairwise MI between position_number and all alignment
# positions, and return the results in a vector.
for i in range(aln_length):
for j in range(i + 1):
result[i, j] = mi_pair(
alignment,
pos1=i,
pos2=j,
h1=positional_entropies[i],
h2=positional_entropies[j],
mi_calculator=mi_calculator,
null_value=null_value,
excludes=excludes,
exclude_handler=exclude_handler,
)
# copy the lower triangle to the upper triangle to make
# the matrix symmetric
ltm_to_symmetric(result)
return result
| 12,270
|
def Jaccard3d(a, b):
"""
This will compute the Jaccard Similarity coefficient for two 3-dimensional volumes
Volumes are expected to be of the same size. We are expecting binary masks -
0's are treated as background and anything else is counted as data
Arguments:
a {Numpy array} -- 3D array with first volume
b {Numpy array} -- 3D array with second volume
Returns:
float
"""
if len(a.shape) != 3 or len(b.shape) != 3:
raise Exception(f"Expecting 3 dimensional inputs, got {a.shape} and {b.shape}")
if a.shape != b.shape:
raise Exception(f"Expecting inputs of the same shape, got {a.shape} and {b.shape}")
# TASK: Write implementation of Jaccard similarity coefficient. Please do not use
# the Dice3D function from above to do the computation ;)
# <YOUR CODE GOES HERE>
overlap = 0
for i in range(a.shape[0]):
for j in range(a.shape[1]):
            for k in range(a.shape[2]):
if (a[i, j, k] != 0 and b[i, j, k] != 0):
overlap += 1
all_together = 0
for i in range(a.shape[0]):
for j in range(a.shape[1]):
            for k in range(a.shape[2]):
if (a[i, j, k] != 0 or b[i, j, k] != 0):
all_together += 1
return overlap/all_together
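# --- Equivalent vectorized sketch (not part of the original exercise; assumes numpy imported as np) ---
def jaccard3d_vectorized(a, b):
    """Same coefficient as Jaccard3d above, computed without explicit loops."""
    a_fg = a != 0                        # any non-zero voxel counts as foreground
    b_fg = b != 0
    intersection = np.logical_and(a_fg, b_fg).sum()
    union = np.logical_or(a_fg, b_fg).sum()
    return intersection / union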
| 12,271
|
def jaccard_distance() -> None:
"""
Calculates the Jaccard distances for all the repos, save the pre-calculated distances
as a NumPy file.
:return: None.
"""
reqs = read_dependencies()
matrix = np.zeros((len(reqs), len(reqs)))
print(f"The shape of the matrix is {matrix.shape}")
for index1, repo1 in tqdm(enumerate(reqs)):
repo1reqs = set(reqs[repo1])
for index2, repo2 in enumerate(reqs):
repo2reqs = set(reqs[repo2])
matrix[index1][index2] = len(repo1reqs.intersection(repo2reqs)) / len(
repo1reqs.union(repo2reqs))
np.save(f"models/jaccard", matrix)
| 12,272
|
def get_pod_obj(name, namespace=None):
"""
Returns the pod obj for the given pod
Args:
name (str): Name of the resources
Returns:
obj : A pod object
"""
ocp_obj = OCP(api_version='v1', kind=constants.POD, namespace=namespace)
ocp_dict = ocp_obj.get(resource_name=name)
pod_obj = Pod(**ocp_dict)
return pod_obj
| 12,273
|
def _input_password() -> str:
"""
Get password input by masking characters.
Similar to getpass() but works with cygwin.
"""
sys.stdout.write("Password :\n")
sys.stdout.flush()
subprocess.check_call(["stty", "-echo"])
password = input()
subprocess.check_call(["stty", "echo"])
return password
| 12,274
|
def config_pymatgen(psp_dir=None, def_fun="PBE", mapi=None, path_to_store_psp="psp_pymatgen", aci=False,
vasp_cmd="vasp_std", template="vaspjob.pbs", queue_type="pbs"):
"""
Config pymatgen.
    If a key already exists in ~/.pmgrc.yaml and is not empty, it is skipped.
    Parameters
psp_dir: str (path-like)
Ref: handle_potcar_gz
def_fun: str
The default functional. Default: PBE
mapi: str
The API of Materials Project. Default: None. Ref. https://materialsproject.org/open
path_to_store_psp: str (path-like)
The destination to store the compressed psp. default: psp_pymatgen
    Returns
        None
"""
keys_required = ["PMG_DEFAULT_FUNCTIONAL", "PMG_MAPI_KEY", "PMG_VASP_PSP_DIR"]
keys_dict = {"PMG_DEFAULT_FUNCTIONAL": def_fun, "PMG_VASP_PSP_DIR": path_to_store_psp, "PMG_MAPI_KEY": mapi}
pmg_config_file = os.path.join(os.environ["HOME"], ".pmgrc.yaml")
keys_exist = []
params = {}
if os.path.exists(pmg_config_file):
pmg_config = loadfn(pmg_config_file)
for key in keys_required:
flag_exist = 0
            key_old = key[4:]  # old-style key without the "PMG_" prefix
            if key_old in pmg_config:
                if pmg_config[key_old]:
                    params[key] = pmg_config[key_old]
flag_exist = 1
if key in pmg_config:
if pmg_config[key]:
# Not empty or None
params[key] = pmg_config[key]
flag_exist = 1
if flag_exist:
keys_exist.append(key)
keys_required = list(set(keys_required).difference(set(keys_exist)))
if len(keys_required) == 0:
warnings.warn("The pymatgen has been configured before.")
return
else:
#Backup the .pmgrc.yaml file
shutil.copyfile(pmg_config_file, pmg_config_file + ".dfttk.bak")
for key in keys_required:
params[key] = keys_dict[key]
dumpfn(params, pmg_config_file, default_flow_style=False)
if "PMG_MAPI_KEY" in keys_required and (not mapi):
warnings.warn("'PMG_MAPI_KEY' is empty, some function will not work. " +
"Please add your own Materials Project's API. " +
"Ref. https://github.com/PhasesResearchLab/dfttk/tree/master/docs/Configuration.md")
if "PMG_VASP_PSP_DIR" in keys_required:
#No configuration for psp path
handle_potcar_gz(psp_dir=psp_dir, path_to_store_psp=path_to_store_psp, aci=aci,
vasp_cmd=vasp_cmd, template=template, queue_type=queue_type)
| 12,275
|
def file_size(file_path):
"""Return the file size."""
if os.path.isfile(file_path):
file_info = os.stat(file_path)
return convert_bytes(file_info.st_size)
| 12,276
|
def median_rank(PESSI_SORT, OPTI_SORT, A):
"""
Calculates the median rank of each action.
:param PESSI_SORT: Dictionary containing the actions classified according to the pessimistic procedure.
:param OPTI_SORT: Dictionary containing the actions classified according to the optimistic procedure.
:param A: List containing the names of the actions as strings.
:return med_rank: Dictionary containing the median rank of each action. The keys are the names of the actions
and the values are the median ranks.
"""
med_rank = {}
for a in A:
med_rank[a] = (OPTI_SORT[1][a] + PESSI_SORT[1][a]) / 2
return med_rank
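# --- Worked example (hypothetical rankings; index 1 holds the rank dict, as used above) ---
_example_pessi = (None, {"a1": 4, "a2": 1})
_example_opti = (None, {"a1": 2, "a2": 1})
assert median_rank(_example_pessi, _example_opti, ["a1", "a2"]) == {"a1": 3.0, "a2": 1.0}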
| 12,277
|
def get_token(
event: ApiGatewayEvent,
_context: LambdaContext,
node_api: Optional[NodeApi] = None
) -> dict:
"""Get token details given a token uid.
*IMPORTANT: Any changes on the parameters should be reflected on the `cacheKeyParameters` for this method.
"""
node_api = node_api or NodeApi()
id = event.query.get("id")
if id is None:
raise ApiError("invalid_parameters")
response = node_api.get_token(id)
return {
"statusCode": 200,
"body": json.dumps(response or UNKNOWN_ERROR_MSG),
"headers": {
"Content-Type": "application/json"
}
}
| 12,278
|
def create_video(input_file, line_count, video_name, visible_obstacles):
"""
Given an input file which logs an agent's movements,
create a video file which records the sequence of movements.
"""
obstacle_plots = []
# convert obstacles into lists of tuples so matplotlib can
# understand them.
for obstacle in visible_obstacles:
obstacle_plots.append([ast.literal_eval(repr(line[0]))
for line in obstacle.lines])
# initialize plot and obstacles for the maze
fig = plt.figure(frameon=False)
txt = plt.text(2, 33, 'agent score: {}'.format(0), fontsize=8)
ax = fig.add_subplot(111, aspect='equal', ylim=[0.0, 35], xlim=[0.0, 42])
for ob in obstacle_plots:
ax.add_patch(Polygon(ob))
arrow = None
p1 = None
p2 = None
# remove extra border around subplot
frame = plt.gca()
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
frame.set_frame_on(False)
with open(input_file) as f:
generate_frames(p1, p2, arrow, txt, ax, fig, f)
| 12,279
|
def update_dictionary_entries(old_entries, need_to_add):
"""
Expects dictionary of species entries and
unique list of species (as SMILES) that need to be added
Creates new entries for the species that need to be added
Returns old and new entries
"""
    need_to_add = list(set(need_to_add))
for j, species in enumerate(need_to_add):
molecule = rmgpy.molecule.Molecule(smiles=species)
adjlist = molecule.to_adjacency_list()
multiplicity = None
if re.search('(?<=multiplicity ).*', adjlist):
multiplicity = int(
re.search('(?<=multiplicity ).*', adjlist).group(0))
adjlist = re.sub(r'multiplicity .*',
f'multiplicity [{multiplicity}]', adjlist)
group = rmgpy.molecule.group.Group()
group.from_adjacency_list(adjlist)
atom_counts = {}
rel_label = ''
for atom in ['C', 'H', 'O']:
count = species.count(atom)
if count > 0:
rel_label = rel_label + atom + str(count)
assert rel_label != ''
"""
        3 Scenarios:
No old -> no need for ID number: max_ID = -1
Only one old -> needs to have ID of 1: max_ID = 0
Multiple old -> needs to have a unique ID: max_ID > 0
"""
new_ID = None
max_ID = -1
duplicate = False
for old_label in old_entries:
old_entry = old_entries[old_label]
if group.is_isomorphic(old_entry.item):
duplicate = True
print(f'{old_entry} found to be duplicate')
continue
if rel_label not in old_label:
continue
if rel_label == old_label and max_ID == -1:
                # At least one entry with the same label
max_ID = 0
if old_label.find('-') > 0:
old_label, ID_str = old_label.split('-')
ID = int(ID_str)
if old_label == rel_label and ID > max_ID:
                    # Multiple existing labels
max_ID = ID
if max_ID > -1:
# Existing label
new_ID = max_ID + 1
rel_label = rel_label + '-' + str(new_ID)
if not duplicate:
entry = rmgpy.data.base.Entry()
entry.label = rel_label
entry.item = group
assert rel_label not in list(old_entries.keys())
old_entries[rel_label] = entry
entry_labels = [old_entries[key].label for key in old_entries]
assert len(entry_labels) == len(list(set(entry_labels))
), 'Non-unique labels in dictionary'
return old_entries
| 12,280
|
def ParseOptions():
"""Parses the options passed to the program.
@return: Options and arguments
"""
parser = optparse.OptionParser(usage="%prog [--no-backup]",
prog=os.path.basename(sys.argv[0]))
parser.add_option(cli.DEBUG_OPT)
parser.add_option(cli.VERBOSE_OPT)
parser.add_option(cli.YES_DOIT_OPT)
parser.add_option("--no-backup", dest="backup", default=True,
action="store_false",
help="Whether to create backup copies of deleted files")
(opts, args) = parser.parse_args()
return VerifyOptions(parser, opts, args)
| 12,281
|
def test_parse_through_basic(monkeypatch):
"""Checks prismify directive "parse_through" """
mock_XlTemplateReader_from_excel(
{"ws1": [["#preamble", "propname", "propval"]]}, monkeypatch
)
xlsx, errs = XlTemplateReader.from_excel("workbook")
assert not errs
template_schema = {
"title": "parse_through",
"prism_template_root_object_schema": "test_schema.json",
"properties": {
"worksheets": {
"ws1": {
"prism_preamble_object_schema": "test_schema.json",
"prism_preamble_object_pointer": "#",
"prism_data_object_pointer": "/whatever",
"preamble_rows": {
"propname": {
"type": "string",
"parse_through": "lambda x: f'encrypted({x})'",
"merge_pointer": "/propname",
}
},
}
}
},
}
template = build_mock_Template(template_schema, "test_template_name", monkeypatch)
patch, _, errs = core.prismify(xlsx, template, TEST_SCHEMA_DIR)
assert not errs
assert patch == {"propname": "encrypted(propval)"}
    # Check working with null/None values
mock_XlTemplateReader_from_excel(
{"ws1": [["#preamble", "propname", None]]}, monkeypatch
)
xlsx, errs = XlTemplateReader.from_excel("workbook")
assert not errs
template_schema["properties"]["worksheets"]["ws1"]["preamble_rows"]["propname"][
"allow_empty"
] = True
template = build_mock_Template(template_schema, "test_template_name", monkeypatch)
patch, _, errs = core.prismify(xlsx, template, TEST_SCHEMA_DIR)
assert not errs
# empty val (None) was not parsed through
assert patch != {"propname": "encrypted(None)"}
# but was skipped all together
assert patch == {}
| 12,282
|
def join_customer_script(customer_code_process : multiprocessing.Process):
"""
Joins the process running the customer code.
Args:
customer_code_process (Process): the process running the customer code.
"""
try:
customer_code_process.join()
except Exception as e:
log_failure_and_exit(f"Job did not exit gracefully.\nException: {e}")
| 12,283
|
def four_rooms(dims, doorway=1.):
"""
Args:
dims: [dimx, dimy] dimensions of rectangle
doorway: size of doorway
Returns:
adjmat: adjacency matrix
xy: xy coordinates of each state for plotting
        labels: room index (0-3) for each state
"""
half_x, half_y = (dims[0]*.5, dims[1]*.5)
quarter_x, quarter_y = (dims[0]*.25, dims[1]*.25)
threequarter_x, threequarter_y = (dims[0]*.75, dims[1]*.75)
adj, xy, _ = rectangle_mesh(dims)
room = np.array([xy[:,0] < half_x, xy[:,1] < half_y], dtype=np.float32).T
mask = np.array(distance.squareform(distance.pdist(room, "euclidean")) == 0, dtype=np.float32)
labels = np.sum(room * np.array([[1, 2]]), 1)
doorsx = [quarter_x, threequarter_x, half_x, half_x]
doorsy = [half_y, half_y, quarter_y, threequarter_y]
doors = np.array([doorsx, doorsy]).T
inds = []
for d in doors:
dist_to_door = np.sum(np.abs(xy - d[None, :]), 1)
ind = np.where(dist_to_door == np.min(dist_to_door))[0]
if len(ind) > 1: ind = ind[0]
mask[ind, :] = 1
mask[:, ind] = 1
adj = adj * mask
return adj, xy, labels
| 12,284
|
def get_verse_url(verse: str) -> str:
"""Creates a URL for the verse text."""
node = CONNECTIONS[verse]
volume = scripture_graph.VOLUMES_SHORT[node['volume']].lower()
if volume == 'bom':
volume = 'bofm'
elif volume == 'd&c':
volume = 'dc-testament'
elif volume == 'pogp':
volume = 'pgp'
book = node['book'].lower()
book_replacements = {
' ': '-',
'.': '',
'&': '',
'—': '-',
}
for old, new in book_replacements.items():
book = book.replace(old, new)
if book == 'd&c':
book = 'dc'
chapter = node['chapter']
i = node['verse']
return parse.urljoin(URL_BASE,
f'{volume}/{book}/{chapter}.{i}?lang=eng#p{i}#{i}')
| 12,285
|
def get_ws_dependency_annotation(state: GlobalState) -> WSDependencyAnnotation:
""" Returns the world state annotation
:param state: A global state object
"""
annotations = cast(
List[WSDependencyAnnotation],
list(state.world_state.get_annotations(WSDependencyAnnotation)),
)
if len(annotations) == 0:
annotation = WSDependencyAnnotation()
state.world_state.annotate(annotation)
else:
annotation = annotations[0]
return annotation
| 12,286
|
def test_ga_tap_standard_tests(ga_config: Optional[dict]):
"""Run standard tap tests against Google Analytics tap."""
tests = get_standard_tap_tests(SampleTapGoogleAnalytics, config=ga_config)
for test in tests:
test()
| 12,287
|
def get_report_df(get_devreport_df, chemdf_dict, dev_args):
"""
Grab a default devreport under the expected conditions
which can be used for downstream testing
"""
dataset_list = dev_args.d
raw_bool = dev_args.raw
target_naming_scheme = dev_args.local_directory
offline_folder = f'./{target_naming_scheme}/offline'
offline_toggle = 0
report_df = report_pipeline(chemdf_dict,
raw_bool,
dataset_list,
offline_toggle)
# pandas does stuff during read write... so mimic..
report_df.to_csv(f'{offline_folder}/testing.csv')
report_df = pd.read_csv(f'{offline_folder}/testing.csv')
yield report_df
| 12,288
|
def get_gin_confg_strs():
"""
Obtain both the operative and inoperative config strs from gin.
The operative configuration consists of all parameter values used by
configurable functions that are actually called during execution of the
current program, and inoperative configuration consists of all parameter
configured but not used by configurable functions. See
``gin.operative_config_str()`` and ``gin_utils.inoperative_config_str`` for
more detail on how the config is generated.
Returns:
tuple:
- md_operative_config_str (str): a markdown-formatted operative str
- md_inoperative_config_str (str): a markdown-formatted inoperative str
"""
operative_config_str = gin.operative_config_str()
md_operative_config_str = _markdownify_gin_config_str(
operative_config_str,
'All parameter values used by configurable functions that are actually called'
)
md_inoperative_config_str = gin_utils.inoperative_config_str()
if md_inoperative_config_str:
md_inoperative_config_str = _markdownify_gin_config_str(
md_inoperative_config_str,
"All parameter values configured but not used by program. The configured "
"functions are either not called or called with explicit parameter values "
"overriding the config.")
return md_operative_config_str, md_inoperative_config_str
| 12,289
|
def L(x, c, gamma):
"""Return c-centered Lorentzian line shape at x with HWHM gamma"""
return gamma / (np.pi * ((x - c) ** 2 + gamma ** 2))
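# --- Quick numerical check (not part of the original module) ---
# The line shape peaks at 1/(pi*gamma) at x = c and integrates to ~1 over a wide window.
_x = np.linspace(-50, 50, 100001)
_y = L(_x, c=0.0, gamma=0.5)
assert np.isclose(_y.max(), 1.0 / (np.pi * 0.5))
assert abs((_y * (_x[1] - _x[0])).sum() - 1.0) < 0.01   # simple Riemann-sum integral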
| 12,290
|
def request_data_from_weather_station():
"""
Send a command to the weather station to get current values.
Returns
-------
bytes
received data, 0 if error occurred
"""
sock = socket.create_connection((WEATHER_HOST, WEATHER_PORT), GRAPHITE_TIMEOUT)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
data = 0
try:
sock.send(CMD_ACT)
data = sock.recv(1024)
except:
logging.error('Error getting data from weather station!')
finally:
sock.close()
if check_crc(data):
return data
logging.error('CRC failed! \r\n Data: %s', data)
return 0
| 12,291
|
def solution2(arr):
"""improved solution1 #TLE """
if len(arr) == 1:
return arr[0]
max_sum = float('-inf')
l = len(arr)
for i in range(l):
local_sum = arr[i]
local_min = arr[i]
max_sum = max(max_sum, local_sum)
for j in range(i + 1, l):
local_sum += arr[j]
local_min = min(local_min, arr[j])
max_sum = max([max_sum, local_sum, local_sum - local_min])
return max_sum
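# --- Quick checks (not part of the original submission) ---
assert solution2([1, -2, 0, 3]) == 4     # take [1, -2, 0, 3] and drop the -2
assert solution2([-1, -1, -1]) == -1     # at least one element is always kept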
| 12,292
|
def change_test_dir_and_create_data_path(request):
""" The fixture required to change the root directory where from pytest runs the tests.
Root is set to the directory whene this file is located, so relative paths work fine. """
os.chdir(request.fspath.dirname) # <- Changes pytest root to this directory
temp_cache_dir.mkdir(exist_ok=True) # <- `temp_cache_dir` shouldn't exist before this line run
yield
if temp_cache_dir.exists():
shutil.rmtree(temp_cache_dir, ignore_errors=True)
os.chdir(request.config.invocation_dir)
| 12,293
|
def hillas_parameters_4(pix_x, pix_y, image, recalculate_pixels=True):
"""Compute Hillas parameters for a given shower image.
As for hillas_parameters_3 (old Whipple Fortran code), but more Pythonized
MP: Parameters calculated as Whipple Reynolds et al 1993 paper:
http://adsabs.harvard.edu/abs/1993ApJ...404..206R
which should be the same as one of my ICRC 1991 papers and my thesis.
Parameters
----------
pix_x : array_like
Pixel x-coordinate
pix_y : array_like
Pixel y-coordinate
image : array_like
Pixel values corresponding
recalculate_pixels : Boolean (default True)
Recalculate the pixel higher multiples (e.g., if pixels move
(!) or pixel list changes between calls)
Returns
-------
hillas_parameters : `MomentParameters`
"""
if type(pix_x) == Quantity:
unit = pix_x.unit
assert pix_x.unit == pix_y.unit
else:
unit = 1.0
''' MP: Actually, I don't know why we need to strip the units...
    shouldn't the calculations all work with them?'''
pix_x = Quantity(np.asanyarray(pix_x, dtype=np.float64)).value
pix_y = Quantity(np.asanyarray(pix_y, dtype=np.float64)).value
image = np.asanyarray(image, dtype=np.float64)
assert pix_x.shape == image.shape
assert pix_y.shape == image.shape
(sumsig, sumxsig, sumysig, sumx2sig, sumy2sig, sumxysig, sumx3sig,
sumx2ysig, sumxy2sig, sumy3sig) = np.zeros(10)
# Call static_xy to initialize the "static variables"
# Actually, would be nice to just call this if we
# know the pixel positions have changed
static_xy(pix_x, pix_y, recalculate_pixels)
sumsig = image.sum()
sumxsig = (image * pix_x).sum()
sumysig = (image * pix_y).sum()
sumx2sig = (image * static_xy.pix_x2).sum()
sumy2sig = (image * static_xy.pix_y2).sum()
sumxysig = (image * static_xy.pix_xy).sum()
sumx3sig = (image * static_xy.pix_x3).sum()
sumx2ysig = (image * static_xy.pix_x2y).sum()
sumxy2sig = (image * static_xy.pix_xy2).sum()
sumy3sig = (image * static_xy.pix_y3).sum()
sumx4sig = (image * static_xy.pix_x4).sum()
sumx3ysig = (image * static_xy.pix_x3y).sum()
sumx2y2sig = (image * static_xy.pix_x2y2).sum()
sumxy3sig = (image * static_xy.pix_xy3).sum()
sumy4sig = (image * static_xy.pix_y4).sum()
if sumsig == 0.0:
raise (HillasParameterizationError(("Empty pixels!"
"Cannot calculate image parameters."
"Exiting...")))
xm = sumxsig / sumsig
ym = sumysig / sumsig
x2m = sumx2sig / sumsig
y2m = sumy2sig / sumsig
xym = sumxysig / sumsig
x3m = sumx3sig / sumsig
x2ym = sumx2ysig / sumsig
xy2m = sumxy2sig / sumsig
y3m = sumy3sig / sumsig
x4m = sumx4sig / sumsig
x3ym = sumx3ysig / sumsig
x2y2m = sumx2y2sig / sumsig
xy3m = sumxy3sig / sumsig
y4m = sumy4sig / sumsig
    '''Doing this should be the same as above, but it's ~4 us slower !?
(xm, ym, x2m, y2m, xym, x3m, x2ym, xy2m, y3m) = \
(sumxsig, sumysig, sumx2sig, sumy2sig, sumxysig, sumx3sig,
sumx2ysig, sumxy2sig, sumy3sig) / sumsig'''
xm2 = xm * xm
ym2 = ym * ym
xmym = xm * ym
vx2 = x2m - xm2
vy2 = y2m - ym2
vxy = xym - xmym
vx3 = x3m - 3.0 * xm * x2m + 2.0 * xm2 * xm
vx2y = x2ym - x2m * ym - 2.0 * xym * xm + 2.0 * xm2 * ym
vxy2 = xy2m - y2m * xm - 2.0 * xym * ym + 2.0 * xm * ym2
vy3 = y3m - 3.0 * ym * y2m + 2.0 * ym2 * ym
d = vy2 - vx2
dist = np.sqrt(xm2 + ym2) # could use hypot(xm,ym), but already have squares
phi = np.arctan2(ym, xm)
# -- simpler formulae for length & width suggested CA 901019
z = np.hypot(d, 2.0 * vxy)
length = np.sqrt((vx2 + vy2 + z) / 2.0)
width = np.sqrt((vy2 + vx2 - z) / 2.0)
# -- simpler formula for miss introduced CA, 901101
# -- revised MP 910112
if z == 0.0:
miss = dist
else:
uu = 1 + d / z
vv = 2 - uu
miss = np.sqrt((uu * xm2 + vv * ym2) / 2.0 - xmym * (2.0 * vxy / z))
    '''Change to faster calculation of psi and avoid inaccuracy for hyp
psi = np.arctan2((d + z) * ym + 2.0 * vxy * xm, 2.0 *vxy * ym - (d - z) * xm)
hyp = np.sqrt(2 * z * (z + d)) #! should be simplification of sqrt((d+z)
**2+(2*vxy)**2 ... but not accurate!
hyp = np.hypot(d + z,2 * vxy)
psi = np.arctan2(d + z, 2 * vxy)
cpsi = np.cos(psi)
spsi = np.sin(psi)'''
tanpsi_numer = (d + z) * ym + 2.0 * vxy * xm
tanpsi_denom = 2.0 * vxy * ym - (d - z) * xm
psi = np.arctan2(tanpsi_numer, tanpsi_denom)
# Code to de-interface with historical code
size = sumsig
m_x = xm
m_y = ym
length = length
r = dist
# Note, "skewness" is the same as the Whipple/MP "asymmetry^3", which is fine.
# ... and also, Whipple/MP "asymmetry" * "length" = MAGIC "asymmetry"
# ... so, MAGIC "asymmetry" = MAGIC "skewness"^(1/3) * "length"
# I don't know what MAGIC's "asymmetry" is supposed to be.
# -- Asymmetry and other higher moments
if length != 0.0:
vx4 = x4m - 4.0 * xm * x3m + 6.0 * xm2 * x2m - 3.0 * xm2 * xm2
vx3y = x3ym - 3.0 * xm * x2ym + 3.0 * xm2 * xym - x3m * ym \
+ 3.0 * x2m * xmym - 3.0 * xm2 * xm * ym
vx2y2 = x2y2m - 2.0 * ym * x2ym + x2m * ym2 \
- 2.0 * xm * xy2m + 4.0 * xym * xmym + xm2 * y2m - 3.0 * xm2 * ym2
vxy3 = xy3m - 3.0 * ym * xy2m + 3.0 * ym2 * xym - y3m * xm \
+ 3.0 * y2m * xmym - 3.0 * ym2 * ym * xm
vy4 = y4m - 4.0 * ym * y3m + 6.0 * ym2 * y2m - 3.0 * ym2 * ym2
hyp = np.hypot(tanpsi_numer, tanpsi_denom)
if hyp != 0.:
cpsi = tanpsi_denom / hyp
spsi = tanpsi_numer / hyp
else:
cpsi = 1.
spsi = 0.
cpsi2 = cpsi * cpsi
spsi2 = spsi * spsi
cspsi = cpsi * spsi
sk3bylen3 = (vx3 * cpsi * cpsi2 +
3.0 * vx2y * cpsi2 * spsi +
3.0 * vxy2 * cpsi * spsi2 +
vy3 * spsi * spsi2)
asym = np.copysign(np.power(np.abs(sk3bylen3), 1. / 3.), sk3bylen3) / length
skewness = asym * asym * asym # for MP's asym... (not for MAGIC asym!)
# Kurtosis
kurt = (vx4 * cpsi2 * cpsi2 +
4.0 * vx3y * cpsi2 * cspsi +
6.0 * vx2y2 * cpsi2 * spsi2 +
4.0 * vxy3 * cspsi * spsi2 +
vy4 * spsi2 * spsi2)
kurtosis = kurt / (length * length * length * length)
else: # Skip Higher Moments
asym = 0.0
psi = 0.0
skewness = 0.0
kurtosis = 0.0
# Azwidth not used anymore
# # -- Akerlof azwidth now used, 910112
# d = y2m - x2m
# z = np.sqrt(d * d + 4 * xym * xym)
# azwidth = np.sqrt((x2m + y2m - z) / 2.0)
return MomentParameters(size=size, cen_x=m_x * unit, cen_y=m_y * unit,
length=length * unit, width=width * unit, r=r * unit,
phi=Angle(phi * u.rad),
psi=Angle(psi * u.rad),
miss=miss * unit,
skewness=skewness, kurtosis=kurtosis)
| 12,294
|
def create_intrusion_set(
name: str,
aliases: List[str],
author: Identity,
primary_motivation: Optional[str],
secondary_motivations: List[str],
external_references: List[ExternalReference],
object_marking_refs: List[MarkingDefinition],
) -> IntrusionSet:
"""Create an intrusion set."""
return IntrusionSet(
created_by_ref=author,
name=name,
aliases=aliases,
primary_motivation=primary_motivation,
secondary_motivations=secondary_motivations,
labels=["intrusion-set"],
external_references=external_references,
object_marking_refs=object_marking_refs,
)
| 12,295
|
def key_up(handle: HWND, key: str):
"""放开指定按键
Args:
handle (HWND): 窗口句柄
key (str): 按键名
"""
vk_code = get_virtual_keycode(key)
scan_code = MapVirtualKeyW(vk_code, 0)
# https://docs.microsoft.com/en-us/windows/win32/inputdev/wm-keyup
wparam = vk_code
lparam = (scan_code << 16) | 0XC0000001
PostMessageW(handle, WM_KEYUP, wparam, lparam)
| 12,296
|
def broadcast_to(tensor, shape):
"""Broadcast an tensor to a new shape.
Parameters
----------
tensor : array_like
The tensor to broadcast.
shape : tuple
The shape of the desired array.
Returns
-------
broadcast : Tensor
Raises
------
ValueError
If the tensor is not compatible with the new shape according to Mars's
broadcasting rules.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.array([1, 2, 3])
>>> mt.broadcast_to(x, (3, 3)).execute()
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
from ..core import Tensor
tensor = tensor if isinstance(tensor, Tensor) else astensor(tensor)
shape = tuple(shape) if isinstance(shape, (list, tuple)) else (shape,)
if any(np.isnan(s) for s in tensor.shape):
raise ValueError('input tensor has unknown shape, '
'need to call `.execute()` first')
if tensor.shape == shape:
return tensor
new_ndim = len(shape) - tensor.ndim
if new_ndim < 0:
raise ValueError('input operand has more dimensions than allowed by the axis remapping')
if any(o != n for o, n in zip(tensor.shape, shape[new_ndim:]) if o != 1):
raise ValueError('operands could not be broadcast together '
'with remapped shapes [original->remapped]: {0} '
'and requested shape {1}'.format(tensor.shape, shape))
op = TensorBroadcastTo(shape, dtype=tensor.dtype, sparse=tensor.issparse())
return op(tensor, shape)
| 12,297
|
def admin_required(handler_method):
"""Require that a user be an admin.
To use it, decorate your method like this::
@admin_required
def get(self):
...
"""
@wraps(handler_method)
def check_admin(*args, **kwargs):
"""Perform the check."""
if current_user.is_anonymous:
return redirect(url_for('home.login'))
if current_user.is_admin:
return handler_method(*args, **kwargs)
abort(401)
return check_admin
| 12,298
|
def get(path):
"""Get."""
verify()
resp = requests.get(f"{URL}{path}", headers=auth)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
error_msg(str(e))
return
return resp.json()
| 12,299
|