| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def create_model(args, vocab_size, num_labels, mode='train'):
"""create lac model"""
# model's input data
words = fluid.data(name='words', shape=[-1, 1], dtype='int64', lod_level=1)
targets = fluid.data(
name='targets', shape=[-1, 1], dtype='int64', lod_level=1)
if mode == "train":
print("create model mode: ", mode)
teacher_crf_decode = fluid.data(
name='teacher_crf_decode', shape=[-1, 1], dtype='float32', lod_level=1)
else:
print("create model mode: ", mode)
teacher_crf_decode = None
feed_list = [words, targets]
if teacher_crf_decode:
feed_list.append(teacher_crf_decode)
pyreader = fluid.io.DataLoader.from_generator(
feed_list=feed_list,
capacity=200,
use_double_buffer=True,
iterable=False)
# for test or train process
avg_cost, crf_avg_cost, teacher_cost, crf_decode= nets.lex_net(
words, args, vocab_size, num_labels, teacher_crf_decode,for_infer=False, target=targets)
(precision, recall, f1_score, num_infer_chunks, num_label_chunks,
num_correct_chunks) = fluid.layers.chunk_eval(
input=crf_decode,
label=targets,
chunk_scheme="IOB",
num_chunk_types=int(math.ceil((num_labels - 1) / 2.0)))
chunk_evaluator = fluid.metrics.ChunkEvaluator()
chunk_evaluator.reset()
ret = {
"pyreader": pyreader,
"words": words,
"targets": targets,
"avg_cost": avg_cost,
"crf_avg_cost": crf_avg_cost,
"teacher_cost": teacher_cost,
"crf_decode": crf_decode,
"precision": precision,
"recall": recall,
"f1_score": f1_score,
"chunk_evaluator": chunk_evaluator,
"num_infer_chunks": num_infer_chunks,
"num_label_chunks": num_label_chunks,
"num_correct_chunks": num_correct_chunks
}
return ret
| 23,100
|
def time_nifti_to_numpy(N_TRIALS):
"""
Times how fast a framework can read a nifti file and convert it to numpy
"""
img_paths = [ants.get_ants_data('mni')]*10
def test_nibabel():
for img_path in img_paths:
array = nib.load(img_path).get_data()
def test_itk():
for img_path in img_paths:
array = itk.GetArrayFromImage(itk.imread(img_path))
def test_ants():
for img_path in img_paths:
array = ants.image_read(img_path, pixeltype='float').numpy()
nib_start = time.time()
for i in range(N_TRIALS):
test_nibabel()
nib_end = time.time()
print('NIBABEL TIME: %.3f seconds' % (nib_end-nib_start))
itk_start = time.time()
for i in range(N_TRIALS):
test_itk()
itk_end = time.time()
print('ITK TIME: %.3f seconds' % (itk_end-itk_start))
ants_start = time.time()
for i in range(N_TRIALS):
test_ants()
ants_end = time.time()
print('ANTS TIME: %.3f seconds' % (ants_end-ants_start))
| 23,101
|
def escape_string(value):
"""escape_string escapes *value* but not surround it with quotes.
"""
value = value.replace('\\', '\\\\')
value = value.replace('\0', '\\0')
value = value.replace('\n', '\\n')
value = value.replace('\r', '\\r')
value = value.replace('\032', '\\Z')
value = value.replace("'", "\\'")
value = value.replace('"', '\\"')
return value
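# Illustrative usage (added sketch, not part of the original snippet): the
# escaped value still needs to be wrapped in quotes by the caller.
print(escape_string("O'Reilly\n"))  # -> O\'Reilly\n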
| 23,102
|
def current_script_path() -> str:
"""
Return path to where the currently executing script is located
"""
return os.path.abspath(os.path.dirname(sys.argv[0]))
| 23,103
|
def _supports_masking(remask_kernel: bool):
"""Returns a decorator that turns layers into layers supporting masking.
Specifically:
1) `init_fn` is left unchanged.
2) `apply_fn` is turned from
a function that accepts a `mask=None` keyword argument (which indicates
`inputs[mask]` must be masked), into
a function that accepts a `mask_constant=None` keyword argument (which
indicates `inputs[inputs == mask_constant]` must be masked).
3) `kernel_fn` is modified to
3.a) propagate the `kernel.mask1` and `kernel.mask2` through intermediary
layers, and,
3.b) if `remask_kernel == True`, zeroes-out covariances between entries of
which at least one is masked.
4) If the decorated layer has a `mask_fn`, it is used to propagate masks
forward through the layer, in both `apply_fn` and `kernel_fn`. If not, it is
assumed the mask remains unchanged.
Must be applied before the `layer` decorator.
Args:
remask_kernel: `True` to zero-out kernel covariance entries between masked
inputs after applying `kernel_fn`. Some layers don't need this and setting
`remask_kernel=False` can save compute.
Returns:
A decorator that turns functions returning
`(init_fn, apply_fn, kernel_fn[, mask_fn])`
into functions returning
`(init_fn, apply_fn_with_masking, kernel_fn_with_masking)`.
"""
def supports_masking(layer):
@utils.wraps(layer)
def layer_with_masking(*args, **kwargs):
layer_fns = layer(*args, **kwargs)
init_fn, apply_fn, kernel_fn = layer_fns[:3]
if len(layer_fns) == 3:
# No mask propagation function supplied - use identity.
_mask_fn = lambda mask, input_shape: mask
elif len(layer_fns) == 4:
# Custom mask propagation function supplied.
_mask_fn = layer_fns[3]
else:
raise ValueError(f'Expected 3 (`init_fn`, `apply_fn`, `kernel_fn`) or 4'
f' (..., `mask_fn`) layer functions, '
f'got {len(layer_fns)}.')
@utils.wraps(_mask_fn)
def mask_fn(mask, input_shape):
if mask is None:
return None
return _mask_fn(mask, input_shape)
def apply_fn_with_masking(params, inputs, *,
mask_constant=None, **kwargs):
inputs = utils.get_masked_array(inputs, mask_constant)
inputs, mask = inputs.masked_value, inputs.mask
outputs = apply_fn(params, inputs, mask=mask, **kwargs)
outputs_mask = mask_fn(mask,
inputs.shape if isinstance(inputs, np.ndarray)
else [i.shape for i in inputs])
if outputs_mask is None:
return outputs
return utils.MaskedArray(outputs, outputs_mask)
def kernel_fn_with_masking(k: Kernels, **user_reqs):
if isinstance(k, Kernel):
mask1 = mask_fn(k.mask1, k.shape1)
mask2 = mask_fn(k.mask2, k.shape2)
elif isinstance(k, list):
mask1 = mask_fn([k.mask1 for k in k],
[k.shape1 for k in k])
mask2 = mask_fn([k.mask2 for k in k],
[k.shape2 for k in k])
else:
raise TypeError(type(k), k)
k = kernel_fn(k, **user_reqs) # type: Kernel
if remask_kernel:
k = k.mask(mask1, mask2)
else:
k = k.replace(mask1=mask1, mask2=mask2)
return k
if hasattr(kernel_fn, _INPUT_REQ):
setattr(kernel_fn_with_masking,
_INPUT_REQ,
getattr(kernel_fn, _INPUT_REQ))
return init_fn, apply_fn_with_masking, kernel_fn_with_masking
return layer_with_masking
return supports_masking
| 23,104
|
def warnings(request: HttpRequest):
"""Adiciona alguns avisos no content"""
warning = list()
if hasattr(request, 'user'):
user: User = request.user
if not user.is_anonymous:
# Check the email
if user.email is None or user.email == "":
warning.append({
'message': 'Você não possui um e-mail registrado, por favor registre um',
'link_page_name': 'escola:self-email-change'
})
else:
logger.info("Não há atributo user")
return {'warnings': warning}
| 23,105
|
def _get_raw_key(args, key_field_name):
"""Searches for key values in flags, falling back to a file if necessary.
Args:
args: An object containing flag values from the command surface.
key_field_name (str): Corresponds to a flag name or field name in the key
file.
Returns:
The flag value associated with key_field_name, or the value contained in the
key file.
"""
flag_key = getattr(args, key_field_name, None)
if flag_key is not None:
return flag_key
return _read_key_store_file().get(key_field_name)
| 23,106
|
def get_images(headers, name, handler_registry=None,
handler_override=None):
"""
This function is deprecated. Use Header.data instead.
Load images from a detector for given Header(s).
Parameters
----------
headers : Header or list of Headers
name : string
field name (data key) of a detector
handler_registry : dict, optional
mapping spec names (strings) to handlers (callable classes)
handler_override : callable class, optional
overrides registered handlers
Example
-------
>>> header = DataBroker[-1]
>>> images = Images(header, 'my_detector_lightfield')
>>> for image in images:
# do something
"""
res = DataBroker.get_images(headers=headers, name=name,
handler_registry=handler_registry,
handler_override=handler_override)
return res
| 23,107
|
def compare_outputs(images, questions, vqg, vocab, logging,
args, num_show=1):
"""Sanity check generated output as we train.
Args:
images: Tensor containing images.
questions: Tensor containing questions as indices.
vqg: A question generation instance.
vocab: An instance of Vocabulary.
logging: logging to use to report results.
"""
vqg.eval()
# Forward pass through the model.
outputs = vqg.predict_from_image(images)
for _ in range(num_show):
logging.info(" ")
i = random.randint(0, images.size(0) - 1) # Inclusive.
# Sample some types.
# Log the outputs.
output = vocab.tokens_to_words(outputs[i])
question = vocab.tokens_to_words(questions[i])
logging.info('Sampled question : %s\n'
'Target question (%s)'
% (output,
question))
logging.info(" ")
| 23,108
|
def shape_is_ok(sequence: Union[Sequence[Any], Any], expected_shape: Tuple[int, ...]) -> bool:
"""
Check the number of items the array has and compare it with the shape product
"""
try:
sequence_len = len(flatten(sequence))
except Exception as err:
logger.info(f"Error when trying to compare shapes. {err}")
return False
return prod(expected_shape) == sequence_len
| 23,109
|
def ChannelSE(reduction=16, **kwargs):
"""
Squeeze and Excitation block, reimplementation inspired by
https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
Args:
reduction: channels squeeze factor
"""
channels_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def layer(input_tensor):
# get number of channels/filters
channels = backend.int_shape(input_tensor)[channels_axis]
x = input_tensor
# squeeze and excitation block in PyTorch style, with two 1x1 convolutions
x = layers.GlobalAveragePooling2D()(x)
x = layers.Lambda(expand_dims, arguments={'channels_axis': channels_axis})(x)
x = layers.Conv2D(channels // reduction, (1, 1), kernel_initializer='he_uniform')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(channels, (1, 1), kernel_initializer='he_uniform')(x)
x = layers.Activation('sigmoid')(x)
# apply attention
x = layers.Multiply()([input_tensor, x])
return x
return layer
| 23,110
|
def add_trainingset_flag(cam_parquet,
trainingset_pkl_path,
cam=None):
"""
Add to a single-cam parquet the information flags (adding columns)
indicating if a given cam view was used in a training set for
melting, hydro classif or riming degree
Input
cam_parquet: parquet file to add the columns to
trainingset_pkl_path: path where the pickles of the trainingset flags are locally stored
cam = 'cam0', 'cam1' or 'cam2'
"""
print('CAM: '+cam)
# Read the parquet file
table = pd.read_parquet(cam_parquet)
flake_uid = table.datetime.apply(lambda x: x.strftime('%Y.%m.%d_%H.%M.%S'))+'_flake_'+table.flake_number_tmp.apply(str)
# 1 Add hydro columns
add = pd.read_pickle(trainingset_pkl_path+'hydro_trainingset_'+cam+'.pkl')
is_in = np.asarray([0] * len(table))
value_in = np.asarray([np.nan] * len(table))
# Intersect
intersect = np.intersect1d(flake_uid,add.flake_id,return_indices=True)
ind1=intersect[1]
ind2=intersect[2]
# Fill
is_in[ind1] = 1
value_in[ind1] = add.class_id.iloc[ind2]
table['hl_snowflake'] = is_in
table['hl_snowflake_class_id'] = value_in
print('Found: '+str(len(ind1))+' in training, for hydro' )
# 2 Add melting columns
add = pd.read_pickle(trainingset_pkl_path+'melting_trainingset_'+cam+'.pkl')
is_in = np.asarray([0] * len(table))
value_in = np.asarray([np.nan] * len(table))
# Intersect
intersect = np.intersect1d(flake_uid,add.flake_id,return_indices=True)
ind1=intersect[1]
ind2=intersect[2]
# Fill
is_in[ind1] = 1
value_in[ind1] = add.melting.iloc[ind2]
table['hl_melting'] = is_in
table['hl_melting_class_id'] = value_in
print('Found: '+str(len(ind1))+' in training, for melting' )
# 3 Add riming columns
add = pd.read_pickle(trainingset_pkl_path+'riming_trainingset_'+cam+'.pkl')
is_in = np.asarray([0] * len(table))
value_in = np.asarray([np.nan] * len(table))
# Intersect
intersect = np.intersect1d(flake_uid,add.flake_id,return_indices=True)
ind1=intersect[1]
ind2=intersect[2]
# Fill
is_in[ind1] = 1
value_in[ind1] = add.riming_id.iloc[ind2]
table['hl_riming'] = is_in
table['hl_riming_class_id'] = value_in
print('Found: '+str(len(ind1))+' in training, for riming' )
# Overwrite
table = pa.Table.from_pandas(table)
pq.write_table(table, cam_parquet)
return(None)
| 23,111
|
def pw_wavy(n_samples=200, n_bkps=3, noise_std=None, seed=None):
"""Return a 1D piecewise wavy signal and the associated changepoints.
Args:
n_samples (int, optional): signal length
n_bkps (int, optional): number of changepoints
noise_std (float, optional): noise std. If None, no noise is added
seed (int): random seed
Returns:
tuple: signal of shape (n_samples, 1), list of breakpoints
"""
# breakpoints
bkps = draw_bkps(n_samples, n_bkps, seed=seed)
# we create the signal
f1 = np.array([0.075, 0.1])
f2 = np.array([0.1, 0.125])
freqs = np.zeros((n_samples, 2))
for sub, val in zip(np.split(freqs, bkps[:-1]), cycle([f1, f2])):
sub += val
tt = np.arange(n_samples)
# DeprecationWarning: Calling np.sum(generator) is deprecated
# Use np.sum(np.fromiter(generator)) or the python sum builtin instead.
signal = np.sum([np.sin(2 * np.pi * tt * f) for f in freqs.T], axis=0)
if noise_std is not None:
rng = np.random.default_rng(seed=seed)
noise = rng.normal(scale=noise_std, size=signal.shape)
signal += noise
return signal, bkps
| 23,112
|
def date():
"""
This is the data type for a date column.
"""
return Column(Date)
| 23,113
|
def sell_holdings(symbol, holdings_data):
""" Place an order to sell all holdings of a stock.
Args:
symbol(str): Symbol of the stock we want to sell
holdings_data(dict): dict obtained from get_modified_holdings() method
"""
shares_owned = 0
result = {}
for item in holdings_data:
if not item:
continue
if (symbol == item["currency"]["code"]):
shares_owned = int(float(item["quantity"]))
if not debug:
result = rr.order_sell_crypto_by_quantity(symbol, shares_owned)
print("####### Selling " + str(shares_owned) +
" shares of " + symbol + " #######")
send_text("SELL: \nSelling " + str(shares_owned) + " shares of " + symbol)
return result
| 23,114
|
def get_active_project_path():
"""
Arguments:
None
Return:
str: current project folder path
"""
window = sublime.active_window()
folders = window.folders()
if len(folders) == 1:
return folders[0]
else:
active_view = window.active_view()
active_file_name = active_view.file_name() if active_view else None
if not active_file_name:
return folders[0] if len(folders) else os.path.expanduser("~")
for folder in folders:
if active_file_name.startswith(folder):
return folder
return os.path.dirname(active_file_name)
| 23,115
|
def get_people_urls(gedcom_data, apid_full_map):
"""
Read in all the person URLs for later reference
"""
people = {}
found = False
logging.info("Extracting person specific URL information")
for line in gedcom_data.split("\n"):
if len(line) > 5:
tag = line.split(" ")[1]
if "@P" in tag:
person = tag
found = False
continue
if tag == "_APID" and not found:
apid = line.split(" ")[2]
if apid in apid_full_map:
if "person_url" in apid_full_map[apid]:
if apid_full_map[apid]["person_url"] != "":
people.update({person: apid_full_map[apid]["person_url"]})
found = True
logging.info("Person URL extraction completed")
return people
| 23,116
|
def permute_masks(old_masks):
"""
Function to randomly permute the mask in a global manner.
Arguments
---------
old_masks: List containing all the layer wise mask of the neural network, mandatory. No default.
Returns
-------
new_masks: List containing all the masks permuted globally
"""
layer_wise_flatten = [] # maintain the layerwise flattened tensor
for i in range(len(old_masks)):
layer_wise_flatten.append(old_masks[i].flatten())
global_flatten = []
for i in range(len(layer_wise_flatten)):
if len(global_flatten) == 0:
global_flatten.append(layer_wise_flatten[i].cpu())
else:
global_flatten[-1] = np.append(global_flatten[-1], layer_wise_flatten[i].cpu())
permuted_mask = np.random.permutation(global_flatten[-1])
new_masks = []
idx1 = 0
idx2 = 0
for i in range(len(old_masks)):
till_idx = old_masks[i].numel()
idx2 = idx2 + till_idx
new_masks.append(permuted_mask[idx1:idx2].reshape(old_masks[i].shape))
idx1 = idx2
# Convert to tensor
for i in range(len(new_masks)):
new_masks[i] = torch.tensor(new_masks[i])
return new_masks
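# Illustrative usage (added sketch, hypothetical tiny masks): a global
# permutation preserves each layer's shape and the total number of kept
# weights; only their positions change across layers.
import torch
masks = [torch.tensor([[1., 0.], [0., 1.]]), torch.tensor([1., 1., 0.])]
permuted = permute_masks(masks)
assert [m.shape for m in permuted] == [m.shape for m in masks]
assert sum(m.sum() for m in permuted) == sum(m.sum() for m in masks)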
| 23,117
|
async def file_clang_formatted_correctly(filename, semaphore, verbose=False):
"""
Checks if a file is formatted correctly and returns True if so.
"""
ok = True
# -style=file picks up the closest .clang-format
cmd = "{} -style=file {}".format(CLANG_FORMAT_PATH, filename)
async with semaphore:
proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE)
# Read back the formatted file.
stdout, _ = await proc.communicate()
formatted_contents = stdout.decode()
# Compare the formatted file to the original file.
with open(filename) as orig:
orig_contents = orig.read()
if formatted_contents != orig_contents:
ok = False
if verbose:
print("{} is not formatted correctly".format(filename))
return ok
| 23,118
|
def service_builder(client: Client, is_for_update: bool, endpoint_tag: str,
name: str, service_type: str, protocol: str = None, source_port: int = None,
destination_port: int = None, protocol_name: str = None,
icmp_type: str = None, icmp_code: str = None,
icmp_v6_type: str = None, icmp_v6_code: str = None) -> dict:
"""Builder for the service object - build the body of the request
Args:
client (Client): Sophos XG Firewall Client
is_for_update (bool): True if the object should be updated
endpoint_tag (str): The endpoint_tag of the object we want to get data from
name (str): The name of the object we want to add/update
service_type (str, optional): Service Type information of the service
protocol (str, optional): Protocol information of the service
source_port (str, optional): Source Port information of the service
destination_port (str, optional): Destination Port information of the service
protocol_name (str, optional): Protocol Name information of the service
icmp_type (str, optional): ICMP Type information of the service
icmp_code (str, optional): ICMP Code information of the service
icmp_v6_type (str, optional): ICMP V6 Type information of the service
icmp_v6_code (str, optional): ICMP V6 Code information of the service
Raises:
Exception: Missing protocol, source port and destination port
Exception: Missing protocol name
Exception: Missing icmp_type and icmp_code
Exception: Missing icmp_v6_type and icmp_v6_code
Returns:
dict: returned dictionary
"""
previous_service_details = []
# if the object need to be updated, merge between old and new information will happen
if is_for_update:
previous_object = client.get_item_by_name(endpoint_tag, name)
previous_object = json.loads(xml2json(previous_object.text))
check_error_on_response(previous_object)
service_type = retrieve_dict_item_recursively(previous_object, 'Type')
previous_service_details = retrieve_dict_item_recursively(previous_object, 'ServiceDetail')
if not previous_service_details:
previous_service_details = []
elif not isinstance(previous_service_details, list):
previous_service_details = [previous_service_details]
json_data = {
'Name': name,
'Type': service_type,
}
if service_type == 'TCPorUDP':
if not (protocol and source_port and destination_port):
raise Exception('Please provide protocol, source_port and destination_port')
service_details = {
'Protocol': protocol,
'SourcePort': source_port,
'DestinationPort': destination_port
}
elif service_type == 'IP':
if not protocol_name:
raise Exception('Please provide protocol_name')
service_details = {
'ProtocolName': protocol_name
}
elif service_type == 'ICMP':
if not (icmp_type and icmp_code):
raise Exception('Please provide icmp_type and icmp_code')
service_details = {
'ICMPType': icmp_type,
'ICMPCode': icmp_code
}
else: # type == 'ICMPv6'
if not (icmp_v6_type and icmp_v6_code):
raise Exception('Please provide icmp_v6_type and icmp_v6_code')
service_details = {
'ICMPv6Type': icmp_v6_type,
'ICMPv6Code': icmp_v6_code
}
previous_service_details.append(service_details)
json_data.update({
'ServiceDetails': {
'ServiceDetail': previous_service_details
}
})
return remove_empty_elements(json_data)
| 23,119
|
def fkl( angles ):
"""
Convert joint angles and bone lengths into the 3d points of a person.
Based on expmap2xyz.m, available at
https://github.com/asheshjain399/RNNexp/blob/7fc5a53292dc0f232867beb66c3a9ef845d705cb/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/exp2xyz.m
Args
angles: 117-long vector with 3d position and 3d joint angles in expmap format
(the parent, offset, posInd and expmapInd structures describing the 38-joint
kinematic tree are obtained internally from _some_variables())
Returns
xyz: flattened 38x3 3d points that represent a person in 3d space
"""
parent, offset, posInd, expmapInd = _some_variables()
assert len(angles) == 117
# Structure that indicates parents for each joint
njoints = 38
xyzStruct = [dict() for x in range(njoints)]
for i in np.arange( njoints ):
# try:
# if not rotInd[i] : # If the list is empty
# xangle, yangle, zangle = 0, 0, 0
# else:
# xangle = angles[ rotInd[i][2]-1 ]
# yangle = angles[ rotInd[i][1]-1 ]
# zangle = angles[ rotInd[i][0]-1 ]
# except:
# print (i)
try:
if not posInd[i] : # If the list is empty
xangle, yangle, zangle = 0, 0, 0
else:
xangle = angles[ posInd[i][2]-1 ]
yangle = angles[ posInd[i][1]-1 ]
zangle = angles[ posInd[i][0]-1 ]
except:
print (i)
r = angles[ expmapInd[i] ]
thisRotation = expmap2rotmat(r)
thisPosition = np.array([zangle, yangle, xangle])
if parent[i] == -1: # Root node
xyzStruct[i]['rotation'] = thisRotation
xyzStruct[i]['xyz'] = np.reshape(offset[i,:], (1,3)) + thisPosition
else:
xyzStruct[i]['xyz'] = (offset[i,:] + thisPosition).dot( xyzStruct[ parent[i] ]['rotation'] ) + xyzStruct[ parent[i] ]['xyz']
xyzStruct[i]['rotation'] = thisRotation.dot( xyzStruct[ parent[i] ]['rotation'] )
xyz = [xyzStruct[i]['xyz'] for i in range(njoints)]
xyz = np.array( xyz ).squeeze()
xyz = xyz[:,[0,2,1]]
return np.reshape( xyz, [-1] )
| 23,120
|
def no_matplotlib(monkeypatch):
""" Mock an import error for matplotlib"""
import_orig = builtins.__import__
def mocked_import(name, globals, locals, fromlist, level):
""" """
if name == 'matplotlib.pyplot':
raise ImportError("This is a mocked import error")
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import)
| 23,121
|
def profile(func: Callable[..., Any]) -> Callable[..., Any]:
"""
Create a decorator for wrapping a provided function in a LineProfiler context.
Parameters
----------
func : callable
The function that is to be wrapped inside the LineProfiler context.
Returns
-------
wrapper : callable
The context containing the wrapped function.
"""
@wraps(func)
def wrapper(*args: Optional[Any], **kwargs: Optional[Any]) -> LineProfiler:
prof = LineProfiler()
try:
return prof(func)(*args, **kwargs)
finally:
prof.print_stats()
return wrapper
| 23,122
|
def phase_comp(psi_comp, uwrap=False, dens=None):
"""Compute the phase (angle) of a single complex wavefunction component.
Parameters
----------
psi_comp : NumPy :obj:`array` or PyTorch :obj:`Tensor`
A single wavefunction component.
Returns
-------
angle : NumPy :obj:`array` or PyTorch :obj:`Tensor`
The phase (angle) of the component's wavefunction.
"""
if isinstance(psi_comp, np.ndarray):
ang = np.angle(psi_comp)
if uwrap:
ang = rest.unwrap_phase(ang)
elif isinstance(psi_comp, torch.Tensor):
ang = torch.angle(psi_comp)
if uwrap:
raise NotImplementedError("Unwrapping the complex phase is not "
"implemented for PyTorch tensors.")
if dens is not None:
ang[dens < (dens.max() * 1e-6)] = 0
return ang
| 23,123
|
def test_config_changed_different_state_absent(mock_module,
mock_exec):
"""
cl-interface - test config_changed with state == noconfig
"""
instance = mock_module.return_value
instance.params.get_return_value = 'lo'
iface = {'name': 'lo', 'ifacetype': 'loopback',
'config': {
'alias': 'noconfig'
}}
mock_exec.return_value = ''.join(open('tests/lo.txt').readlines())
assert_equals(config_changed(instance, iface), True)
| 23,124
|
def packets(args):
"""Show packet info."""
with open(args.infile, 'rb') as tsi:
offset = 0
packet_idx = 0
for packet in iter(lambda: tsi.read(args.packet_size), b''):
ts_packet = get_ts_packet(packet, args.packet_size)
offset += args.packet_size - TS_PACKET_SIZE
# Resync
# while get_sync_byte(ts_packet) != 0x47:
# ts_packet = ts_packet[1:] + tsi.read(1)
# offset += 1
pid = get_pid(ts_packet)
print('{:012d} [0x{:04X}]'.format(offset, pid))
offset += TS_PACKET_SIZE
packet_idx += 1
| 23,125
|
def EG(d1,d2,P):
"""
Compute the expected gain of player 1 if they roll d1 dice and
player 2 rolls d2 dice
----------------------------------------------------
Args:
- d1 : number of dice rolled by player 1
- d2 : number of dice rolled by player 2
- P : probability matrix
"""
s = 0
L = np.arange(1,6*d2+1)
for k in range(1,6*d1+1):
s += np.sum(P[d1,k]*P[d2,L[L<k]]) - np.sum(P[d1,k]*P[d2,L[L>k]])
return s
| 23,126
|
def save_current_editor_as():
"""
Saves the current editor as (the save as dialog will be shown
automatically).
"""
_window().save_current_as()
| 23,127
|
def get_tx_data(request):
"""
JSON Needed:
1. txid
E.g.:
{"txid": "hgjsyher6ygfdg"}
"""
txid = request.data['txid']
try:
# req_hex_data = get_tx_data(txid)
req_hex_data = api.gettxoutdata(txid,0) # TODO
except:
return Response(status=status.HTTP_403_FORBIDDEN, data={'status': 'failure',
'message': 'Request Unsuccessful. Error while connecting with blockchain node'})
try:
# get requested data from txid
req_json_data = hex_to_json(req_hex_data) # TODO: this is LIST
return Response(data=req_json_data, status=status.HTTP_202_ACCEPTED)
except Exception as e:
return Response(data={"status":"failure", "message": "Something Wrong Occurred."
# ,"exception":e
},
status=status.HTTP_403_FORBIDDEN)
| 23,128
|
def roi_max_counts(images_sets, label_array):
"""
Return the brightest pixel in any ROI in any image in the image set.
Parameters
----------
images_sets : array
iterable of 4D arrays
shapes is: (len(images_sets), )
label_array : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
Returns
-------
max_counts : int
maximum pixel counts
"""
max_cts = 0
for img_set in images_sets:
for img in img_set:
max_cts = max(max_cts, ndim.maximum(img, label_array))
return max_cts
| 23,129
|
def normalize_full_width(text):
"""
a function to normalize full width characters
"""
return unicodedata.normalize('NFKC', text)
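# Illustrative usage (added sketch): NFKC normalization folds full-width
# Latin letters and digits to their ASCII equivalents.
print(normalize_full_width('ＡＢＣ１２３'))  # -> ABC123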
| 23,130
|
def make_definitions(acronym, words_by_letter, limit=1):
"""Find definitions an acronym given groupings of words by letters"""
definitions = []
for _ in range(limit):
definition = []
for letter in acronym.lower():
opts = words_by_letter.get(letter.lower(), [])
definition.append(random.choice(opts).title() if opts else "?")
definitions.append(" ".join(definition))
return definitions
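# Illustrative usage (added sketch with hypothetical word groupings): with a
# single candidate word per letter the result is deterministic.
words_by_letter = {"g": ["great"], "n": ["new"], "u": ["utility"]}
print(make_definitions("GNU", words_by_letter))  # -> ['Great New Utility']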
| 23,131
|
def plot_effective_area_from_file(file, all_cuts=False, ax=None, **kwargs):
""" """
ax = plt.gca() if ax is None else ax
if all_cuts:
names = ["", "_NO_CUTS", "_ONLY_GH", "_ONLY_THETA"]
else:
names = tuple([""])
label_basename = kwargs["label"] if "label" in kwargs else ""
kwargs.setdefault("ls", "")
for name in names:
area = QTable.read(file, hdu="EFFECTIVE_AREA" + name)[0]
kwargs["label"] = label_basename + name.replace("_", " ")
ax.errorbar(
0.5 * (area["ENERG_LO"] + area["ENERG_HI"]).to_value(u.TeV)[1:-1],
area["EFFAREA"].to_value(u.m ** 2).T[1:-1, 0],
xerr=0.5 * (area["ENERG_LO"] - area["ENERG_HI"]).to_value(u.TeV)[1:-1],
**kwargs,
)
# Style settings
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("True energy / TeV")
ax.set_ylabel("Effective collection area / m²")
ax.grid(which="both")
ax.legend()
ax.grid(True, which="both")
return ax
| 23,132
|
def get_spreading_coefficient(dist):
"""Calculate the spreading coefficient.
Args:
dist: A Distribution from a direct (GC) spreading simulation.
Returns:
The dimensionless spreading coefficient (beta*s*A).
"""
potential = -dist.log_probs
valley = np.amin(potential)
split = int(0.5 * len(potential))
plateau = np.mean(potential[split:])
return valley - plateau
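# Worked example (added sketch with a synthetic distribution object): the
# potential is -log_probs, so a valley at 0 followed by a plateau at 5 gives
# a spreading coefficient of 0 - 5 = -5.
import numpy as np
from types import SimpleNamespace
fake_dist = SimpleNamespace(log_probs=np.concatenate([np.zeros(50), np.full(50, -5.0)]))
print(get_spreading_coefficient(fake_dist))  # -> -5.0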
| 23,133
|
def get_config_path() -> Path:
"""Returns path to the root of the project"""
return Path(__file__).parent / "config"
| 23,134
|
def round(x):
"""
Return ``x`` rounded to an ``Integer``.
"""
return create_RealNumber(x).round()
| 23,135
|
def test_delete_record_get_request(client, set_up, login):
"""test if record confirm delete template is displayed"""
record_to_delete = CoupeDay.objects.first()
response = client.get(reverse('home:record-delete', kwargs={'pk': record_to_delete.id}), follow=True)
delete_txt = f"Confirm delete"
assert delete_txt in response.content.decode('UTF-8')
| 23,136
|
def test_testclass(ctestdir):
"""Using test classes
"""
with get_example("testclass.py").open("rt") as f:
ctestdir.makepyfile(f.read())
result = ctestdir.runpytest("--verbose")
try:
result.assert_outcomes(passed=4, skipped=4, failed=0, xfailed=2)
except TypeError:
result.assert_outcomes(passed=4, skipped=4, failed=0)
result.stdout.re_match_lines(r"""
.*::TestClass::test_a (?:XFAIL(?:\s+\(.*\))?|xfail)
.*::TestClass::test_b PASSED
.*::TestClass::test_c SKIPPED(?:\s+\(.*\))?
.*::TestClass::test_d PASSED
.*::TestClass::test_e SKIPPED(?:\s+\(.*\))?
.*::TestClassNamed::test_a (?:XFAIL(?:\s+\(.*\))?|xfail)
.*::TestClassNamed::test_b PASSED
.*::TestClassNamed::test_c SKIPPED(?:\s+\(.*\))?
.*::TestClassNamed::test_d PASSED
.*::TestClassNamed::test_e SKIPPED(?:\s+\(.*\))?
""")
| 23,137
|
def author(repo, subset, x):
"""``author(string)``
Alias for ``user(string)``.
"""
# i18n: "author" is a keyword
n = encoding.lower(getstring(x, _("author requires a string")))
return [r for r in subset if n in encoding.lower(repo[r].user())]
| 23,138
|
def p_definition (t):
"""definition : type_def
| constant_def"""
t[0] = t[1]
| 23,139
|
def create_test_example_solution_files(exercise_name, prob_spec_exercise):
"""
Auto-generates the test file.
Function creates the test file in its own right, but also calls the
create_example_and_solution_files function. This function also creates
the parameters to feed into the create_example_and_solution_files function.
Parameter exercise_name: Name of the exercise to be generated.
Precondition: exercise_name is a string of a valid exercise.
Parameter prob_spec_exercise: A filepath to the location of the exercise folder
in the problem-specifications repository.
Precondition: prob_spec_exercise is a string of a valid filepath.
"""
data = None
with open(f"{prob_spec_exercise}/canonical-data.json") as file:
data = json.load(file)
# Boilerplate test code. Multiline docstring format used to maintain
# correct indentation and to increase readability.
exercise_string = """;; Ensures that {0}.lisp and the testing library are always loaded
(eval-when (:compile-toplevel :load-toplevel :execute)
(load "{0}")
(quicklisp-client:quickload :fiveam))
;; Defines the testing package with symbols from {0} and FiveAM in scope
;; The `run-tests` function is exported for use by both the user and test-runner
(defpackage :{0}-test
(:use :cl :fiveam)
(:export :run-tests))
;; Enter the testing package
(in-package :{0}-test)
;; Define and enter a new FiveAM test-suite
(def-suite* {0}-suite)
""".format(exercise_name)
# func_name_dict is a dictionary of all function names and their
# expected input argument names.
func_name_dict, tests_string = create_test(data["cases"], exercise_name)
# tests_string is sandwiched between exercise_string and more boilerplate
# code at the end of the file.
exercise_string += tests_string + """
(defun run-tests (&optional (test-or-suite '{0}-suite))
"Provides human readable results of test run. Default to entire suite."
(run! test-or-suite))
""".format(exercise_name)
with open(f"{TARGET}/{exercise_name}/{exercise_name}-test.lisp", 'w') as file:
file.write(exercise_string)
create_example_and_solution_files(exercise_name, func_name_dict)
| 23,140
|
def pianoroll_plot_setup(figsize=None, side_piano_ratio=0.025,
faint_pr=True, xlim=None):
"""Makes a tiny piano left of the y-axis and a faint piano on the main figure.
This function sets up the figure for pretty plotting a piano roll. It makes a
small imshow plot to the left of the main plot that looks like a piano. This
piano side plot is aligned along the y-axis of the main plot, such that y
values align with MIDI values (y=0 is the lowest C-1, y=11 is C0, etc).
Additionally, a main figure is set up that shares the y-axis of the piano side
plot. Optionally, a set of faint horizontal lines are drawn on the main figure
that correspond to the black keys on the piano (and a line separating B & C
and E & F). This function returns the formatted figure, the side piano axis,
and the main axis for plotting your data.
By default, this will draw 11 octaves of piano keys along the y-axis; you will
probably want reduce what is visible using `ax.set_ylim()` on either returned
axis.
Using with imshow piano roll data:
A common use case is for using imshow() on the main axis to display a piano
roll alongside the piano side plot AND the faint piano roll behind your
data. In this case, if your data is a 2D array you have to use a masked
numpy array to make certain values invisible on the plot, and therefore make
the faint piano roll visible. Here's an example:
midi = np.flipud([
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
])
midi_masked = np.ma.masked_values(midi, 0.0) # Mask out all 0.0's
fig, ax, sp = plotting.pianoroll_plot_setup()
ax.imshow(midi_masked, origin='lower', aspect='auto') # main subplot axis
sp.set_ylabel('My favorite MIDI data') # side piano axis
fig.show()
The other option is to use imshow in RGBA mode, where your data is split
into 4 channels. Every alpha value that is 0.0 will be transparent and show
the faint piano roll below your data.
Args:
figsize: Size of the matplotlib figure. Will be passed to `plt.figure()`.
Defaults to None.
side_piano_ratio: Width of the y-axis piano as a ratio of the whole
figure. Defaults to 1/40th.
faint_pr: Whether to draw faint black & white keys across the main plot.
Defaults to True.
xlim: Tuple containing the min and max of the x values for the main plot.
Only used to determine the x limits for the faint piano roll in the main
plot. Defaults to (0, 1000).
Returns:
(figure, main_axis, left_piano_axis)
figure: A matplotlib figure object containing both subplots set up with an
aligned piano roll.
main_axis: A matplotlib axis object to be used for plotting. Optionally
has a faint piano roll in the background.
left_piano_axis: A matplotlib axis object that has a small, aligned piano
along the left side y-axis of the main_axis subplot.
"""
octaves = 11
# Setup figure and gridspec.
fig = plt.figure(figsize=figsize)
gs_ratio = int(1 / side_piano_ratio)
gs = gridspec.GridSpec(1, 2, width_ratios=[1, gs_ratio])
left_piano_ax = fig.add_subplot(gs[0])
# Make a piano on the left side of the y-axis with imshow().
keys = np.array(
[0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0] # notes in descending order; B -> C
)
keys = np.tile(keys, octaves)[:, None]
left_piano_ax.imshow(keys, cmap='binary', aspect='auto',
extent=[0, 0.625, -0.5, octaves*12-0.5])
# Make the lines between keys.
for i in range(octaves):
left_piano_ax.hlines(i*12 - 0.5, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 1.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 3.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 4.5, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 6.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 8.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 10.0, -0.5, 1, colors='black', linewidth=0.5)
# Set the limits of the side piano and remove ticks so it looks nice.
left_piano_ax.set_xlim(0, 0.995)
left_piano_ax.set_xticks([])
# Create the aligned axis we'll return to the user.
main_ax = fig.add_subplot(gs[1], sharey=left_piano_ax)
# Draw a faint piano roll behind the main axes (if the user wants).
if faint_pr:
xlim = (0, 1000) if xlim is None else xlim
x_min, x_max = xlim
x_delta = x_max - x_min
main_ax.imshow(np.tile(keys, x_delta), cmap='binary', aspect='auto',
alpha=0.05, extent=[x_min, x_max, -0.5, octaves*12-0.5])
for i in range(octaves):
main_ax.hlines(i * 12 + 4.5, x_min, x_max, colors='black',
linewidth=0.5, alpha=0.25)
main_ax.hlines(i * 12 - 0.5, x_min, x_max, colors='black',
linewidth=0.5, alpha=0.25)
main_ax.set_xlim(*xlim)
# Some final cosmetic tweaks before returning the axis obj's and figure.
plt.setp(main_ax.get_yticklabels(), visible=False)
gs.tight_layout(fig)
return fig, main_ax, left_piano_ax
| 23,141
|
def exercise(request, exercisename):
"""Show single sport and its totals."""
e = exercisename
cur_user = request.user
exercises = Exercise.objects.filter(owner=cur_user, sport=e).order_by('-date')
context = {'exercises': exercises, 'total': Stats.total(cur_user, sport=e),
'totaltime': Stats.totaltime(cur_user, sport=e)}
return render(request, 'distances/exercises.html', context)
| 23,142
|
def rng() -> int:
"""Return a 30-bit hardware generated random number."""
pass
| 23,143
|
def randomBinaryMatrix(scale, type):
"""
Generates a pseudo-random BinaryMatrix of a given scale (small, large) and
datatype (int).
"""
if(scale == "small" and type == "int"):
nrow = random.randint(1, 10)
ncol = random.randint(1, 10)
data = []
for i in range(nrow):
data.append([])
for _j in range(ncol):
data[i].append(random.randint(0, 1))
return BinaryMatrix(
nrow=nrow,
ncol=ncol,
data=data
)
if(scale == "large" and type == "int"):
nrow = random.randint(10, 100)
ncol = random.randint(10, 100)
data = []
for i in range(nrow):
data.append([])
for _j in range(ncol):
data[i].append(random.randint(0, 1))
return BinaryMatrix(
nrow=nrow,
ncol=ncol,
data=data
)
| 23,144
|
def draw_contours(mat, contours, color=(0, 0, 255), thickness=1):
"""
Draws contours on the input image. The input image is modified.
:param mat: input image
:param contours: contours to draw
:param color: color of contours
:param thickness: thickness of contours, filled if -1
:return: None
"""
cv2.drawContours(mat, contours, -1, color, thickness=thickness)
| 23,145
|
def load_batch(server_context: ServerContext, assay_id: int, batch_id: int) -> Optional[Batch]:
"""
Loads a batch from the server.
:param server_context: A LabKey server context. See utils.create_server_context.
:param assay_id: The protocol id of the assay from which to load a batch.
:param batch_id:
:return:
"""
load_batch_url = server_context.build_url("assay", "getAssayBatch.api")
loaded_batch = None
payload = {"assayId": assay_id, "batchId": batch_id}
json_body = server_context.make_request(load_batch_url, json=payload)
if json_body is not None:
loaded_batch = Batch(**json_body["batch"])
return loaded_batch
| 23,146
|
def canvas_compose(mode, dst, src):
"""Compose two alpha premultiplied images
https://ciechanow.ski/alpha-compositing/
http://ssp.impulsetrain.com/porterduff.html
"""
src_a = src[..., -1:] if len(src.shape) == 3 else src
dst_a = dst[..., -1:] if len(dst.shape) == 3 else dst
if mode == COMPOSE_OVER:
return src + dst * (1 - src_a)
elif mode == COMPOSE_OUT:
return src * (1 - dst_a)
elif mode == COMPOSE_IN:
return src * dst_a
elif mode == COMPOSE_ATOP:
return src * dst_a + dst * (1 - src_a)
elif mode == COMPOSE_XOR:
return src * (1 - dst_a) + dst * (1 - src_a)
elif isinstance(mode, tuple) and len(mode) == 4:
k1, k2, k3, k4 = mode
return (k1 * src * dst + k2 * src + k3 * dst + k4).clip(0, 1)
raise ValueError(f"invalid compose mode: {mode}")
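# Illustrative usage (added sketch): the generic (k1, k2, k3, k4) mode does not
# depend on the module's COMPOSE_* constants (not shown here); (0, 1, 1, 0)
# reduces to clipped additive blending of two premultiplied RGBA pixels.
import numpy as np
src = np.array([0.25, 0.50, 0.75, 1.0])
dst = np.array([0.50, 0.25, 0.00, 1.0])
print(canvas_compose((0, 1, 1, 0), dst, src))  # [0.75, 0.75, 0.75, 1.0] after clipping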
| 23,147
|
async def get_timelog_user_id(
*,
user_id: int,
epic_id: int,
month: int,
year: int,
session: Session = Depends(get_session),
):
"""
Get list of timelogs by user_id, epic_id, month and year.
Parameters
----------
user_id : int
ID of the user from which to pull timelogs.
epic_id : int
ID of the epic from which to pull timelogs.
month : int
Month from which to pull timelog(s).
year : int
Year from which to pull timelog(s).
session : Session
SQL session that is to be used to get the timelogs.
Defaults to creating a dependency on the running SQL model session.
"""
statement = (
select(
TimeLog.id,
AppUser.username.label("username"),
Epic.name.label("epic_name"),
EpicArea.name.label("epic_area_name"),
TimeLog.start_time,
TimeLog.end_time,
TimeLog.count_hours,
TimeLog.count_days,
)
.join(AppUser)
.join(EpicArea)
.join(Epic)
.where(TimeLog.user_id == user_id)
.where(TimeLog.epic_id == epic_id)
.where(TimeLog.month == month)
.where(TimeLog.year == year)
.order_by(TimeLog.end_time.desc())
)
results = session.exec(statement).all()
return results
| 23,148
|
def _cross(
vec1,
vec2,
):
"""Cross product between vec1 and vec2 in R^3"""
vec3 = np.zeros((3,))
vec3[0] = +(vec1[1] * vec2[2] - vec1[2] * vec2[1])
vec3[1] = -(vec1[0] * vec2[2] - vec1[2] * vec2[0])
vec3[2] = +(vec1[0] * vec2[1] - vec1[1] * vec2[0])
return vec3
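# Illustrative usage (added sketch): the cross product of the x and y unit
# vectors gives the z unit vector.
print(_cross([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))  # -> [0. 0. 1.]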
| 23,149
|
def _simplex_dot3D(g, x, y, z):
""" 3D dot product """
return g[0] * x + g[1] * y + g[2] * z
| 23,150
|
def __get_report_failures(test_data: TestData) -> str:
"""
Gets test report with all failed test soft asserts
:param test_data: test data from yaml file
:return: str test report with all soft asserts
"""
test_id = __get_test_id()
failed_assert_reports = __FAILED_EXPECTATIONS.get(test_id)
meta_info = inspect.stack()[2][1:4]
expectation_report = ExpectationReport(test_data.test_name, failed_assert_reports, meta_info)
return expectation_report.get_report_message()
| 23,151
|
def make_complex_heatmap(df_data, heatmap_cmap='coolwarm',
vmax=4,
vmin=-4,
figsize=(16, 9),
row_metadata=None,
col_metadata=None,
col_colorbar_anchor=[0.12, 0.1, 0.7, 0.05],
row_colorbar_anchor=[0.85, 0.15, 0.02, 0.7],
figname=None):
"""Script for making complex heatmaps with sidebars and legends of each colorbar.
Row metadata are shown in additional columns of heatmap and col metadata are shown in additional rows.
Parameters
--------
df_data : pd.DataFrame
a table with data.
row_metadata, col_metadata : pd.Series or pd.DataFrame
metadata of rows and columns that needs to be represented as row and column sidebars.
figsize : figure size as in matplotlib.
vmax, vmin : float
max or min value passed to the heatmap function from Seaborn.
col_colorbar_anchor, row_colorbar_anchor : list of length 4
coordinates and size of the color bar; the first two values are the x and y coordinates, the third and fourth are width and height.
figname : str
path of output figure, if None, print to console.
"""
# Initialize subplots.
row_metadata = pd.DataFrame(row_metadata)
col_metadata = pd.DataFrame(col_metadata)
n_row = row_metadata.shape[1] + 1
n_col = col_metadata.shape[1] + 1
height_ratios = [15] + [1] * (n_col - 1)
width_ratios = [15] + [1] * (n_row - 1)
fig, axes = plt.subplots(n_col, n_row, sharex=False, sharey=False, figsize=figsize, gridspec_kw={'height_ratios': height_ratios,
'width_ratios': width_ratios,
'wspace': 0.1,
'hspace': 0})
if n_row * n_col > 1:
# Axes are flattened for easier indexing
axes = axes.ravel()
main_fig = sns.heatmap(df_data, vmax=vmax, vmin=vmin, ax=axes[
0], cbar=False, cmap=heatmap_cmap, robust=True)
else:
main_fig = sns.heatmap(
df_data, vmax=vmax, vmin=vmin, cbar=False, cmap=heatmap_cmap, robust=True)
# Make the main heatmap as the first subplot
main_fig_axes = fig.add_axes([0.13, 0.95, 0.7, 0.05])
main_fig_cb = plt.colorbar(main_fig.get_children()[
0], orientation='horizontal', cax=main_fig_axes)
main_fig_cb.ax.set_title("Heatmap", position=(1.06, 0.1), fontsize=16)
main_fig.set_xticks([])
main_fig.set_yticks([])
main_fig.set_ylabel(
'logFC change compared with corresponding DMSO', fontsize=14)
# Iterate through each metadata dataframe and start ploting the color bar
# and heatmaps row-wise or column-wise
for metadata, base_anchor, anchor_offset_location in zip([row_metadata, col_metadata], [row_colorbar_anchor, col_colorbar_anchor], [0, 1]):
axes_offset = 1
if metadata is None:
continue
# Iterate through each metadata colorbar
for col in metadata.columns:
metadata_vector = metadata[col]
# Handling continuous heatmap sidebar values
try:
metadata_vector = metadata_vector.astype(float)
metadata_vector = pd.DataFrame(metadata_vector, columns=[col])
levels = metadata_vector[col].sort_values().unique()
cmap = 'Blues'
cb_type = 'continuous'
# Handling descrete heatmap sidebar values, which are factorized.
except ValueError:
levels = metadata_vector.factorize()[1]
metadata_vector = pd.DataFrame(
metadata_vector.factorize()[0], columns=[col])
cmap = sns.color_palette("cubehelix_r", levels.shape[0])
cb_type = 'discreet'
# Calculate the axes index and location of the "legend" of the
# sidebar, which are actually colorbar objects.
if anchor_offset_location == 0:
offset = 0.1
# Column side bar offsets.
ax = axes[axes_offset]
cbar_label_orientation = 'vertical'
cbar_title_location = (1.03, 1)
else:
offset = -0.1
# Row side bar offsets.
ax = axes[axes_offset * n_row]
cbar_label_orientation = 'horizontal'
cbar_title_location = (1.03, 0.1)
metadata_vector = metadata_vector.transpose()
# Plotting the sidebar and its colorbar
anchor = base_anchor
anchor[anchor_offset_location] = anchor[
anchor_offset_location] + offset
colorbar_ax = fig.add_axes(anchor)
g = sns.heatmap(metadata_vector, ax=ax, cbar=False, xticklabels=False,
yticklabels=False, cmap=cmap, vmax=metadata_vector.values.max() + 1)
# g.set_title(col)
if cb_type != 'continuous':
cb = plt.colorbar(
g.get_children()[0], orientation=cbar_label_orientation, cax=colorbar_ax)
# Make correct ticks and tick labels, need to offset the lenth
# to fix the miss-by-one problem.
cb.set_ticks(np.arange(0.5, 0.5 + len(levels), 1))
if anchor_offset_location == 0:
cb.ax.set_yticklabels(levels.values, fontsize=14)
else:
cb.ax.set_xticklabels(levels.values, fontsize=14)
else:
cb = plt.colorbar(
g.get_children()[0], orientation=cbar_label_orientation, cax=colorbar_ax)
cb.ax.set_title(col, position=cbar_title_location, fontsize=14)
cb.ax.invert_yaxis()
# To the next subplot axes
axes_offset += 1
# Get rid of empty subplots not used in the figure.
valid_axes_id = [x for x in range(
n_col)] + [x * n_row for x in range(n_col)]
for axes_id in range(len(axes)):
if axes_id not in valid_axes_id:
fig.delaxes(axes[axes_id])
# This is a hack in order to make the correct X axis label
axes[n_row * (n_col - 1)].set_xlabel('Treatments', fontsize=14)
if figname is not None:
plt.savefig(figname, bbox_inches='tight')
plt.close()
| 23,152
|
def initialized_sm(registrations, uninitialized_sm):
""" The equivalent of an app with commit """
uninitialized_sm.initialize()
return uninitialized_sm
| 23,153
|
def compute_options(
platform: PlatformName,
package_dir: Path,
output_dir: Path,
config_file: Optional[str],
args_archs: Optional[str],
prerelease_pythons: bool,
) -> BuildOptions:
"""
Compute the options from the environment and configuration file.
"""
manylinux_identifiers = {
f"manylinux-{build_platform}-image" for build_platform in MANYLINUX_ARCHS
}
musllinux_identifiers = {
f"musllinux-{build_platform}-image" for build_platform in MUSLLINUX_ARCHS
}
disallow = {
"linux": {"dependency-versions"},
"macos": manylinux_identifiers | musllinux_identifiers,
"windows": manylinux_identifiers | musllinux_identifiers,
}
options = ConfigOptions(package_dir, config_file, platform=platform, disallow=disallow)
build_config = options("build", env_plat=False, sep=" ") or "*"
skip_config = options("skip", env_plat=False, sep=" ")
test_skip = options("test-skip", env_plat=False, sep=" ")
prerelease_pythons = prerelease_pythons or strtobool(
os.environ.get("CIBW_PRERELEASE_PYTHONS", "0")
)
deprecated_selectors("CIBW_BUILD", build_config, error=True)
deprecated_selectors("CIBW_SKIP", skip_config)
deprecated_selectors("CIBW_TEST_SKIP", test_skip)
package_files = {"setup.py", "setup.cfg", "pyproject.toml"}
if not any(package_dir.joinpath(name).exists() for name in package_files):
names = ", ".join(sorted(package_files, reverse=True))
msg = f"cibuildwheel: Could not find any of {{{names}}} at root of package"
print(msg, file=sys.stderr)
sys.exit(2)
# This is not supported in tool.cibuildwheel, as it comes from a standard location.
# Passing this in as an environment variable will override pyproject.toml, setup.cfg, or setup.py
requires_python_str: Optional[str] = os.environ.get(
"CIBW_PROJECT_REQUIRES_PYTHON"
) or get_requires_python_str(package_dir)
requires_python = None if requires_python_str is None else SpecifierSet(requires_python_str)
build_selector = BuildSelector(
build_config=build_config,
skip_config=skip_config,
requires_python=requires_python,
prerelease_pythons=prerelease_pythons,
)
test_selector = TestSelector(skip_config=test_skip)
return _compute_single_options(
options, args_archs, build_selector, test_selector, platform, package_dir, output_dir
)
| 23,154
|
def coordinateToIndex(coordinate):
"""Return a raw index (e.g [4, 4]) from board coordinate (e.g. e4)"""
return [abs(int(coordinate[1]) - 8), ("a", "b", "c", "d", "e", "f", "g", "h").index(coordinate[0])]
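# Illustrative usage (added sketch): square "e4" maps to row 4 (counted from
# the top of the board) and column 4 (file "e").
print(coordinateToIndex("e4"))  # -> [4, 4]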
| 23,155
|
def parse_page(url):
"""parge the page and get all the links of images, max number is 100 due to limit by google
Args:
url (str): url of the page
Returns:
A set containing the urls of images
"""
page_content = download_page(url)
if page_content:
link_list = re.findall('src="(.*?)"', page_content)
if len(link_list) == 0:
print('get 0 links from page {0}'.format(url))
logging.info('get 0 links from page {0}'.format(url))
return set()
else:
return set(link_list)
else:
return set()
| 23,156
|
def assert_ndim(arg, ndims):
"""Raise exception if `arg` has a different number of dimensions than `ndims`."""
if not is_array(arg):
arg = np.asarray(arg)
if isinstance(ndims, Iterable):
if arg.ndim not in ndims:
raise AssertionError(f"Number of dimensions must be one of {ndims}, not {arg.ndim}")
else:
if arg.ndim != ndims:
raise AssertionError(f"Number of dimensions must be {ndims}, not {arg.ndim}")
| 23,157
|
def full_file_names(file_dir):
"""
List all full file names(with extension) in target directory.
:param file_dir:
target directory.
:return:
a list containing full file names.
"""
for _, _, files in os.walk(file_dir):
return files
| 23,158
|
def json_page_resp(name, page, paginator):
"""
Returns a standardized page response
"""
page_rows = paginator.get_page(page)
return JsonResponse({'page':page, 'pages':paginator.num_pages, name:[x['json'] for x in page_rows], 'size':len(page_rows)}, safe=False)
| 23,159
|
def input_house_detail(driver):
"""进入房源预订页面"""
pass
| 23,160
|
def get_script_runner():
"""
Gets the script runner plugin instance if any otherwise returns None.
:rtype: hackedit.api.interpreters.ScriptRunnerPlugin
"""
from .interpreters import ScriptRunnerPlugin
return _window().get_plugin_instance(ScriptRunnerPlugin)
| 23,161
|
def detect_peak(a, thresh=0.3):
"""
Detect the extent of the peak in the array by looking for where the slope
changes to flat. The highest peak is detected and followed until
the slope flattens to a threshold.
"""
iPk = np.argmax(a)
d = np.diff(a)
g1 = np.gradient(a)
g2 = np.gradient(g1)
threshPos = np.nanmax(d) * thresh
threshNeg = -1 * threshPos
# Start searching away from the peak zone
g2L = np.flipud(g2[:iPk])
g2R = g2[iPk+1:]
iL = iPk - np.min(np.where(g2L>=0))
iR = iPk + np.min(np.where(g2R>=0)) + 1
g1[iL:iR] = np.nan
# Search for the threshold crossing point
g1L = np.flipud(g1[:iPk])
g1R = g1[iPk+1:]
iL = iPk - np.min(np.where(g1L<=threshPos))
iR = iPk + np.min(np.where(g1R>=threshNeg))
msk = np.zeros_like(a)
msk[iL:iR] = 1
# DEBUG
if False:
pl.plot(a, marker='.')
pl.plot(g1, marker='^')
pl.plot(msk, marker='o')
pl.plot(np.ones_like(a)*threshPos, color='k')
pl.plot(np.ones_like(a)*threshNeg, color='k')
pl.axhline(0, color='grey')
pl.show()
return msk
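# Illustrative usage (added sketch with hypothetical data): the returned mask
# flags the samples around the maximum of a noiseless Gaussian bump.
import numpy as np
a = np.exp(-((np.arange(100) - 50.0) / 5.0) ** 2)
msk = detect_peak(a)
print(int(np.argmax(a)), int(msk.sum()))  # peak index (50) and number of flagged samples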
| 23,162
|
def latest_version():
"""
Returns the latest version, as specified by the Git tags.
"""
versions = []
for t in tags():
assert t == t.strip()
parts = t.split(".")
assert len(parts) == 3, t
parts[0] = parts[0].lstrip("v")
v = tuple(map(int, parts))
versions.append((v, t))
_, latest = max(versions)
assert latest in tags()
return latest
| 23,163
|
def get_age_group(df,n: int=10):
"""Assigns a category to the age DR
Parameters
----------
df : Dataframe
n : number of categories
Returns
-------
Dataset with Age_group column
"""
df["Age_group"] = pd.cut(df["Age"], n, labels = list(string.ascii_uppercase)[:n])
return df
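# Illustrative usage (added sketch, assuming pandas as pd and the string module
# are imported at module level as the function implies): five equal-width age
# bins labelled A to E.
df = pd.DataFrame({"Age": [5, 25, 45, 65, 85]})
print(get_age_group(df, n=5)["Age_group"].tolist())  # -> ['A', 'B', 'C', 'D', 'E']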
| 23,164
|
def test_thermal_info(duthosts, enum_rand_one_per_hwsku_hostname, snmp_physical_entity_and_sensor_info):
"""
Verify thermal information in physical entity mib with redis database
:param duthosts: DUT hosts fixture
:param enum_rand_one_per_hwsku_hostname: hostname of the randomly selected DUT
:param snmp_physical_entity_and_sensor_info: physical entity and sensor information from snmp facts
:return:
"""
snmp_physical_entity_info = snmp_physical_entity_and_sensor_info["entity_mib"]
snmp_entity_sensor_info = snmp_physical_entity_and_sensor_info["sensor_mib"]
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
keys = redis_get_keys(duthost, STATE_DB, THERMAL_KEY_TEMPLATE.format('*'))
assert keys, 'Thermal information does not exist in DB'
for key in keys:
thermal_info = redis_hgetall(duthost, STATE_DB, key)
if is_null_str(thermal_info['temperature']):
continue
name = key.split(TABLE_NAME_SEPARATOR_VBAR)[-1]
entity_info_key = PHYSICAL_ENTITY_KEY_TEMPLATE.format(name)
entity_info = redis_hgetall(duthost, STATE_DB, entity_info_key)
if not entity_info or entity_info['parent_name'] != CHASSIS_KEY:
continue
position = int(entity_info['position_in_parent'])
expect_oid = CHASSIS_MGMT_SUB_ID + DEVICE_TYPE_CHASSIS_THERMAL + position * DEVICE_INDEX_MULTIPLE + \
SENSOR_TYPE_TEMP
assert expect_oid in snmp_physical_entity_info, 'Cannot find thermal {} in physical entity mib'.format(name)
thermal_snmp_fact = snmp_physical_entity_info[expect_oid]
assert thermal_snmp_fact['entPhysDescr'] == name
assert thermal_snmp_fact['entPhysContainedIn'] == CHASSIS_MGMT_SUB_ID
assert thermal_snmp_fact['entPhysClass'] == PHYSICAL_CLASS_SENSOR
assert thermal_snmp_fact['entPhyParentRelPos'] == position
assert thermal_snmp_fact['entPhysName'] == name
assert thermal_snmp_fact['entPhysHwVer'] == ''
assert thermal_snmp_fact['entPhysFwVer'] == ''
assert thermal_snmp_fact['entPhysSwVer'] == ''
assert thermal_snmp_fact['entPhysSerialNum'] == ''
assert thermal_snmp_fact['entPhysMfgName'] == ''
assert thermal_snmp_fact['entPhysModelName'] == ''
assert thermal_snmp_fact['entPhysIsFRU'] == NOT_REPLACEABLE
# snmp_entity_sensor_info is only supported in image newer than 202012
if is_sensor_test_supported(duthost):
thermal_sensor_snmp_fact = snmp_entity_sensor_info[expect_oid]
assert thermal_sensor_snmp_fact['entPhySensorType'] == str(int(EntitySensorDataType.CELSIUS))
assert thermal_sensor_snmp_fact['entPhySensorPrecision'] == '3'
assert thermal_sensor_snmp_fact['entPhySensorScale'] == EntitySensorDataScale.UNITS
assert thermal_sensor_snmp_fact['entPhySensorOperStatus'] == str(int(EntitySensorStatus.OK)) \
or thermal_sensor_snmp_fact['entPhySensorOperStatus'] == str(int(EntitySensorStatus.NONOPERATIONAL)) \
or thermal_sensor_snmp_fact['entPhySensorOperStatus'] == str(int(EntitySensorStatus.UNAVAILABLE))
| 23,165
|
def transformer_encoder_layer(query_input,
key_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name=''):
"""The encoder layers that can be stacked to form a deep encoder.
This module consists of a multi-head (self) attention followed by
position-wise feed-forward networks, and both components are accompanied
by the post_process_layer to add residual connection, layer normalization
and dropout.
"""
key_input = pre_process_layer(
key_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_att') if key_input else None
value_input = key_input if key_input else None
attn_output = multi_head_attention(
pre_process_layer(
query_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_att'),
key_input,
value_input,
attn_bias,
d_key,
d_value,
d_model,
n_head,
attention_dropout,
param_initializer=param_initializer,
name=name + '_multi_head_att')
attn_output = post_process_layer(
query_input,
attn_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_att')
ffd_output = positionwise_feed_forward(
pre_process_layer(
attn_output,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_ffn'),
d_inner_hid,
d_model,
relu_dropout,
hidden_act,
param_initializer=param_initializer,
name=name + '_ffn')
return post_process_layer(
attn_output,
ffd_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_ffn')
| 23,166
|
def generate(ode, lenght=int(2e4)):
"""
Time series generation from an ODE
:param ode: ODE object;
:param lenght: series length;
:return: time series.
"""
state = ode.initial_state
data = np.zeros([int(state.shape[0]), lenght])
for i in range(5000):
state = runge_kutta(ode, state)
for i in range(lenght):
state = runge_kutta(ode, state)
data[:, i] = state
return data
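
# Hedged usage sketch: `runge_kutta` and the ODE class are defined elsewhere in the
# original project, so the stand-ins below (a fixed-step RK4 and a harmonic
# oscillator) are assumptions made only to illustrate the calling convention.
class HarmonicOscillator:
    initial_state = np.array([1.0, 0.0])
    dt = 0.01
    def f(self, state):
        x, v = state
        return np.array([v, -x])

def runge_kutta(ode, state):
    # classic fourth-order Runge-Kutta step of size ode.dt
    k1 = ode.f(state)
    k2 = ode.f(state + 0.5 * ode.dt * k1)
    k3 = ode.f(state + 0.5 * ode.dt * k2)
    k4 = ode.f(state + ode.dt * k3)
    return state + ode.dt * (k1 + 2 * k2 + 2 * k3 + k4) / 6.0

series = generate(HarmonicOscillator(), 1000)
print(series.shape)  # (2, 1000)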
| 23,167
|
def complexFormatToRealImag(complexVec):
"""
    A reformatting function which converts a complex vector into a real-valued array.
    Let the values in the input array be [r1+j*i1, r2+j*i2, ..., rN+j*iN];
    then the output array will be [r1, i1, r2, i2, ..., rN, iN].
    :param complexVec: complex numpy ndarray of length N
    :return: a 1D real-valued numpy array of length 2*N with the real and imaginary parts interleaved.
"""
N = len(complexVec)
ret = np.empty((2*N,), dtype=np.real(complexVec).dtype)
ret[0::2] = complexVec.real
ret[1::2] = complexVec.imag
return ret
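
# Hedged usage sketch: demonstrates the interleaving on a tiny vector, assuming
# numpy is importable as np (the function above already relies on it).
example_vec = np.array([1 + 2j, 3 - 4j])
print(complexFormatToRealImag(example_vec))  # -> [ 1.  2.  3. -4.]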
| 23,168
|
def send_invoice(order_id):
"""
當交易成功時,發送 email 的任務
P.S. 為「PDF 收據」,非 通知訂單成立的文字 email 訊息
"""
order = Order.objects.get(id=order_id)
subject = "【Cloth2U】 Your invoice"
message = "Please kindly find the attached file," +\
" the invoice for your recent purchase."
email_message = EmailMessage(subject, message,
settings.EMAIL_HOST_USER,
[order.email])
# 產生 PDF
pdf_file_name = f"order_{order_id}.pdf"
content_type = "application/pdf"
html = render_to_string("orders/order/pdf.html",\
{"order": order}).encode("utf-8")
outstream = BytesIO()
stylesheet = weasyprint.CSS(settings.STATIC_ROOT +\
"css/pdf.css")
weasyprint.HTML(string=html).write_pdf(outstream,
stylesheets=[stylesheet])
# 將渲染好的 html PDF 文件添加為 email 附件
email_message.attach(pdf_file_name,
outstream.getvalue(),
content_type)
# 發送 email
email_message.send()
| 23,169
|
def displayaction(uid):
""" Display the command from the xml file
"""
tree = ET.parse(OPENSTRIATOFILE)
root = tree.getroot()
textaction = root.findall("./action[@uid='"+uid+"']")
if len(textaction) == 0:
return "This UID does not exist!"
else:
return "UID %s action: %s" % (uid, textaction[0].text)
| 23,170
|
def get_parent(inst, rel_type='cloudify.relationships.contained_in'):
"""
Gets the parent of an instance
:param `cloudify.context.NodeInstanceContext` inst: Cloudify instance
:param string rel_type: Relationship type
:returns: Parent context
:rtype: :class:`cloudify.context.RelationshipSubjectContext` or None
"""
for rel in inst.relationships:
if rel_type in rel.type_hierarchy:
return rel.target
return None
| 23,171
|
def _query_param(key, value):
"""ensure that a query parameter's value is a string
of bytes in UTF-8 encoding.
"""
if isinstance(value, unicode):
pass
elif isinstance(value, str):
value = value.decode('utf-8')
else:
value = unicode(value)
return key, value.encode('utf-8')
| 23,172
|
def _create_or_get_dragonnet(embedding, is_training, treatment, outcome, split, getter=None):
"""
Make predictions for the outcome, using the treatment and embedding,
and predictions for the treatment, using the embedding
Both outcome and treatment are assumed to be binary
Note that we return the loss as a sum (and not a mean). This makes more sense for training dynamics
Parameters
----------
bert
is_training
treatment
outcome
label_dict
split
getter custom getter, for polyak averaging support
Returns
-------
"""
treatment_float = tf.cast(treatment, tf.float32)
with tf.variable_scope('dragon_net', reuse=tf.AUTO_REUSE, custom_getter=getter):
with tf.variable_scope('treatment'):
loss_t, per_example_loss_t, logits_t, expectation_t = _make_feedforward_classifier(
embedding, treatment, 2, split, num_hidden_layers=2)
with tf.variable_scope('outcome_st_treatment'):
loss_ot1, per_example_loss_ot1, logits_ot1, expectation_ot1 = _make_feedforward_classifier(
embedding, outcome, 2, split=split*treatment_float, num_hidden_layers=0)
with tf.variable_scope('outcome_st_no_treatment'):
loss_ot0, per_example_loss_ot0, logits_ot0, expectation_ot0 = _make_feedforward_classifier(
embedding, outcome, 2, split=split*(1.-treatment_float), num_hidden_layers=0)
tf.losses.add_loss(loss_ot0)
tf.losses.add_loss(loss_ot1)
tf.losses.add_loss(loss_t)
    training_loss = loss_ot0 + loss_ot1 + loss_t
outcome_st_treat = {'per_example_loss': per_example_loss_ot1,
'logits': logits_ot1,
'expectations': expectation_ot1}
outcome_st_no_treat = {'per_example_loss': per_example_loss_ot0,
'logits': logits_ot0,
'expectations': expectation_ot0}
treat = {'per_example_loss': per_example_loss_t,
'logits': logits_t,
'expectations': expectation_t}
return training_loss, outcome_st_treat, outcome_st_no_treat, treat
| 23,173
|
def fit_ols(Y, X):
"""Fit OLS model to both Y and X"""
model = sm.OLS(Y, X)
model = model.fit()
return model
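
# Hedged usage sketch: fit a noiseless line y = 1 + 2x, assuming numpy as np and
# statsmodels.api as sm are available (sm is already required by fit_ols).
x = np.arange(10, dtype=float)
X_design = sm.add_constant(x)            # adds the intercept column
Y_line = 1.0 + 2.0 * x
print(fit_ols(Y_line, X_design).params)  # approximately [1., 2.]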
| 23,174
|
def compute_modularity_per_code(mutual_information):
"""Computes the modularity from mutual information."""
# Mutual information has shape [num_codes, num_factors].
squared_mi = np.square(mutual_information)
max_squared_mi = np.max(squared_mi, axis=1)
numerator = np.sum(squared_mi, axis=1) - max_squared_mi
denominator = max_squared_mi * (squared_mi.shape[1] - 1.)
delta = numerator / denominator
modularity_score = 1. - delta
index = (max_squared_mi == 0.)
modularity_score[index] = 0.
return modularity_score
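
# Illustrative call, assuming the rows of the mutual-information matrix index codes
# and the columns index factors: a code concentrated on one factor scores ~1,
# a code spread evenly over all factors scores ~0.
example_mi = np.array([[1.0, 0.0, 0.0],   # perfectly modular code
                       [0.5, 0.5, 0.5]])  # completely unspecialised code
print(compute_modularity_per_code(example_mi))  # approximately [1., 0.]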
| 23,175
|
def _CheckSemanticColorsReferences(input_api, output_api):
"""
Checks colors defined in semantic_colors_non_adaptive.xml only referencing
resources in color_palette.xml.
"""
errors = []
color_palette = None
for f in IncludedFiles(input_api):
if not f.LocalPath().endswith('/semantic_colors_non_adaptive.xml'):
continue
if color_palette is None:
color_palette = _colorXml2Dict(
input_api.ReadFile(helpers.COLOR_PALETTE_PATH))
for line_number, line in f.ChangedContents():
r = helpers.COLOR_REFERENCE_PATTERN.search(line)
if not r:
continue
color = r.group()
if _removePrefix(color) not in color_palette:
errors.append(
' %s:%d\n \t%s' % (f.LocalPath(), line_number, line.strip()))
if errors:
return [
output_api.PresubmitError(
'''
Android Semantic Color Reference Check failed:
Your new color values added in semantic_colors_non_adaptive.xml are not
defined in ui/android/java/res/values/color_palette.xml, listed below.
This is banned. Colors in semantic colors can only reference
the existing color resource from color_palette.xml.
See https://crbug.com/775198 for more information.
''', errors)
]
return []
| 23,176
|
def post_new_attending():
"""Posts attending physician information to the server
This method generates the new attending physician’s
dictionary with all of his/her information, then validates
that all of the information is the correct type. If the
validation stage is satisfied, then the attending’s
dictionary is added to the database.
Parameters
----------
N/A
Returns
-------
String
result of adding a new attending
"""
new_dict = request.get_json()
validate = validate_new_attending(new_dict)
if validate is not True:
return validate, 400
attending = add_new_attending(new_dict["attending_username"],
new_dict["attending_email"],
new_dict["attending_phone"])
if attending is True:
logging.info("New Attending Physician Added!")
logging.info("Physician User Name: {}".format(
new_dict["attending_username"]))
logging.info("Physician Email: {}".format(
new_dict["attending_email"]))
return "New Attending Physician Successfully Added", 200
else:
return "Failed to Add New Attending Physician", 400
| 23,177
|
def submitFeatureWeightedGridStatistics(geoType, dataSetURI, varID, startTime, endTime, attribute, value,
gmlIDs, verbose, coverage, delim, stat, grpby, timeStep, summAttr,
weighted, wfs_url, outputfname, sleepSecs, async=False):
"""
Makes a featureWeightedGridStatistics algorithm call.
The web service interface implemented is summarized here:
https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features
Note that varID and stat can be a list of strings.
"""
# test for dods:
dataSetURI = dodsReplace(dataSetURI)
log.info('Generating feature collection.')
featureCollection = _getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs, wfs_url)
if featureCollection is None:
return
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
if not weighted:
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureGridStatisticsAlgorithm'
solo_inputs = [("FEATURE_ATTRIBUTE_NAME", attribute),
("DATASET_URI", dataSetURI),
("TIME_START", startTime),
("TIME_END", endTime),
("REQUIRE_FULL_COVERAGE", str(coverage).lower()),
("DELIMITER", delim),
("GROUP_BY", grpby),
("SUMMARIZE_TIMESTEP", str(timeStep).lower()),
("SUMMARIZE_FEATURE_ATTRIBUTE", str(summAttr).lower()),
("FEATURE_COLLECTION", featureCollection)]
if isinstance(stat, list):
num_stats = len(stat)
if num_stats > 7:
raise Exception('Too many statistics were submitted.')
else:
num_stats = 1
if isinstance(varID, list):
num_varids = len(varID)
else:
num_varids = 1
inputs = [('', '')] * (len(solo_inputs) + num_varids + num_stats)
count = 0
rm_cnt = 0
for solo_input in solo_inputs:
if solo_input[1] is not None:
inputs[count] = solo_input
count += 1
else:
rm_cnt += 1
del inputs[count:count + rm_cnt]
if num_stats > 1:
for stat_in in stat:
if stat_in not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
                raise Exception(('The statistic {} is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", '
                                 '"VARIANCE", "STD_DEV", "SUM", "COUNT"').format(stat_in))
inputs[count] = ("STATISTICS", stat_in)
count += 1
elif num_stats == 1:
if stat not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
            raise Exception(('The statistic {} is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", '
                             '"VARIANCE", "STD_DEV", "SUM", "COUNT"').format(stat))
inputs[count] = ("STATISTICS", stat)
count += 1
if num_varids > 1:
for var in varID:
inputs[count] = ("DATASET_ID", var)
count += 1
elif num_varids == 1:
inputs[count] = ("DATASET_ID", varID)
output = "OUTPUT"
return _executeRequest(processid, inputs, output, verbose, outputfname, sleepSecs, async=async)
| 23,178
|
def fetch_words(url):
"""
Fetch a list of words from a URL
Args:
url: the url of any text document (no decoding to utf-8 added)
Returns:
A list of strings containing the words in the document
"""
with urlopen(url) as story:
story_words = []
for line in story:
line_words = line.split()
for word in line_words:
story_words.append(word)
return story_words
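
# Hedged usage sketch: the URL below is only a placeholder for any plain-text
# document reachable over HTTP; the returned items are bytes because the lines
# are never decoded.
words = fetch_words('http://example.com/some-plain-text-document.txt')
print(words[:5])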
| 23,179
|
def generate_fgsm_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):
"""
Untargeted attack. Y is not needed.
"""
fgsm = FastGradientMethod(model, back='tf', sess=sess)
fgsm_params = {'eps': 0.1, 'ord': np.inf, 'y': None, 'clip_min': 0, 'clip_max': 1}
fgsm_params = override_params(fgsm_params, attack_params)
X_adv = fgsm.generate_np(X, **fgsm_params)
return X_adv
| 23,180
|
def add_class(attrs_dict, classes_str):
"""Adds the classes_str to the 'class' key in attrs_dict, or creates it"""
try:
attrs_dict["class"] += " " + classes_str
except KeyError:
attrs_dict["class"] = classes_str
| 23,181
|
def s3_bucket_for(bucket_prefix, path):
"""returns s3 bucket for path"""
suffix = s3_bucket_suffix_for(path)
return "{}-{}".format(bucket_prefix, suffix)
| 23,182
|
def regrid_create_operator(regrid, name, parameters):
"""Create a new `RegridOperator` instance.
:Parameters:
regrid: `ESMF.Regrid`
The `ESMF` regridding operator between two fields.
name: `str`
A descriptive name for the operator.
parameters: `dict`
Parameters that describe the complete coordinate system of
the destination grid.
:Returns:
`RegridOperator`
The new regrid operator.
"""
return RegridOperator(regrid, name, **parameters)
| 23,183
|
def _decode_hmc_values(hmc_ref):
"""Decrypts any sensitive HMC values that were encrypted in the DB"""
if hmc_ref is not None:
hmc_ref = jsonutils.to_primitive(hmc_ref)
#Make sure to DeCrypt the Password after retrieving from the database
## del two lines by lixx
#if hmc_ref.get('password') is not None:
# hmc_ref['password'] = EncryptHandler().decode(hmc_ref['password'])
return hmc_ref
| 23,184
|
def config_source_local(src_folder, conanfile, output, conanfile_path, hook_manager):
""" Entry point for the "conan source" command.
"""
conanfile_folder = os.path.dirname(conanfile_path)
_run_source(conanfile, conanfile_path, src_folder, hook_manager, output, reference=None,
client_cache=None, export_folder=None, export_source_folder=None,
local_sources_path=conanfile_folder)
| 23,185
|
def init_data(my_data, rp):
""" initialize the quadrant problem """
msg.bold("initializing the quadrant problem...")
# make sure that we are passed a valid patch object
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in quad.py")
print(my_data.__class__)
sys.exit()
# get the density, momenta, and energy as separate variables
dens = my_data.get_var("density")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
ener = my_data.get_var("energy")
# initialize the components, remember, that ener here is
# rho*eint + 0.5*rho*v**2, where eint is the specific
# internal energy (erg/g)
r1 = rp.get_param("quadrant.rho1")
u1 = rp.get_param("quadrant.u1")
v1 = rp.get_param("quadrant.v1")
p1 = rp.get_param("quadrant.p1")
r2 = rp.get_param("quadrant.rho2")
u2 = rp.get_param("quadrant.u2")
v2 = rp.get_param("quadrant.v2")
p2 = rp.get_param("quadrant.p2")
r3 = rp.get_param("quadrant.rho3")
u3 = rp.get_param("quadrant.u3")
v3 = rp.get_param("quadrant.v3")
p3 = rp.get_param("quadrant.p3")
r4 = rp.get_param("quadrant.rho4")
u4 = rp.get_param("quadrant.u4")
v4 = rp.get_param("quadrant.v4")
p4 = rp.get_param("quadrant.p4")
cx = rp.get_param("quadrant.cx")
cy = rp.get_param("quadrant.cy")
gamma = rp.get_param("eos.gamma")
# there is probably an easier way to do this, but for now, we
# will just do an explicit loop. Also, we really want to set
    # the pressure and get the internal energy from that, and then
# compute the total energy (which is what we store). For now
# we will just fake this
myg = my_data.grid
iq1 = np.logical_and(myg.x2d >= cx, myg.y2d >= cy)
iq2 = np.logical_and(myg.x2d < cx, myg.y2d >= cy)
iq3 = np.logical_and(myg.x2d < cx, myg.y2d < cy)
iq4 = np.logical_and(myg.x2d >= cx, myg.y2d < cy)
# quadrant 1
dens[iq1] = r1
xmom[iq1] = r1*u1
ymom[iq1] = r1*v1
ener[iq1] = p1/(gamma - 1.0) + 0.5*r1*(u1*u1 + v1*v1)
# quadrant 2
dens[iq2] = r2
xmom[iq2] = r2*u2
ymom[iq2] = r2*v2
ener[iq2] = p2/(gamma - 1.0) + 0.5*r2*(u2*u2 + v2*v2)
# quadrant 3
dens[iq3] = r3
xmom[iq3] = r3*u3
ymom[iq3] = r3*v3
ener[iq3] = p3/(gamma - 1.0) + 0.5*r3*(u3*u3 + v3*v3)
# quadrant 4
dens[iq4] = r4
xmom[iq4] = r4*u4
ymom[iq4] = r4*v4
ener[iq4] = p4/(gamma - 1.0) + 0.5*r4*(u4*u4 + v4*v4)
| 23,186
|
def dtensor_shutdown_tpu_system():
"""Shutdown TPU system."""
@def_function.function
def _shutdown_tpu_system():
return gen_dtensor_ops.shutdown_tpu_system()
success = _shutdown_tpu_system() if context.is_tfrt_enabled() else True
if success:
logging.info("TPU system shut down.")
else:
logging.warning("TPU system fails to shut down.")
| 23,187
|
def edit_comment(request):
"""
Edit an existing comment
"""
response = {"status": "success",
"data": {}}
if "char_id" in request.POST:
char_id = request.POST["char_id"]
else:
response["status"] = "fail"
response["data"]["message"] = "Paste ID was not provided (POST parameter 'char_id')"
return HttpResponse(json.dumps(response), status=422)
try:
paste = Paste.objects.get(char_id=char_id)
except ObjectDoesNotExist:
response["status"] = "fail"
response["data"]["message"] = "The paste couldn't be found."
return HttpResponse(json.dumps(response))
if "id" in request.POST:
id = int(request.POST["id"])
else:
response["status"] = "fail"
response["data"]["message"] = "Comment ID was not provided (POST parameter 'id')"
return HttpResponse(json.dumps(response), status=422)
if "page" in request.POST:
page = int(request.POST["page"])
else:
page = 0
if not request.user.is_authenticated():
response["status"] = "fail"
response["data"]["message"] = "You are not logged in."
return HttpResponse(json.dumps(response), status=422)
try:
comment = Comment.objects.get(id=id)
except ObjectDoesNotExist:
response["status"] = "fail"
response["data"]["message"] = "The comment doesn't exist."
return HttpResponse(json.dumps(response), status=400)
if comment.user != request.user:
response["status"] = "fail"
response["data"]["message"] = "You are trying to edit someone else's comment."
return HttpResponse(json.dumps(response), status=422)
submit_form = SubmitCommentForm(request.POST or None)
if submit_form.is_valid():
comment_data = submit_form.cleaned_data
comment.text = comment_data["text"]
comment.save()
total_comment_count = Comment.objects.filter(paste=paste).count()
start = page * Comment.COMMENTS_PER_PAGE
end = start + Comment.COMMENTS_PER_PAGE
response["data"]["edited_comment_id"] = comment.id
response["data"]["comments"] = queryset_to_list(Comment.objects.filter(paste=paste) \
.select_related("user") \
[start:end],
fields=["id", "text", "submitted", "edited", "user__username=username"])
response["data"]["page"] = page
response["data"]["pages"] = math.ceil(float(total_comment_count) / float(Comment.COMMENTS_PER_PAGE))
if response["data"]["pages"] == 0:
response["data"]["pages"] = 1
response["data"]["total_comment_count"] = total_comment_count
else:
response["status"] = "fail"
response["data"]["message"] = "Provided text wasn't valid."
return HttpResponse(json.dumps(response))
| 23,188
|
def aws_get_size(size):
""" Get Node Size - Ex: (cmd:<size>)"""
conn = util_get_connection()
sizes = [i for i in conn.list_sizes()]
if size:
for i in sizes:
if str(i.ram) == size or i.id == size:
print >> sys.stderr, ' - '.join([i.id, str(i.ram), str(i.price)])
return i
return None
| 23,189
|
def make_positions(tensor, padding_idx, onnx_trace=False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
    mask = tensor.ne(padding_idx).int()
    return (
        torch.cumsum(mask, dim=1).type_as(mask) * mask
    ).long() + padding_idx
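
# Hedged usage sketch (assumes PyTorch is installed and imported as torch):
# tokens equal to padding_idx are treated as padding and excluded from the
# position numbering.
tokens = torch.tensor([[7, 8, 9, 1, 1]])  # here 1 is the padding index
print(make_positions(tokens, padding_idx=1))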
| 23,190
|
def taubin_curv(coords, resolution):
"""Curvature calculation based on algebraic circle fit by Taubin.
Adapted from: "https://github.com/PmagPy/PmagPy/blob/2efd4a92ddc19c26b953faaa5c08e3d8ebd305c9/SPD/lib
/lib_curvature.py"
G. Taubin, "Estimation Of Planar Curves, Surfaces And Nonplanar
Space Curves Defined By Implicit Equations, With
Applications To Edge And Range Image Segmentation",
IEEE Trans. PAMI, Vol. 13, pages 1115-1138, (1991)
Parameters
----------
coords : list
        Nested list of paired x and y coordinates for each point of the line where a curve needs to be fitted.
[[x_1, y_1], [x_2, y_2], ....]
resolution : float or int
Number of pixels per mm in original image.
Returns
-------
float or int(0)
If the radius of the fitted circle is finite, it will return the curvature (1/radius).
If the radius is infinite, it will return 0.
"""
warnings.filterwarnings("ignore") # suppress RuntimeWarnings from dividing by zero
xy = np.array(coords)
x = xy[:, 0] - np.mean(xy[:, 0]) # norming points by x avg
y = xy[:, 1] - np.mean(xy[:, 1]) # norming points by y avg
# centroid = [np.mean(xy[:, 0]), np.mean(xy[:, 1])]
z = x * x + y * y
zmean = np.mean(z)
z0 = ((z - zmean) / (2. * np.sqrt(zmean))) # changed from using old_div to Python 3 native division
zxy = np.array([z0, x, y]).T
u, s, v = np.linalg.svd(zxy, full_matrices=False) #
v = v.transpose()
a = v[:, 2]
a[0] = (a[0]) / (2. * np.sqrt(zmean))
a = np.concatenate([a, [(-1. * zmean * a[0])]], axis=0)
# a, b = (-1 * a[1:3]) / a[0] / 2 + centroid
r = np.sqrt(a[1] * a[1] + a[2] * a[2] - 4 * a[0] * a[3]) / abs(a[0]) / 2
if np.isfinite(r):
curv = 1 / (r / resolution)
if curv >= 0.00001:
return curv
else:
return 0
else:
return 0
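
# Hedged sanity check: points sampled from a circle of radius 10 px at a
# resolution of 1 px/mm should give a curvature close to 1/10 per mm.
theta = np.linspace(0, np.pi, 50)
circle_pts = [[10.0 * np.cos(t), 10.0 * np.sin(t)] for t in theta]
print(taubin_curv(circle_pts, resolution=1))  # approximately 0.1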
| 23,191
|
def periodic_general(box: Box,
fractional_coordinates: bool=True,
wrapped: bool=True) -> Space:
"""Periodic boundary conditions on a parallelepiped.
This function defines a simulation on a parallelepiped, $X$, formed by
applying an affine transformation, $T$, to the unit hypercube
$U = [0, 1]^d$ along with periodic boundary conditions across all
of the faces.
Formally, the space is defined such that $X = {Tu : u \in [0, 1]^d}$.
The affine transformation, $T$, can be specified in a number of different
ways. For a parallelepiped that is: 1) a cube of side length $L$, the affine
transformation can simply be a scalar; 2) an orthorhombic unit cell can be
specified by a vector `[Lx, Ly, Lz]` of lengths for each axis; 3) a general
triclinic cell can be specified by an upper triangular matrix.
There are a number of ways to parameterize a simulation on $X$.
`periodic_general` supports two parametrizations of $X$ that can be selected
using the `fractional_coordinates` keyword argument.
1) When `fractional_coordinates=True`, particle positions are stored in the
unit cube, $u\in U$. Here, the displacement function computes the
displacement between $x, y \in X$ as $d_X(x, y) = Td_U(u, v)$ where
$d_U$ is the displacement function on the unit cube, $U$, $x = Tu$, and
$v = Tv$ with $u, v\in U$. The derivative of the displacement function
is defined so that derivatives live in $X$ (as opposed to being
backpropagated to $U$). The shift function, `shift_fn(R, dR)` is defined
so that $R$ is expected to lie in $U$ while $dR$ should lie in $X$. This
combination enables code such as `shift_fn(R, force_fn(R))` to work as
intended.
2) When `fractional_coordinates=False`, particle positions are stored in
the parallelepiped $X$. Here, for $x, y\in X$, the displacement function
is defined as $d_X(x, y) = Td_U(T^{-1}x, T^{-1}y)$. Since there is an
     extra multiplication by $T^{-1}$, this parameterization is typically
     slower than `fractional_coordinates=True`. As in 1), the displacement
function is defined to compute derivatives in $X$. The shift function
is defined so that $R$ and $dR$ should both lie in $X$.
Example:
```python
from jax import random
side_length = 10.0
disp_frac, shift_frac = periodic_general(side_length,
fractional_coordinates=True)
disp_real, shift_real = periodic_general(side_length,
fractional_coordinates=False)
# Instantiate random positions in both parameterizations.
R_frac = random.uniform(random.PRNGKey(0), (4, 3))
R_real = side_length * R_frac
  # Make some shift vectors.
dR = random.normal(random.PRNGKey(0), (4, 3))
disp_real(R_real[0], R_real[1]) == disp_frac(R_frac[0], R_frac[1])
transform(side_length, shift_frac(R_frac, 1.0)) == shift_real(R_real, 1.0)
```
It is often desirable to deform a simulation cell either: using a finite
deformation during a simulation, or using an infinitesimal deformation while
computing elastic constants. To do this using fractional coordinates, we can
supply a new affine transformation as `displacement_fn(Ra, Rb, box=new_box)`.
When using real coordinates, we can specify positions in a space $X$ defined
by an affine transformation $T$ and compute displacements in a deformed space
$X'$ defined by an affine transformation $T'$. This is done by writing
`displacement_fn(Ra, Rb, new_box=new_box)`.
There are a few caveats when using `periodic_general`. `periodic_general`
uses the minimum image convention, and so it will fail for potentials whose
cutoff is longer than the half of the side-length of the box. It will also
fail to find the correct image when the box is too deformed. We hope to add a
more robust box for small simulations soon (TODO) along with better error
checking. In the meantime caution is recommended.
Args:
box: A `(spatial_dim, spatial_dim)` affine transformation.
fractional_coordinates: A boolean specifying whether positions are stored
in the parallelepiped or the unit cube.
wrapped: A boolean specifying whether or not particle positions are
      remapped back into the box after each step.
Returns:
(displacement_fn, shift_fn) tuple.
"""
inv_box = inverse(box)
def displacement_fn(Ra, Rb, **kwargs):
_box, _inv_box = box, inv_box
if 'box' in kwargs:
_box = kwargs['box']
if not fractional_coordinates:
_inv_box = inverse(_box)
if 'new_box' in kwargs:
_box = kwargs['new_box']
if not fractional_coordinates:
Ra = transform(_inv_box, Ra)
Rb = transform(_inv_box, Rb)
dR = periodic_displacement(f32(1.0), pairwise_displacement(Ra, Rb))
return transform(_box, dR)
def u(R, dR):
if wrapped:
return periodic_shift(f32(1.0), R, dR)
return R + dR
def shift_fn(R, dR, **kwargs):
if not fractional_coordinates and not wrapped:
return R + dR
_box, _inv_box = box, inv_box
if 'box' in kwargs:
_box = kwargs['box']
_inv_box = inverse(_box)
if 'new_box' in kwargs:
_box = kwargs['new_box']
dR = transform(_inv_box, dR)
if not fractional_coordinates:
R = transform(_inv_box, R)
R = u(R, dR)
if not fractional_coordinates:
R = transform(_box, R)
return R
return displacement_fn, shift_fn
| 23,192
|
def filter(
needle: Callable[[T], object],
haystack: Iterable[T],
parse: None = None,
*,
allow_mismatch: Literal[False] = False,
allow_many: bool = False,
allow_duplicates: bool = False,
) -> T:
"""
`haystack` is a sequence of T elements, and `needle` must accept `T` values. If `parse` is None, and allow_mismatch=False, we
return a T.
"""
| 23,193
|
def test_additive_hash_returns_correct_values(key, value):
"""Test that additive hash returns the correct values."""
from hash import additive_hash
assert additive_hash(key) == value
| 23,194
|
def calculate_mac(mac_type, credentials, options, url_encode=False):
"""Calculates a message authentication code (MAC)."""
normalized = normalize_string(mac_type, options)
digestmod = module_for_algorithm(credentials['algorithm'])
result = hmac.new(credentials['key'], normalized, digestmod)
if url_encode:
mac = urlsafe_b64encode(result.digest())
else:
mac = b64encode(result.digest())
return mac
| 23,195
|
def run():
"""
Test Case - Fbx mesh group Import scaling in Atom:
    1. Creates a new level called MeshGroupingTemporaryLevel
    2. Has a list of 7 meshes, for each of which it will do the following:
- Create an entity and attach the mesh to it.
- Sets it with an initial offset of x:-15, y:0, z:0
- For each additional mesh the x offset is modified by +3.0
3. Enters game mode to take a screenshot for comparison, then exits game mode.
4. Prints general.log("FBX mesh group scaling test has completed.")
5. Exit the Editor and ends the test.
Tests will fail immediately if any of these log lines are found:
1. Trace::Assert
2. Trace::Error
3. Traceback (most recent call last):
:return: None
"""
def after_level_load():
"""Function to call after creating/opening a level to ensure it loads."""
# Give everything a second to initialize.
general.idle_enable(True)
general.update_viewport()
general.idle_wait(0.5) # half a second is more than enough for updating the viewport.
# Close out problematic windows, FPS meters, and anti-aliasing.
if general.is_helpers_shown(): # Turn off the helper gizmos if visible
general.toggle_helpers()
if general.is_pane_visible("Error Report"): # Close Error Report windows that block focus.
general.close_pane("Error Report")
if general.is_pane_visible("Error Log"): # Close Error Log windows that block focus.
general.close_pane("Error Log")
general.run_console("r_displayInfo=0")
general.run_console("r_antialiasingmode=0")
return True
# Create a new test level
test_level_name = 'MeshGroupingTemporaryLevel'
heightmap_resolution = 128
heightmap_meters_per_pixel = 1
terrain_texture_resolution = 128
use_terrain = False
# Return codes are ECreateLevelResult defined in CryEdit.h
return_code = general.create_level_no_prompt(
test_level_name, heightmap_resolution, heightmap_meters_per_pixel, terrain_texture_resolution, use_terrain)
if return_code == 1:
general.log(f"{test_level_name} level already exists")
elif return_code == 2:
general.log("Failed to create directory")
elif return_code == 3:
general.log("Directory length is too long")
elif return_code != 0:
general.log("Unknown error, failed to create level")
else:
general.log(f"{test_level_name} level created successfully")
after_level_load()
helper.init_idle()
helper.open_level(test_level_name)
general.idle_wait_frames(1)
# These are the meshes that are used to test FBX mesh import scaling.
meshes = [
"cube_group.azmodel",
"cube_parent.azmodel",
"cube_parent_plus_locator.azmodel",
"cube_parent_plus_locator_rotatez_90.azmodel",
"cube_parent__rotatez_90_locator.azmodel",
"cube_parent__scaley_2_locator.azmodel",
"cube_parent__transx_100_locator.azmodel"
]
# Initial offset values to iterate off of for mesh scaling of meshes.
offset = math.Vector3()
offset.x = -15.0
offset.y = 0.0
offset.z = 0.0
# For each mesh, create an entity and attach the mesh to it, then scale it using the values in offset.
meshIndex = 0
for mesh in meshes:
meshIndex = meshIndex + 1
offset.x += 3.0
entityName = "TestEntity{}".format(meshIndex)
helper_create_entity_with_mesh("dag_hierarchy/" + mesh, offset, entityName)
helper.enter_game_mode(["", ""])
# Example: how to capture a screenshot
general.set_viewport_size(1280, 720)
general.set_cvar_integer('r_DisplayInfo', 0)
general.idle_wait_frames(1)
ScreenshotHelper(general.idle_wait_frames).capture_screenshot_blocking(
"screenshot_atom_FBXMeshGroupImportScaling.dds")
helper.exit_game_mode(["", ""])
general.log("FBX mesh group scaling test has completed.")
helper.close_editor()
| 23,196
|
def get_prime(num_dict):
"""获取字典里所有的素数"""
prime_dict = {}
for key, value in num_dict.items():
if value:
prime_dict.update({key: key})
return prime_dict
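
# Tiny demonstration: the values act as truth flags marking which keys are prime.
flags = {2: True, 3: True, 4: False, 5: True, 6: False}
print(get_prime(flags))  # {2: 2, 3: 3, 5: 5}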
| 23,197
|
def get_shared_keys(param_list):
"""
For the given list of parameter dictionaries, return a list of the dictionary
keys that appear in every parameter dictionary
>>> get_shared_keys([{'a':0, 'b':1, 'c':2, 'd':3}, {'a':0, 'b':1, 'c':3}, {'a':0, 'b':'beta'}])
['a', 'b']
>>> get_shared_keys([{'a':0, 'd':3}, {'a':0, 'b':1, 'c':2, 'd':3}, {'a':0, 'b':1, 'c':2}])
['a']
"""
    if not param_list:
        return []
keys = set(param_list[0].keys())
for i in range(1, len(param_list)):
keys = keys.intersection(param_list[i].keys())
keys = list(keys)
keys.sort()
return keys
| 23,198
|
def labeledTest(*labels):
"""This decorator mark a class as an integrationTest
this is used in the test call for filtering integrationTest
and unittest.
We mark the difference by the usage of service dependency:
* An unittest can run without additional services.
* An integration test need additional services (such as
redis or postgres).
Usage:
@labeledTest("integration")
class FakeOutputTest(BaseApiTest):
pass
"""
def wrapper(cl):
cl._label = set(labels)
return cl
return wrapper
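
# Small demonstration: the decorator simply records the labels on the class.
@labeledTest("integration", "slow")
class _Demo:
    pass
print(_Demo._label == {"integration", "slow"})  # True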
| 23,199
|