| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def _make_rel_url_path(src, dst):
"""src is a file or dir which wants to adress dst relatively, calculate
the appropriate path to get from here to there."""
srcdir = os.path.abspath(src + "/..")
dst = os.path.abspath(dst)
# For future reference, I hate doing dir munging with string operations
    # with a fiery passion, but pragmatism won out over making a lib...
common = os.path.commonprefix((srcdir, dst))
reldst = dst[len(common):]
srcdir = srcdir[len(common):]
newpath = re.sub(""".*?[/\\\]|.+$""", "../", srcdir) or "./"
newpath = newpath + reldst
newpath = newpath.replace("\\", "/")
newpath = newpath.replace("//", "/")
return newpath
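A brief usage sketch (hypothetical paths; the function relies on os and re being imported in the enclosing module):

import os, re  # required by _make_rel_url_path above

print(_make_rel_url_path("site/docs/index.html", "site/assets/style.css"))
# -> "../assets/style.css"; os.path.relpath gives a comparable result for plain filesystem paths.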
| 20,900
|
def get_required_params(request, expected_params: list, type: str = 'POST') -> dict:
"""Gets the list of params from request, or returns None if ANY is missing.
:param request: The Request
:type request: flask.Request
:param expected_params: The list of expected parameters
:type expected_params: list
:param type: The request type, defaults to POST, can be GET to get query params.
:type type: str
    :return: Dictionary with parameters as keys and their values as values, or None if any is missing
:rtype: dict
"""
res = {}
for param in expected_params:
if type == 'POST':
val = request.form.get(param)
elif type == 'GET':
val = request.args.get(param)
else:
val = None
if not val:
return None
res[param] = val
return res
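A usage sketch in a hypothetical Flask view (the route and field names are illustrative, not from the original project):

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/login", methods=["POST"])
def login():
    params = get_required_params(request, ["username", "password"])
    if params is None:
        return jsonify(error="missing parameters"), 400
    return jsonify(ok=True, user=params["username"])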
| 20,901
|
def md_to_notebook(text):
"""Convert a Markdown text to a Jupyter notebook, using Pandoc"""
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.write(text.encode('utf-8'))
tmp_file.close()
pandoc(u'--from markdown --to ipynb -s --atx-headers --wrap=preserve --preserve-tabs', tmp_file.name, tmp_file.name)
with open(tmp_file.name, encoding='utf-8') as opened_file:
notebook = ipynb_reads(opened_file.read(), as_version=4)
os.unlink(tmp_file.name)
return notebook
| 20,902
|
def bot_send(msg, bot_id, broadcast):
"""
Send a message to a telegram user or group specified on chat_id
chat_id must be a number!
bot_id == bot_username
"""
    if broadcast:
bot = telegram.Bot(token=config[bot_id]["bot_api_token"])
bot.sendMessage(chat_id=config[bot_id]["group_chat_id"], text=msg)
else:
print(msg)
return None
| 20,903
|
def output_data(path, df):
"""
This file removes unwanted columns and outputs the final dataset
:param path:
:param df:
:return:
"""
df.to_csv(path,index=False)
| 20,904
|
def dataset_source_xnat(bids_dir):
"""
Method to check if the data was downloaded from xnat
:param bids_dir: BIDS Directory
:return: True or False
"""
    dataset_description_file = glob.glob(bids_dir + "/**/dataset_description.json", recursive=True)
    # glob only returns existing paths, so an empty result means the file is absent.
    if not dataset_description_file:
        return False
    else:
with open(dataset_description_file[0], 'r') as f:
json_contents = json.load(f)
if 'DatasetDOI' not in json_contents:
return False
elif not json_contents['DatasetDOI'].endswith('xnat'):
return False
return True
| 20,905
|
def split_dataset(dataset, num_train=1200):
"""
Split the dataset into a training and test set.
    Args:
      dataset: an iterable of Characters.
      num_train: number of examples to put in the training split (default 1200).
Returns:
A tuple (train, test) of Character sequences.
"""
all_data = list(dataset)
random.shuffle(all_data)
return all_data[:num_train], all_data[num_train:]
| 20,906
|
def get_constant():
"""
Keep learning rate constant
"""
def update(lr, epoch):
return lr
return update
| 20,907
|
def spatial_pack_nhwc(data, kernel, stride, padding, in_bits, weight_bits,
pack_dtype, out_dtype, dorefa=False):
""" Compute convolution with pack on spatial axes. """
assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
data_q = bitpack(data, in_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype)
kernel_q = bitpack(kernel, weight_bits, pack_axis=2, bit_axis=4, pack_type=pack_dtype)
_, H, W, CI, IB = data_q.shape
KH, KW, _, CO, KB = kernel_q.shape
HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
HCAT, WCAT = KH-1, KW-1
wkl = _get_workload(data, kernel, stride, padding, out_dtype, "NHWC")
sch = _get_schedule(wkl, "NHWC")
VH = sch.vh
VW = sch.vw
VC = sch.vc
PAD_H = H + 2*HPAD
PAD_W = W + 2*WPAD
OH = (H + 2*HPAD - KH) // HSTR + 1
OW = (W + 2*WPAD - KW) // WSTR + 1
dvshape = (1, PAD_H//(VH*HSTR), PAD_W//(VW*WSTR), VH*HSTR+HCAT, VW*WSTR+WCAT, CI, IB)
kvshape = (CO, KH, KW, CI, VC, KB)
ovshape = (1, OH, OW, CO, VH, VW, VC)
oshape = (1, OH, OW, CO)
if (HPAD != 0 and WPAD != 0):
data_pad = pad(data_q, (0, HPAD, WPAD, 0, 0), name="data_pad")
else:
data_pad = data_q
data_vec = tvm.compute(dvshape, lambda n, h, w, vh, vw, ci, b: \
data_pad[n][h*VH*HSTR+vh][w*VW*WSTR+vw][ci][b], name='data_vec')
kernel_vec = tvm.compute(kvshape, lambda co, dh, dw, ci, vc, b: \
kernel_q[dh][dw][ci][co*VC+vc][b], name='kernel_vec')
ci = tvm.reduce_axis((0, CI), name='ci')
dh = tvm.reduce_axis((0, KH), name='dh')
dw = tvm.reduce_axis((0, KW), name='dw')
b1 = tvm.reduce_axis((0, IB), name='ib')
b2 = tvm.reduce_axis((0, KB), name='kb')
def _conv(n, h, w, co, vh, vw, vc):
b1b2 = (b1+b2).astype(out_dtype)
if dorefa:
return tvm.sum(
(tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1].astype(out_dtype) &
kernel_vec[co, dh, dw, ci, vc, b2].astype(out_dtype)) -
tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1].astype(out_dtype) &
~kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype)) << b1b2,
axis=[dh, dw, ci, b1, b2])
return tvm.sum(tvm.popcount(
data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1] &
kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype) << b1b2,
axis=[dh, dw, ci, b1, b2])
conv = tvm.compute(ovshape, _conv, name='conv')
return tvm.compute(oshape, lambda n, h, w, co:
conv[n][h//VH][w//VW][co//VC][h%VH][w%VW][co%VC],
name='output_unpack', tag='spatial_bitserial_conv_nhwc')
| 20,908
|
def load_random_tt_distribution(numAgents, r, pu, samples):
"""
Load a file with a population of random turn-taking values, assuming that it exists
Parameters:
* numAgents -- the desired number of probabilistic agents to include
* r -- the turn-taking resolution
* pu -- the probability that a bit in each usage attempt sequence will be 1
* samples -- the number of random turn-taking values to generate
See Section 3 of:
Raffensperger, P. A., Webb, R. Y., Bones, P. J., and McInnes, A. I. (2012).
A simple metric for turn-taking in emergent communication.
Adaptive Behavior, 20(2):104-116.
"""
filename = get_tt_distribution_filename(numAgents, r, pu, samples)
    # Pickle files must be opened in binary mode; a context manager ensures the handle is closed.
    with open(filename, 'rb') as file:
        return pickle.load(file)
| 20,909
|
def fail_after(seconds: float) -> ContextManager[CancelScope]:
"""
    Create a cancel scope with the given timeout, and raise an error if it is actually
    cancelled.
This function and move_on_after() are similar in that both create a cancel scope
with a given timeout, and if the timeout expires then both will cause CancelledError
to be raised within the scope. The difference is that when the CancelledError
exception reaches move_on_after(), it’s caught and discarded. When it reaches
fail_after(), then it’s caught and TimeoutError is raised in its place.
"""
return fail_at(get_running_loop().time() + seconds)
| 20,910
|
def _get_dataset_names_mapping(
names: Union[str, Set[str], Dict[str, str]] = None
) -> Dict[str, str]:
"""Take a name or a collection of dataset names
and turn it into a mapping from the old dataset names to the provided ones if necessary.
Args:
names: A dataset name or collection of dataset names.
When str or Set[str] is provided, the listed names will stay
the same as they are named in the provided pipeline.
When Dict[str, str] is provided, current names will be
mapped to new names in the resultant pipeline.
Returns:
A dictionary that maps the old dataset names to the provided ones.
Examples:
>>> _get_dataset_names_mapping("dataset_name")
{"dataset_name": "dataset_name"} # a str name will stay the same
>>> _get_dataset_names_mapping(set(["ds_1", "ds_2"]))
{"ds_1": "ds_1", "ds_2": "ds_2"} # a Set[str] of names will stay the same
>>> _get_dataset_names_mapping({"ds_1": "new_ds_1_name"})
{"ds_1": "new_ds_1_name"} # a Dict[str, str] of names will map key to value
"""
if names is None:
return {}
if isinstance(names, str):
return {names: names}
if isinstance(names, dict):
return copy.deepcopy(names)
return {item: item for item in names}
| 20,911
|
def test_worker_serialise(worker):
"""Test serialiseation of the worker"""
worker_dict = worker.to_dict()
worker_dict['computer_id'] = 'remote'
worker2 = AiiDAFWorker.from_dict(worker_dict)
assert worker2.computer_id == 'remote'
worker_dict.pop("username")
worker2 = AiiDAFWorker.from_dict(worker_dict)
assert worker2.username == DEFAULT_USERNAME
| 20,912
|
def _json_keyify(args):
""" converts arguments into a deterministic key used for memoizing """
args = tuple(sorted(args.items(), key=lambda e: e[0]))
return json.dumps(args)
| 20,913
|
def tgsegsm_vect(time_in, data_in):
"""
Transform data from GSE to GSM.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
xgse, ygse, zgse cartesian GSE coordinates.
Returns
-------
xgsm: list of float
Cartesian GSM coordinates.
ygsm: list of float
Cartesian GSM coordinates.
zgsm: list of float
Cartesian GSM coordinates.
"""
xgsm, ygsm, zgsm = 0, 0, 0
d = np.array(data_in)
xgse, ygse, zgse = d[:, 0], d[:, 1], d[:, 2]
gd1, gd2, gd3 = cdipdir_vect(time_in)
gst, slong, sra, sdec, obliq = csundir_vect(time_in)
gs1 = np.cos(sra) * np.cos(sdec)
gs2 = np.sin(sra) * np.cos(sdec)
gs3 = np.sin(sdec)
sgst = np.sin(gst)
cgst = np.cos(gst)
ge1 = 0.0
ge2 = -np.sin(obliq)
ge3 = np.cos(obliq)
gm1 = gd1 * cgst - gd2 * sgst
gm2 = gd1 * sgst + gd2 * cgst
gm3 = gd3
gmgs1 = gm2 * gs3 - gm3 * gs2
gmgs2 = gm3 * gs1 - gm1 * gs3
gmgs3 = gm1 * gs2 - gm2 * gs1
rgmgs = np.sqrt(gmgs1**2 + gmgs2**2 + gmgs3**2)
cdze = (ge1 * gm1 + ge2 * gm2 + ge3 * gm3)/rgmgs
sdze = (ge1 * gmgs1 + ge2 * gmgs2 + ge3 * gmgs3)/rgmgs
xgsm = xgse
ygsm = cdze * ygse + sdze * zgse
zgsm = -sdze * ygse + cdze * zgse
return xgsm, ygsm, zgsm
| 20,914
|
async def gmake_(message: Message):
""" make folder """
await Worker(message).make_folder()
| 20,915
|
def test_xcorr_zscored():
"""
Test this function, which is not otherwise tested in the testing of the
EventRelatedAnalyzer
"""
cycles = 10
l = 1024
unit = 2 * np.pi / l
t = np.arange(0, 2 * np.pi + unit, unit)
signal = np.sin(cycles * t)
events = np.zeros(t.shape)
#Zero crossings:
idx = np.where(np.abs(signal) < 0.03)[0]
#An event occurs at the beginning of every cycle:
events[idx[:-2:2]] = 1
a = tsa.freq_domain_xcorr_zscored(signal, events, 1000, 1000)
npt.assert_almost_equal(np.mean(a), 0, 1)
npt.assert_almost_equal(np.std(a), 1, 1)
| 20,916
|
def real_main():
"""Main program without profiling.
"""
import django.core.handlers.wsgi
# Create a Django application for WSGI.
application = django.core.handlers.wsgi.WSGIHandler()
from soc.modules import callback
from soc.modules import core
callback.registerCore(core.Core())
callback.getCore().registerModuleCallbacks()
# Run the WSGI CGI handler with that application.
util.run_wsgi_app(application)
| 20,917
|
def choose_a_pick_naive(numbers_left):
"""
Choose any larger number
:param numbers_left:
:return:
"""
if numbers_left[0] > numbers_left[-1]:
return 0, numbers_left[0]
elif numbers_left[-1] > numbers_left[0]:
return -1, numbers_left[-1]
else:
return 0, numbers_left[0]
| 20,918
|
def get_group_names(exp_path, uniquechannel ='Ki_t', fname="trajectoriesDat.csv"):
"""Similar to get_grp_list, but uses trajectoriesDat column names"""
if "_combined" in exp_path:
pattern = 'trajectoriesDat_region'
grp_suffix = 'EMS'
files = os.listdir(exp_path)
trajectories = [x.replace(f'{pattern}_','') for x in files if pattern in x]
grp_numbers = [int(x.replace('.csv','')) for x in trajectories]
grp_numbers.sort()
grp_list = [f'{grp_suffix}-{str(x)}' for x in grp_numbers if x > 0]
if 0 in grp_numbers:
grp_list = grp_list + ['All']
else:
trajectories_cols = pd.read_csv(os.path.join(exp_path, fname), index_col=0,
nrows=0).columns.tolist()
cols = [col for col in trajectories_cols if uniquechannel in col]
if len(cols) != 0:
grp_list = [col.replace(f'{uniquechannel}_', '') for col in cols]
grp_suffix = grp_list[0][:3]
grp_numbers = [int(grp.replace('EMS-', '')) for grp in grp_list]
if len(cols) > 1:
grp_list = grp_list + ['All']
grp_numbers = grp_numbers + [0]
else:
grp_list = None
grp_suffix=None
grp_numbers = None
return grp_list, grp_suffix, grp_numbers
| 20,919
|
def serializer_roundtrip(serializer, obj):
"""Serializes an object to a file, then deserializes it and returns the result"""
@with_temporary_directory
def helper(tmp_dir, serializer, obj):
"""Helper function: takes care of creating and deleting the temp directory for the output"""
path = os.path.join(tmp_dir, 'out.txt')
with open(path, 'w') as f:
try:
json.dump(serializer.serialize(obj), f)
except ValueError as e:
print("test_serialization.serializer_roundtrip - invalid serialization:")
print(str(serializer.serialize(obj)))
raise e
with open(path, 'r') as f:
return serializer.deserialize(json.load(f))
return helper(serializer, obj)
| 20,920
|
def _Run(args, holder, target_https_proxy_arg, release_track):
"""Issues requests necessary to import target HTTPS proxies."""
client = holder.client
resources = holder.resources
target_https_proxy_ref = target_https_proxy_arg.ResolveAsResource(
args,
holder.resources,
default_scope=compute_scope.ScopeEnum.GLOBAL,
scope_lister=compute_flags.GetDefaultScopeLister(client))
data = console_io.ReadFromFileOrStdin(args.source or '-', binary=False)
try:
target_https_proxy = export_util.Import(
message_type=client.messages.TargetHttpsProxy,
stream=data,
schema_path=_GetSchemaPath(release_track))
except yaml_validator.ValidationError as e:
raise compute_exceptions.ValidationError(str(e))
# Get existing target HTTPS proxy.
try:
old_target_https_proxy = target_https_proxies_utils.SendGetRequest(
client, target_https_proxy_ref)
except apitools_exceptions.HttpError as error:
if error.status_code != 404:
raise error
# Target HTTPS proxy does not exist, create a new one.
return _SendInsertRequest(client, resources, target_https_proxy_ref,
target_https_proxy)
if old_target_https_proxy == target_https_proxy:
return
console_io.PromptContinue(
message=('Target Https Proxy [{0}] will be overwritten.').format(
target_https_proxy_ref.Name()),
cancel_on_no=True)
# Populate id and fingerprint fields. These two fields are manually
# removed from the schema files.
target_https_proxy.id = old_target_https_proxy.id
if hasattr(old_target_https_proxy, 'fingerprint'):
target_https_proxy.fingerprint = old_target_https_proxy.fingerprint
# Unspecified fields are assumed to be cleared.
cleared_fields = []
if target_https_proxy.description is None:
cleared_fields.append('description')
if target_https_proxy.serverTlsPolicy is None:
cleared_fields.append('serverTlsPolicy')
if target_https_proxy.authorizationPolicy is None:
cleared_fields.append('authorizationPolicy')
if hasattr(target_https_proxy,
'certificateMap') and target_https_proxy.certificateMap is None:
cleared_fields.append('certificateMap')
if hasattr(target_https_proxy,
'httpFilters') and not target_https_proxy.httpFilters:
cleared_fields.append('httpFilters')
if target_https_proxy.proxyBind is None:
cleared_fields.append('proxyBind')
if target_https_proxy.quicOverride is None:
cleared_fields.append('quicOverride')
if not target_https_proxy.sslCertificates:
cleared_fields.append('sslCertificates')
if target_https_proxy.sslPolicy is None:
cleared_fields.append('sslPolicy')
if target_https_proxy.urlMap is None:
cleared_fields.append('urlMap')
with client.apitools_client.IncludeFields(cleared_fields):
return _SendPatchRequest(client, resources, target_https_proxy_ref,
target_https_proxy)
| 20,921
|
def visualize_data(df):
"""
Takes in a Pandas Dataframe and then slices and dices it to create graphs
"""
fig, (ax1, ax2) = plt.subplots(nrows = 2, ncols = 1)
ax1.set_xlabel('epochs')
ax1.set_ylabel('validation accuracy')
ax2.set_xlabel('epochs')
ax2.set_ylabel('loss')
    for i, group in df.groupby('network name'):
        group.plot(x='epochs', y='validation accuracy', ax=ax1, label=i, marker='o', linewidth=2)
        group.plot(x='epochs', y='loss', ax=ax2, label=i, marker='o', linewidth=2)
    # Build the legends after plotting so they pick up the plotted lines.
    ax1.legend(loc='upper center', shadow=True)
    ax2.legend(loc='upper center', shadow=True)
    plt.show()
| 20,922
|
def get_mono_cell(locus_file, TotalSNPs, TotalBi_SNPs_used):
"""Determine value to add to [0,0] cell"""
TotalBP, Loci_count = totalbp(locus_file)
return int((TotalBi_SNPs_used * TotalBP) / TotalSNPs) - TotalBi_SNPs_used, \
TotalBP, Loci_count
| 20,923
|
def remove_bad_particles(st, min_rad='calc', max_rad='calc', min_edge_dist=2.0,
check_rad_cutoff=[3.5, 15], check_outside_im=True,
tries=50, im_change_frac=0.2, **kwargs):
"""
Removes improperly-featured particles from the state, based on a
combination of particle size and the change in error on removal.
Parameters
-----------
st : :class:`peri.states.State`
The state to remove bad particles from.
min_rad : Float, optional
All particles with radius below min_rad are automatically deleted.
Set to 'calc' to make it the median rad - 25* radius std.
Default is 'calc'.
max_rad : Float, optional
All particles with radius above max_rad are automatically deleted.
Set to 'calc' to make it the median rad + 15* radius std.
Default is 'calc'.
min_edge_dist : Float, optional
All particles within min_edge_dist of the (padded) image
edges are automatically deleted. Default is 2.0
check_rad_cutoff : 2-element list of floats, optional
Particles with radii < check_rad_cutoff[0] or > check_rad_cutoff[1]
are checked if they should be deleted. Set to 'calc' to make it the
        median rad +- 7.5 * radius std. Default is [3.5, 15].
check_outside_im : Bool, optional
If True, checks if particles located outside the unpadded image
should be deleted. Default is True.
tries : Int, optional
The maximum number of particles with radii < check_rad_cutoff
to try to remove. Checks in increasing order of radius size.
Default is 50.
    im_change_frac : Float, optional
Number between 0 and 1. If removing a particle decreases the
error by less than im_change_frac*the change in the image, then
the particle is deleted. Default is 0.2
Returns
-------
removed: Int
The cumulative number of particles removed.
"""
is_near_im_edge = lambda pos, pad: (((pos + st.pad) < pad) | (pos >
np.array(st.ishape.shape) + st.pad - pad)).any(axis=1)
# returns True if the position is within 'pad' of the _outer_ image edge
removed = 0
attempts = 0
n_tot_part = st.obj_get_positions().shape[0]
    q10 = int(0.1 * n_tot_part)  # 10th percentile
r_sig = np.sort(st.obj_get_radii())[q10:-q10].std()
r_med = np.median(st.obj_get_radii())
if max_rad == 'calc':
max_rad = r_med + 15*r_sig
if min_rad == 'calc':
min_rad = r_med - 25*r_sig
if check_rad_cutoff == 'calc':
check_rad_cutoff = [r_med - 7.5*r_sig, r_med + 7.5*r_sig]
# 1. Automatic deletion:
rad_wrong_size = np.nonzero(
(st.obj_get_radii() < min_rad) | (st.obj_get_radii() > max_rad))[0]
near_im_edge = np.nonzero(is_near_im_edge(st.obj_get_positions(),
min_edge_dist - st.pad))[0]
delete_inds = np.unique(np.append(rad_wrong_size, near_im_edge)).tolist()
delete_poses = st.obj_get_positions()[delete_inds].tolist()
message = ('-'*27 + 'SUBTRACTING' + '-'*28 +
'\n Z\t Y\t X\t R\t|\t ERR0\t\t ERR1')
with log.noformat():
CLOG.info(message)
for pos in delete_poses:
ind = st.obj_closest_particle(pos)
old_err = st.error
p, r = st.obj_remove_particle(ind)
p = p[0]
r = r[0]
part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % (
tuple(p) + (r,) + (old_err, st.error))
with log.noformat():
CLOG.info(part_msg)
removed += 1
# 2. Conditional deletion:
check_rad_inds = np.nonzero((st.obj_get_radii() < check_rad_cutoff[0]) |
(st.obj_get_radii() > check_rad_cutoff[1]))[0]
if check_outside_im:
check_edge_inds = np.nonzero(
is_near_im_edge(st.obj_get_positions(), st.pad))[0]
check_inds = np.unique(np.append(check_rad_inds, check_edge_inds))
else:
check_inds = check_rad_inds
check_inds = check_inds[np.argsort(st.obj_get_radii()[check_inds])]
tries = np.min([tries, check_inds.size])
check_poses = st.obj_get_positions()[check_inds[:tries]].copy()
for pos in check_poses:
old_err = st.error
ind = st.obj_closest_particle(pos)
killed, p, r = check_remove_particle(
st, ind, im_change_frac=im_change_frac)
if killed:
removed += 1
check_inds[check_inds > ind] -= 1 # cleaning up indices....
delete_poses.append(pos)
part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % (
p + r + (old_err, st.error))
with log.noformat():
CLOG.info(part_msg)
return removed, delete_poses
| 20,924
|
def _pyside_import_module(moduleName):
""" The import for PySide
"""
pyside = __import__('PySide', globals(), locals(), [moduleName], -1)
return getattr(pyside, moduleName)
| 20,925
|
def get_models(args):
"""
:param args: argparse.Namespace
commandline arguments
:return: dict of BaseReport
"""
models = dict()
if os.path.isfile(args.cm_input):
models[args.cm_input] = CheckmarxReport
if os.path.isfile(args.sn_input):
models[args.sn_input] = SnykReport
return models
| 20,926
|
def sanitize_str(value: str) -> str:
"""Removes Unicode control (Cc) characters EXCEPT for tabs (\t), newlines (\n only), line separators (U+2028) and paragraph separators (U+2029)."""
return "".join(ch for ch in value if unicodedata.category(ch) != 'Cc' and ch not in {'\t', '\n', '\u2028', '\u2029'})
| 20,927
|
def p_assign_semicolon(p):
"""assign : assignation ';'"""
p[0] = p[1]
| 20,928
|
def get_tariff_estimated(reporter,
partner='000',
product='all',
year=world_trade_data.defaults.DEFAULT_YEAR,
name_or_id='name'):
"""Tariffs (estimated)"""
return _get_data(reporter, partner, product, year,
datatype='aveestimated', datasource='trn', name_or_id=name_or_id)
| 20,929
|
def task_result_api_view(request, taskid):
"""
Get task `state` and `result` from API endpoint.
    Use case: you want to provide a user with asynchronous feedback about
    the status of some task.
Example:
# urls.py
urlpatterns = [
url(r'^api/task/result/(.+)/', task_result_api_view),
...
]
# some_views.py
context = {}
# ...
async_request = some_important_task.delay(...)
# ...
context['async_task_id'] = str(async_request.id)
Now we can check the state and result form Front-end side.
"""
result = AsyncResult(taskid)
response = {'task-id': taskid, 'state': result.state}
response.update({'result': _safe_result(result.result)})
return JsonResponse(response)
| 20,930
|
async def test_8():
"""Test query POST endpoint.
Send a query with missing required params. Expect a bad request (400).
"""
LOG.debug('Test post query (missing params)')
error_text = "Provided input: '{'start': 9, 'referenceBases': 'T', 'alternateBases': 'C', 'assemblyId': 'GRCh38', 'includeDatasetResponses': 'HIT'}' \
does not seem correct because: ''referenceName' is a required property'"
payload = {"start": 9,
"referenceBases": "T",
"alternateBases": "C",
"assemblyId": "GRCh38",
"includeDatasetResponses": "HIT"}
async with aiohttp.ClientSession() as session:
async with session.post('http://localhost:5050/query', data=json.dumps(payload)) as resp:
data = await resp.json()
if 'error' in data and len(data['error']) > 0:
assert resp.status == 400, 'HTTP Status code error'
assert data['error']['errorCode'] == 400, 'HTTP Status code error'
assert data['error']['errorMessage'] == error_text
else:
sys.exit('Query POST Endpoint Error!')
| 20,931
|
def get_horizon_coordinates(fp_pointings_spherical):
"""
It converts from spherical to Horizon coordinates, with the conventions:
    Altitude = np.pi / 2 - zenith angle (theta)
Azimuth = 2 * np.pi - phi
Parameters
----------
fp_pointings_spherical : numpy array of shape (..., 2), radians
They are the spherical coordinates (theta, phi) that will be converted.
Returns
-------
out : numpy array of shape (..., ), numpy array of shape (..., )
"""
Alt = np.pi/2 - fp_pointings_spherical[..., 0] #rad
Az = 2 * np.pi - fp_pointings_spherical[..., 1] #rad
return Alt, Az
| 20,932
|
def add_todo(username, password, title, description):
""" To add a todo task -- requires title and description of the task """
    userpass = username + password
    dt = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
    result = check_user(username, password)
    if len(result) == 0:
        click.secho('Username or password is wrong.', fg='red')
        click.secho("Type 'main.py register' if not registered yet.", fg='yellow')
    else:
        # Only store the task once the credentials have been verified.
        add_message(userpass, dt, title, description)
        click.secho('To-Do task added', fg='yellow')
        click.secho("Type 'main.py view-todos' to view all To-do's", fg='blue')
| 20,933
|
def fire_receiver_type_metric(sender, instance, created, **kwargs):
"""Fires a metric for each receiver message type of each subscription."""
from .tasks import fire_metric, is_valid_msg_receiver
if (created and instance.data and instance.data['msg_receiver'] and
is_valid_msg_receiver(instance.data['msg_receiver'])):
msg_receiver = instance.data['msg_receiver']
fire_metric.apply_async(kwargs={
"metric_name": 'registrations.receiver_type.%s.sum' % msg_receiver,
"metric_value": 1.0,
})
total_key = 'registrations.receiver_type.%s.total.last' % msg_receiver
total = get_or_incr_cache(
total_key,
Registration.objects.filter(data__msg_receiver=msg_receiver).count)
fire_metric.apply_async(kwargs={
'metric_name': total_key,
'metric_value': total,
})
| 20,934
|
def indexed_chunking_random_test(f_list=indexed_chunking_f_list,
x=None,
return_debug_info=False,
verbose=0):
"""made it so you can just run a function (several times) to test, but if you want to see print outs use verbose=1,
and if you want to get a bunch of variables that will then allow you to diagnose things,
specify return_debug_info=True"""
if x is None:
x = randint(10, 1000)
if isinstance(x, int):
n_pts = x
x = sorted(randint(1, 100000, n_pts))
assert sorted(x) == x, "x is not sorted!"
kwargs = random_kwargs_for_list(x)
if verbose:
print(("x: {} elements. min: {}, max: {}".format(len(x), x[0], x[-1])))
t = {k: v for k, v in kwargs.items() if k != 'key'}
if verbose:
print(("kwargs: {}\n".format(json.dumps(t, indent=2))))
b = list(f_list[0](iter(x), **kwargs))
bb = None
all_good = True
idx_where_different = array([])
for i, f in enumerate(f_list[1:], 1):
bb = list(f(iter(x), **kwargs))
all_good = True
if len(b) != len(bb):
all_good &= False
if verbose:
print(("{}: Not the same length! Base had {} elements, comp has {}".format(
i, len(b), len(bb))))
idx_where_different = where([x[0] != x[1] for x in zip(b, bb)])[0]
if len(idx_where_different) > 0:
all_good &= False
if verbose:
print(("{} values where different".format(len(idx_where_different))))
if not all_good:
if verbose:
print("STOPPING HERE: Check the variables for diagnosis")
break
print("")
if all_good:
if verbose:
print("All good!")
if return_debug_info:
return all_good, idx_where_different, x, b, bb, kwargs
else:
return all_good
| 20,935
|
def patch_object_type() -> None:
"""
    Patches `graphene.ObjectType` to make it indexable at runtime. This is necessary for it to be
    generic at typechecking time.
"""
# Lazily import graphene as it is actually an expensive thing to do and we don't want to slow down things at
# type-checking time.
from graphene import ObjectType # pylint: disable=import-outside-toplevel
ObjectTypeMetaclass = type(ObjectType)
def __getitem__(cls: TypeOf[TypeOf[ObjectType]], _: Any) -> TypeOf[TypeOf[ObjectType]]:
return cls
ObjectTypeMetaclass.__getitem__ = __getitem__
| 20,936
|
def GetFiles(dir, dirname):
"""Given a directory and the dirname of the directory, recursively
traverse the directory and return a list of tuples containing
(filename, relative filename), where 'relative filename' is
generated using GetZipPath.
"""
files = []
for (dirpath, dirnames, filenames) in os.walk(dir, True):
# skip emacs backup files
files.extend([os.path.join(dirpath, f) for f in filenames
if not f.endswith("~")])
# skip CVS and .svn dirs
# funky slice assignment here
dirnames[:] = [d for d in dirnames if d != 'CVS' and d != '.svn']
return [(f, GetZipPath(f, dir, dirname)) for f in files]
| 20,937
|
def date_format(time_obj=time, fmt='%Y-%m-%d %H:%M:%S') -> str:
"""
时间转字符串
:param time_obj:
:param fmt:
:return:
"""
_tm = time_obj.time()
_t = time.localtime(_tm)
return time.strftime(fmt, _t)
| 20,938
|
async def delete(
id: str,
requester_id: str = Depends(with_user_id),
permission: str = Depends(
requires_permission(Permission.Participant, Permission.Director)
),
db: AsyncSession = Depends(with_db),
) -> None:
"""
Deletes an application by id
"""
if Permission.Participant.matches(permission) and id != requester_id:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN, detail="invalid permissions"
)
application = await db.get(Application, id)
if application:
await db.delete(application)
await db.commit()
| 20,939
|
def clean_pin_cite(pin_cite: Optional[str]) -> Optional[str]:
"""Strip spaces and commas from pin_cite, if it is not None."""
if pin_cite is None:
return pin_cite
return pin_cite.strip(", ")
| 20,940
|
def instrument_code_to_name(rwc_instrument_code):
"""Use the rwc_instrument_map.json to convert an rwc_instrument_code
to its instrument name.
Parameters
----------
rwc_instrument_code : str
Two character instrument code
Returns
-------
instrument_name : str
Full instrument name, if it exists, else None
"""
code = RWC_INSTRUMENT_MAP.get(rwc_instrument_code, None)
return code if code else None
| 20,941
|
def record_time(ad, fallback_to_launch=True):
"""
RecordTime falls back to launch time as last-resort and for jobs in the queue
For Completed/Removed/Error jobs, try to update it:
- to CompletionDate if present
- else to EnteredCurrentStatus if present
- else fall back to launch time
"""
if ad["JobStatus"] in [3, 4, 6]:
if ad.get("CompletionDate", 0) > 0:
return ad["CompletionDate"]
elif ad.get("EnteredCurrentStatus", 0) > 0:
return ad["EnteredCurrentStatus"]
if fallback_to_launch:
return _LAUNCH_TIME
return 0
| 20,942
|
def getTeamCompatibility(mentor, team):
"""
Gets a "compatibility score" between a mentor and a team (used as the weight in the later optimization problem)
Uses the functions defined above to compute different aspects of the score
"""
score = 0
# find value from overlapping availabilities
# value may differ depending on transportation type used, so try them all
bestOverlap = -noOverlapCost # baseline to beat is no overlap at all
for transitType in range(numTypesTransit):
# check if this transit type is better than previous best and update if needed
bestOverlap = max(bestOverlap, getSingleOverlapValue(mentor, team, transitType))
score += bestOverlap
# find value from team type matches
score += getTeamTypeValue(mentor, team)
# find value from team requests / requirements
score += getTeamRequestedValue(mentor, team)
return score
| 20,943
|
def mean_edges(graph, feat, weight=None):
"""Averages all the values of edge field :attr:`feat` in :attr:`graph`,
optionally multiplies the field by a scalar edge field :attr:`weight`.
Parameters
----------
graph : DGLGraph
The graph.
feat : str
The feature field.
weight : optional, str
        The weight field. If None, no weighting will be performed;
        otherwise, each edge feature is weighted by the field :attr:`weight`
        when calculating the mean. The weight feature associated in the :attr:`graph`
should be a tensor of shape ``[graph.number_of_edges(), 1]``.
Returns
-------
tensor
The averaged tensor.
Notes
-----
Return a stacked tensor with an extra first dimension whose size equals
batch size of the input graph.
The i-th row of the stacked tensor contains the readout result of
the i-th graph in the batched graph. If a graph has no edges,
a zero tensor with the same shape is returned at the corresponding row.
Examples
--------
>>> import dgl
>>> import torch as th
Create two :class:`~dgl.DGLGraph` objects and initialize their
edge features.
>>> g1 = dgl.DGLGraph() # Graph 1
>>> g1.add_nodes(2)
>>> g1.add_edges([0, 1], [1, 0])
>>> g1.edata['h'] = th.tensor([[1.], [2.]])
>>> g1.edata['w'] = th.tensor([[3.], [6.]])
>>> g2 = dgl.DGLGraph() # Graph 2
>>> g2.add_nodes(3)
>>> g2.add_edges([0, 1, 2], [1, 2, 0])
>>> g2.edata['h'] = th.tensor([[1.], [2.], [3.]])
Average over edge attribute :attr:`h` without weighting for each graph in a
batched graph.
>>> bg = dgl.batch([g1, g2], edge_attrs='h')
>>> dgl.mean_edges(bg, 'h')
tensor([[1.5000], # (1 + 2) / 2
[2.0000]]) # (1 + 2 + 3) / 3
Sum edge attribute :attr:`h` with normalized weight from edge attribute :attr:`w`
for a single graph.
>>> dgl.mean_edges(g1, 'h', 'w') # h1 * (w1 / (w1 + w2)) + h2 * (w2 / (w1 + w2))
tensor([[1.6667]]) # 1 * (3 / (3 + 6)) + 2 * (6 / (3 + 6))
See Also
--------
sum_nodes
mean_nodes
sum_edges
"""
return _mean_on(graph, 'edges', feat, weight)
| 20,944
|
async def update_config_file(config: ConfigDTO, reboot_processor: Optional[bool] = True):
"""
Overwrites the configuration used by the processor.
"""
config_dict = map_to_file_format(config)
success = update_config(config_dict, reboot_processor)
if not success:
return handle_response(config_dict, success)
return map_config(extract_config(), "")
| 20,945
|
def integration_session(scope="session"):
"""
    Creates a Session object which will persist over the entire test run ("session").
    HTTP connections will be reused (higher performance, less resource usage).
    Returns a Session object.
"""
s = requests.sessions.Session()
s.headers.update(test_headers)
return s
| 20,946
|
def get_json_dump(json_object, indent=4, sort_keys=False):
""" Short handle to get a pretty printed str from a JSON object. """
return json.dumps(json_object, indent=indent, sort_keys=sort_keys)
| 20,947
|
def number_of_real_roots(f, *gens, **args):
"""Returns the number of distinct real roots of `f` in `(inf, sup]`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> from sympy.polys.polyroots import number_of_real_roots
>>> f = Poly(x**2 - 1, x)
Count real roots in the (-oo, oo) interval:
>>> number_of_real_roots(f)
2
Count real roots in the (0, 2) interval:
>>> number_of_real_roots(f, inf=0, sup=2)
1
Count real roots in the (2, oo) interval:
>>> number_of_real_roots(f, inf=2)
0
References
==========
.. [Davenport88] J.H. Davenport, Y. Siret, E. Tournier, Computer
Algebra Systems and Algorithms for Algebraic Computation,
Academic Press, London, 1988, pp. 124-128
"""
def sign_changes(seq):
count = 0
        for i in range(1, len(seq)):
if (seq[i-1] < 0 and seq[i] >= 0) or \
(seq[i-1] > 0 and seq[i] <= 0):
count += 1
return count
F = Poly(f, *gens, **args)
if not F.is_Poly:
return 0
if F.is_multivariate:
raise ValueError('multivariate polynomials not supported')
if F.degree() < 1:
return 0
inf = args.get('inf', None)
if inf is not None:
inf = sympify(inf)
if not inf.is_number:
raise ValueError("Not a number: %s" % inf)
elif abs(inf) is S.Infinity:
inf = None
sup = args.get('sup', None)
if sup is not None:
sup = sympify(sup)
if not sup.is_number:
raise ValueError("Not a number: %s" % sup)
elif abs(sup) is S.Infinity:
sup = None
sturm = F.sturm()
if inf is None:
signs_inf = sign_changes([ s.LC()*(-1)**s.degree() for s in sturm ])
else:
signs_inf = sign_changes([ s.eval(inf) for s in sturm ])
if sup is None:
signs_sup = sign_changes([ s.LC() for s in sturm ])
else:
signs_sup = sign_changes([ s.eval(sup) for s in sturm ])
return abs(signs_inf - signs_sup)
| 20,948
|
def test_ui_data_in_record(
app, client, minimal_record, headers, ui_headers):
"""Publish a record and check that it contains the UI data."""
recid = _create_and_publish(client, minimal_record, headers)
BibliographicRecord.index.refresh()
# Check if list results contain UI data
response = client.get(
'/records', query_string={'q': f'id:{recid}'}, headers=ui_headers)
assert response.json['hits']['hits'][0]['ui']
# Check if item results contain UI data
response = client.get(f'/records/{recid}', headers=ui_headers)
assert response.json['ui']
| 20,949
|
def heap_sort(arr: list):
"""
Heap sorting a list. Big-O: O(n log n).
@see https://www.geeksforgeeks.org/heap-sort/
"""
def heapify(sub: list, rdx: int, siz: int):
"""
Heapifying range between rdx and size ([rdx:siz]).
@param sub: a slice of list.
@param rdx: root/parent index to start.
@param siz: size of heap.
"""
largest = ndx = rdx # assuming the root is the largest
while ndx < siz:
l_index = 2 * ndx + 1 # child index at left = 2*i + 1
r_index = 2 * ndx + 2 # child index at right = 2*i + 2
# reset largest index if left child exists and is greater than root.
if l_index < siz and sub[ndx] < sub[l_index]:
largest = l_index
# check if right child is greater than the value at the largest index.
if r_index < siz and sub[largest] < sub[r_index]:
largest = r_index
# change root, if needed
if largest != ndx:
sub[ndx], sub[largest] = sub[largest], sub[ndx] # swap
ndx = largest # heapify the root.
continue
return
n = len(arr)
# build a max heap.
parent = n // 2 - 1 # the last parent (that can have children)
for i in range(parent, -1, -1):
heapify(arr, i, n)
# extract elements one by one.
for i in range(n-1, 0, -1):
arr[i], arr[0] = arr[0], arr[i] # swap
heapify(arr, 0, i)
return arr
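A quick sanity check (illustrative input):

import random

data = random.sample(range(100), 10)
assert heap_sort(list(data)) == sorted(data)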
| 20,950
|
def find_max_path(triangle):
"""
Find maximum-sum path from top of triangle to bottom
"""
# Start by copying the values
sums = [[x for x in row] for row in triangle]
# Efficient algorithm: start at the bottom and work our way up, computing max sums
for reverse_index, row in enumerate(reversed(sums)):
if reverse_index == 0:
# Easy: max value for subpaths from last row is cell value itself
continue
# Now we need to take sum of each cell and max of two subpaths
row_below = sums[-reverse_index]
for col_index, col in enumerate(row):
left = row_below[col_index]
right = row_below[col_index + 1]
row[col_index] = col + max(left, right)
return sums[0][0]
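For example, on the small triangle from the Project Euler problem 18 statement the maximum path is 3 + 7 + 4 + 9:

triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]
assert find_max_path(triangle) == 23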
| 20,951
|
def plot_partregress(results, exog_idx=None, xnames=None, grid=None, fig=None):
"""Plot partial regression for a set of regressors.
Parameters
----------
results : results instance
A regression model results instance
exog_idx : None or list of int
(column) indices of the exog used in the plot, default is all.
xnames : None or list of strings
Names for the numbers given in exog_idx. Default is
results.model.exog_names.
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
If grid is None, then ncol is one, if there are only 2 subplots, and
the number of columns is two otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
Notes
-----
A subplot is created for each explanatory variable given by exog_idx.
The partial regression plot shows the relationship between the response
and the given explanatory variable after removing the effect of all other
explanatory variables in exog.
See Also
--------
plot_partregress_ax : Plot partial regression for a single regressor.
plot_ccpr
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
"""
fig = utils.create_mpl_fig(fig)
#maybe add option for using wendog, wexog instead
y = results.model.endog
exog = results.model.exog
k_vars = exog.shape[1]
#this function doesn't make sense if k_vars=1
if xnames is None:
exog_idx = range(k_vars)
xnames = results.model.exog_names
else:
exog_idx = []
for name in xnames:
exog_idx.append(results.model.exog_names.index(name))
    if grid is not None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
title_fontsize = 'small'
else:
nrows = len(exog_idx)
ncols = 1
title_fontsize = None
for i,idx in enumerate(exog_idx):
        others = list(range(k_vars))  # list() so that pop() works on Python 3
        others.pop(idx)
exog_others = exog[:, others]
ax = fig.add_subplot(nrows, ncols, i+1)
plot_partregress_ax(y, exog[:, idx], exog_others, ax=ax,
varname=xnames[i])
return fig
| 20,952
|
def ufloats_overlap_range(ufloats, vmin, vmax):
"""Return whether the +/- 1 sigma range overlaps the value range."""
vals = []
sigmas = []
for val in ufloats:
if isinstance(val, float):
vals.append(val)
sigmas.append(0)
else:
vals.append(val.nominal_value)
sigmas.append(val.std_dev)
vals = np.array(vals)
sigmas = np.array(sigmas)
    # An interval [v - s, v + s] overlaps [vmin, vmax] only if both conditions hold.
    return ((vals - sigmas <= vmax) & (vals + sigmas >= vmin)).all()
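A small illustration of the overlap test above using plain floats (zero sigma) and made-up values:

import numpy as np  # required by ufloats_overlap_range above

assert not ufloats_overlap_range([1.0, 2.5], vmin=2.0, vmax=3.0)  # 1.0 +/- 0 lies outside [2.0, 3.0]
assert ufloats_overlap_range([2.2, 2.5], vmin=2.0, vmax=3.0)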
| 20,953
|
def save_volume(filename, volume, dtype='float32', overwrite_file=True):
"""
Save volumetric data that is a
`Nibabel SpatialImage <http://nipy.org/nibabel/reference/nibabel.spatialimages.html#nibabel.spatialimages.SpatialImage>`_
to a file
Parameters
----------
filename: str
Full path and filename under which volume should be saved. The
extension determines the file format (must be supported by Nibabel)
volume: Nibabel SpatialImage
Volumetric data to be saved
dtype: str, optional
Datatype in which volumetric data should be stored (default is float32)
overwrite_file: bool, optional
Overwrite existing files (default is True)
Notes
----------
Originally created as part of Laminar Python [1]_ .
References
-----------
.. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical
depth-resolved analysis of high-resolution brain imaging data in
Python. DOI: 10.3897/rio.3.e12346
""" # noqa
import os
if dtype is not None:
volume.set_data_dtype(dtype)
if os.path.isfile(filename) and overwrite_file is False:
print("\nThis file exists and overwrite_file was set to False, "
"file not saved.")
else:
try:
volume.to_filename(filename)
print("\nSaving {0}").format(filename)
except AttributeError:
print('\nInput volume must be a Nibabel SpatialImage.')
| 20,954
|
def exact_qaoa_values_on_grid(
graph: nx.Graph,
xlim: Tuple[float, float] = (0, np.pi / 2),
ylim: Tuple[float, float] = (-np.pi / 4, np.pi / 4),
x_grid_num: int = 20,
y_grid_num: int = 20,
num_processors: int = 1,
dtype=np.complex128):
"""Compute exact p=1 QAOA values on a grid.
Args:
graph: The graph representing the Hamiltonian.
xlim: The range of values for gamma.
ylim: The range of values for beta.
        x_grid_num, y_grid_num: The number of grid points along the gamma and beta
            dimensions, respectively. The total number of points evaluated will be
            x_grid_num * y_grid_num.
Returns:
A 2-dimensional Numpy array containing the QAOA values.
The rows index the betas and the columns index the gammas.
"""
a, b = xlim
c, d = ylim
gammas = np.linspace(a, b, x_grid_num)
betas = np.linspace(c, d, y_grid_num)
HamC = create_ZZ_HamC(graph, dtype=dtype)
N = graph.number_of_nodes()
with multiprocessing.Pool(num_processors) as pool:
vals = pool.starmap(_ising_qaoa_expectation,
[(N, HamC, x, True, dtype)
for x in itertools.product(gammas, betas)])
return np.reshape(np.array(vals), (x_grid_num, y_grid_num)).T
| 20,955
|
async def on_command(command, ctx):
"""when a command happens it logs it"""
bot.commands_used[command.name] += 1
message = ctx.message
destination = None
if message.channel.is_private:
destination = 'Private Message'
else:
destination = '#{0.channel.name} ({0.server.name})'.format(message)
log.info('{0.timestamp}: {0.author.name} in {1}: {0.content}'.format(message, destination))
| 20,956
|
def typecheck_eq(expr, ctxt=[]):
"""(par (A) (= A A Bool :chainable))
(par (A) (distinct A A Bool :pairwise))
"""
typ = typecheck_expr(expr.subterms[0], ctxt)
for term in expr.subterms[1:]:
t = typecheck_expr(term, ctxt)
if t != typ:
if not (is_subtype(t, typ) or is_subtype(typ, t)):
raise TypeCheckError(expr, term, typ, t)
return BOOLEAN_TYPE
| 20,957
|
def precision(x, for_sum=False):
"""
    This function returns the precision of a given datatype using a comparable numpy array
"""
if not for_sum:
return np.finfo(x.dtype).eps
else:
return np.finfo(x.dtype).eps * x.size
| 20,958
|
def parse_line(line, line_count, retries):
"""Coordinate retrieval of scientific name or taxonomy ID.
Read line from input file, calling functions as appropriate to retrieve
scientific name or taxonomy ID.
:param line: str, line from input file
    :param line_count: number of the line in the input file - enables tracking if an error occurs
    :param retries: parser argument, maximum number of retries permitted if a network error is encountered
Return list of genus, species and taxonomy ID """
line_data = []
# For taxonomy ID retrieve scientific name
if line.startswith("NCBI:txid"):
gs_name = get_genus_species_name(line[9:], line_count, retries)
line_data = gs_name.split(" ", 1)
line_data.append(line)
# For scientific name retrieve taxonomy ID
else:
tax_id = get_tax_id(line, line_count, retries)
line_data = line.split()
line_data.append(tax_id)
return line_data
| 20,959
|
def prop_rotate(old_image, theta, **kwargs):
"""Rotate and shift an image via interpolation (bilinear by default)
Parameters
----------
old_image : numpy ndarray
Image to be rotated
theta : float
Angle to rotate image in degrees counter-clockwise
Returns
-------
new_image : numpy ndarray
Returns rotated & shifted image with the same dimensions as the input image
    Other Parameters
    ----------------
XC, YC : float
Center of rotation in image pixels; (0,0) is at center of first pixel;
if not specified, the center of the image is assumed to be the center
of rotation
XSHIFT, YSHIFT : float
Amount to shift rotated image in pixels
MISSING : float
Value to set extrapolated pixels.
"""
if old_image.dtype == np.dtype("complex128") or old_image.dtype == np.dtype("complex64"):
is_complex = 1
else:
is_complex = 0
new_image = np.copy(old_image)
if proper.use_cubic_conv:
n = old_image.shape[0]
if not "XC" in kwargs:
XC = int(n / 2)
if not "YC" in kwargs:
YC = int(n / 2)
if not "XSHIFT" in kwargs:
xshift = 0.
if not "YSHIFT" in kwargs:
yshift = 0.
if not "MISSING" in kwargs:
missing = 0.
t = -theta * np.pi / 180.
x0 = np.arange(n, dtype = np.float64) - XC - xshift
for j in range(n):
y0 = j - YC - yshift
xi = x0 * np.cos(t) - y0 * np.sin(t) + YC
yi = x0 * np.sin(t) + y0 * np.cos(t) + XC
new_image[j,:] = proper.prop_cubic_conv(old_image, xi, yi, GRID = False)
else:
theta = -1. * theta
if is_complex:
new_image.real = rotate(old_image.real, theta, reshape = False, prefilter = True)
new_image.imag = rotate(old_image.imag, theta, reshape = False, prefilter = True)
else:
new_image = rotate(old_image, theta, reshape = False, prefilter = False)
return new_image
| 20,960
|
def append_tf_example(data: Dict[Text, Any],
schema: Dict[Text, Any]) -> tf.train.Example:
"""Add tf example to row"""
feature = {}
for key, value in data.items():
data_type = schema[key]
value = CONVERTER_MAPPING[data_type](value)
if data_type == DataType.INT:
feature[key] = tf.train.Feature(
int64_list=tf.train.Int64List(value=value))
elif data_type == DataType.FLOAT:
feature[key] = tf.train.Feature(
float_list=tf.train.FloatList(value=value))
elif data_type == DataType.BYTES:
feature[key] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=value))
else:
feature[key] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=value))
tf_example = tf.train.Example(features=tf.train.Features(feature=feature))
return tf_example
| 20,961
|
def can_login(email, password):
"""Validation login parameter(email, password) with rules.
return validation result True/False.
"""
login_user = User.find_by_email(email)
return login_user is not None and argon2.verify(password, login_user.password_hash)
| 20,962
|
def Main(output):
"""Transforms power_manager prefs/defaults into jsonschema.
Args:
output: Output file that will be generated by the transform.
"""
result_lines = ["""powerd_prefs_default: &powerd_prefs_default
description: For details, see https://chromium.googlesource.com/chromiumos/platform2/+/HEAD/power_manager/
type: string
powerd_prefs: &powerd_prefs"""]
with open(PREF_DEF_FILE, 'r') as defs_stream:
defs_content = defs_stream.read()
prefs = re.findall(
r'const char .*Pref.. =[ |\n] *"(.*)";', defs_content, re.MULTILINE)
for pref in prefs:
default_pref_path = os.path.join(PREF_DEFAULTS_DIR, pref)
pref_name = pref.replace('_', '-')
if os.path.exists(default_pref_path):
result_lines.append(' %s:' % pref_name)
result_lines.append(' <<: *powerd_prefs_default')
with open(default_pref_path, 'r') as default_stream:
default = default_stream.read()
result_lines.append(
' default: "%s"' % default.strip().replace('\n', ' '))
else:
result_lines.append(' %s: *powerd_prefs_default' % pref_name)
full_result = '\n'.join(result_lines)
if output:
with open(output, 'w') as output_stream:
print(full_result, file=output_stream)
else:
print(full_result)
| 20,963
|
def toPlanar(arr: np.ndarray, shape: tuple = None) -> np.ndarray:
"""
Converts interleaved frame into planar
Args:
arr (numpy.ndarray): Interleaved frame
shape (tuple, optional): If provided, the interleaved frame will be scaled to specified shape before converting into planar
Returns:
numpy.ndarray: Planar frame
"""
if shape is None:
return arr.transpose(2, 0, 1)
return cv2.resize(arr, shape).transpose(2, 0, 1)
| 20,964
|
def _convert_client_cert():
"""
Convert the client certificate pfx to crt/rsa required by nginx.
If the certificate does not exist then no action is taken.
"""
cert_file = os.path.join(SECRETS, 'streams-certs', 'client.pfx')
if not os.path.exists(cert_file):
return
pwd_file = os.path.join(SECRETS, 'streams-certs', 'client.pass')
certs_dir = os.path.join(OPT, 'streams-certs')
if not os.path.exists(certs_dir):
os.mkdir(certs_dir)
crt = os.path.join(certs_dir, 'client.crt')
rsa = os.path.join(certs_dir, 'client.rsa')
args = ['/usr/bin/openssl', 'pkcs12', '-in', cert_file, '-password', 'file:' + pwd_file]
subprocess.run(args + ['-clcerts', '-nokeys', '-out', crt], check=True)
subprocess.run(args + ['-nocerts', '-nodes', '-out', rsa], check=True)
return crt, rsa
| 20,965
|
def downsample_grid(
xg: np.ndarray, yg: np.ndarray, distance: float, mask: np.ndarray = None
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Downsample grid locations to approximate spacing provided by 'distance'.
Notes
-----
This implementation is more efficient than the 'downsample_xy' function
for locations on a regular grid.
:param xg: Meshgrid-like array of Easting coordinates.
:param yg: Meshgrid-like array of Northing coordinates.
:param distance: Desired coordinate spacing.
    :param mask: Optionally provide an existing mask; the returned mask is the
        intersection of the two masks, and xg and yg are filtered accordingly.
:return: mask: Boolean mask that was applied to xg, and yg.
:return: xg[mask]: Masked input array xg.
:return: yg[mask]: Masked input array yg.
"""
u_diff = lambda u: np.unique(np.diff(u, axis=1))[0]
v_diff = lambda v: np.unique(np.diff(v, axis=0))[0]
du = np.linalg.norm(np.c_[u_diff(xg), u_diff(yg)])
dv = np.linalg.norm(np.c_[v_diff(xg), v_diff(yg)])
u_ds = np.max([int(np.rint(distance / du)), 1])
v_ds = np.max([int(np.rint(distance / dv)), 1])
downsample_mask = np.zeros_like(xg, dtype=bool)
downsample_mask[::v_ds, ::u_ds] = True
if mask is not None:
downsample_mask &= mask
return downsample_mask, xg[downsample_mask], yg[downsample_mask]
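An illustrative call on a synthetic 1 m grid, thinned to roughly 5 m spacing (values are made up):

import numpy as np

xg, yg = np.meshgrid(np.arange(0.0, 100.0), np.arange(0.0, 50.0))
mask, x_ds, y_ds = downsample_grid(xg, yg, distance=5.0)
print(mask.shape, x_ds.size)  # (50, 100) and 200 retained locations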
| 20,966
|
def convert_12bit_to_type(image, desired_type=np.uint8):
"""
    Converts the 12-bit tiff from a 6X sensor to a numpy compatible form
    :param image: The input 12-bit image
    :param desired_type: The desired type
    :return: The converted image in numpy.array format
"""
image = image / MAX_VAL_12BIT # Scale to 0-1
image = np.iinfo(desired_type).max * image # Scale back to desired type
return image.astype(desired_type)
| 20,967
|
def dm_hdu(hdu):
""" Compute DM HDU from the actual FITS file HDU."""
if lsst.afw.__version__.startswith('12.0'):
return hdu + 1
return hdu
| 20,968
|
def test_get_formatted_as_type_default_no_subst():
"""On get_formatted_as_type returns default no formatting."""
context = Context()
result = context.get_formatted_as_type(None, default=10, out_type=int)
assert isinstance(result, int)
assert result == 10
| 20,969
|
def start_upload():
"""
Start the cron task to upload new jobs to the elasticsearch database
"""
sources ={
"adzuna":{
"extract_func": adzuna,
},
"jobsearcher":{
"extract_func": jobsearcher,
},
"monster":{
"extract_func": monster_scraper
}
}
for k, v in sources.items():
print(k)
try:
# extract
df = v["extract_func"]()
logging.info(f"{k} extract layer complete")
# transform
transformed_df = transform_df(df)
logging.info(f"{k} transform layer complete")
# load
query(transformed_df)
logging.info(f"{k} load layer complete")
except Exception:
logging.error(f"{k} unable to complete")
| 20,970
|
def get_hdf_filepaths(hdf_dir):
"""Get a list of downloaded HDF files which is be used for iterating through hdf file conversion."""
print "Building list of downloaded HDF files..."
hdf_filename_list = []
hdf_filepath_list = []
for dir in hdf_dir:
for dir_path, subdir, files in os.walk(dir):
for f in files:
if f.endswith(".hdf"):
hdf_filename_list.append(os.path.splitext(f)[0])
hdf_filepath_list.append(os.path.join(dir_path, f))
return hdf_filename_list, hdf_filepath_list
| 20,971
|
def mask_depth_image(depth_image, min_depth, max_depth):
""" mask out-of-range pixel to zero """
ret, depth_image = cv2.threshold(
depth_image, min_depth, 100000, cv2.THRESH_TOZERO)
ret, depth_image = cv2.threshold(
depth_image, max_depth, 100000, cv2.THRESH_TOZERO_INV)
depth_image = np.expand_dims(depth_image, 2)
return depth_image
| 20,972
|
def p2naive():
"""Good for small inputs"""
desc = lines[1].split(",")
buses = [(i, int(x)) for i, x in enumerate(desc) if x != "x"]
t = 0
while True:
if all((t + i) % b == 0 for i, b in buses):
print("p2add:", t)
break
t += buses[0][1]
| 20,973
|
def database_exists(url):
"""Check if a database exists.
:param url: A SQLAlchemy engine URL.
Performs backend-specific testing to quickly determine if a database
exists on the server. ::
database_exists('postgres://postgres@localhost/name') #=> False
create_database('postgres://postgres@localhost/name')
database_exists('postgres://postgres@localhost/name') #=> True
Supports checking against a constructed URL as well. ::
engine = create_engine('postgres://postgres@localhost/name')
database_exists(engine.url) #=> False
create_database(engine.url)
database_exists(engine.url) #=> True
"""
url = copy(make_url(url))
database = url.database
if url.drivername.startswith('postgresql'):
url.database = 'template1'
else:
url.database = None
engine = sa.create_engine(url)
if engine.dialect.name == 'postgresql':
text = "SELECT 1 FROM pg_database WHERE datname='%s'" % database
return bool(engine.execute(text).scalar())
elif engine.dialect.name == 'mysql':
text = ("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
"WHERE SCHEMA_NAME = '%s'" % database)
return bool(engine.execute(text).scalar())
elif engine.dialect.name == 'sqlite':
return database == ':memory:' or os.path.exists(database)
else:
text = 'SELECT 1'
try:
url.database = database
engine = sa.create_engine(url)
engine.execute(text)
return True
except (ProgrammingError, OperationalError):
return False
| 20,974
|
def prompt_for_password(prompt=None):
"""Fake prompt function that just returns a constant string"""
return 'promptpass'
| 20,975
|
def preprocess_data(filename_in='../data/chembl_smiles', filename_out='', model_type='BIMODAL', starting_point='fixed',
invalid=True, duplicates=True, salts=True, stereochem=True, canonicalize=True, min_len=34,
max_len=74, augmentation=1):
"""Pre-processing of SMILES based on the user-defined parameters
:param filename_in path to the file containing the SMILES to pretreat (SMILES only) -- default = ChEMBL
:param filename_out path for file export -- default = ../data/
:param model_type model to be used after data preparation -- default = 'BIMODAL'
:param starting_point starting point for training -- default = 'fixed'
:param invalid if True (default), removes invalid SMILES
:param duplicates if True (default), removes duplicates
:param salts if True (default), removes salts
:param stereochem if True (default), removes stereochemistry
:param canonicalize if True (default), produces canonical SMILES
:param max_len maximum length of the SMILES to retain after pretreatment
:param min_len minimum length of the SMILES to retain after pretreatment
:param augmentation augmentation folds
    :return: None; the processed data is written to filename_out
"""
from preprocessor import Preprocessor
p = Preprocessor(filename_in)
print('Pre-processing of "' + filename_in + '" started.')
# user-defined pretreatment
if invalid:
p.remove_not_valid()
print(' invalid SMILES - removed.')
if duplicates:
p.remove_duplicates()
print(' duplicate SMILES - removed.')
if salts:
p.remove_salts()
print(' salts - removed.')
if stereochem:
p.remove_stereochem()
print(' stereochemistry - removed.')
if canonicalize:
p.canonicalize()
print(' canonicalized SMILES.')
# retains SMILES in the defined-length
p.remove_length(min_len, max_len)
# prepares the data based on the method type
dataname = filename_in.split('/')[-1]
if model_type == "ForwardRNN":
name = model_type
else:
name = model_type + "_" + starting_point
    if augmentation > 1 and starting_point == 'fixed':  # removes augmentation for fixed starting point
augmentation = 1
p.preprocess(name, aug=augmentation, length=max_len)
    if filename_out == '':
filename_out = '../data/' + dataname + '_' + name + '.csv'
# Store new file
p.save_data(filename_out)
    print('Processed data saved to ' + filename_out + '.')
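# Illustrative call (added sketch, not part of the original source); the input
# path is the default ChEMBL file named above, and the Preprocessor backend is
# assumed to be importable as in the function body.
# preprocess_data(filename_in='../data/chembl_smiles', model_type='BIMODAL',
#                 starting_point='fixed', augmentation=1)
# #=> writes '../data/chembl_smiles_BIMODAL_fixed.csv'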
| 20,976
|
def heading(start, end):
"""
    Return the compass bearing, in whole degrees clockwise from north, for
    travelling along a rhumb line (constant heading) from the point `start`
    to the point `end`, where each point is a (latitude, longitude) tuple
    in degrees.
"""
    start = (radians(start[0]), radians(start[1]))
    end = (radians(end[0]), radians(end[1]))
delta_lon = end[1] - start[1]
delta_lat = log(tan(pi/4 + end[0]/2)/tan(pi/4 + start[0]/2))
return int(round((360 + degrees(atan2(delta_lon, delta_lat))) % 360))
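# Illustrative values (added sketch, not part of the original source); assumes
# the math names used above (radians, degrees, log, tan, atan2, pi) are
# imported at module level. Bearings are in degrees clockwise from north.
# heading((0.0, 0.0), (10.0, 0.0))  #=> 0    (due north)
# heading((0.0, 0.0), (0.0, 10.0))  #=> 90   (due east)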
| 20,977
|
def get_frame_list(video, jump_size = 6, **kwargs):
"""
Returns list of frame numbers including first and last frame.
"""
    frame_numbers = list(range(0, video.frame_count, jump_size))
    last_frame_number = video.frame_count - 1
if frame_numbers[-1] != last_frame_number:
frame_numbers.append(last_frame_number)
return frame_numbers
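# Minimal self-contained demo (added sketch, not part of the original source);
# any object exposing a 'frame_count' attribute stands in for the video here.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_video = SimpleNamespace(frame_count=20)
    # range(0, 20, 6) yields [0, 6, 12, 18]; the last frame (19) is appended.
    assert get_frame_list(demo_video, jump_size=6) == [0, 6, 12, 18, 19]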
| 20,978
|
def build_xlsx_response(wb, title="report"):
""" Take a workbook and return a xlsx file response """
title = generate_filename(title, '.xlsx')
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
response = HttpResponse(
myfile.getvalue(),
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response
| 20,979
|
def top_k(*args, **kwargs):
""" See https://www.tensorflow.org/api_docs/python/tf/nn/top_k .
"""
return tensorflow.nn.top_k(*args, **kwargs)
| 20,980
|
def offline_evaluate(
predict_fn: Callable[[np.ndarray, Any], Dict[Text, np.ndarray]],
observe_fn: Callable[[np.ndarray, np.ndarray, np.ndarray, Any], Any],
reset_fn: Callable[..., Any],
train_fn: Callable[[Text], None] = None,
train_dir: Text = None,
enable_train: bool = False,
train_eval_iterations: int = 0,
online_eval_task: tasks.Task = None,
online_eval_planner: planners.Planner = None,
online_eval_episodes: int = 0,
eval_dir: Text = None,
model_dir: Text = None,
result_dir: Text = None,
episode_length: int = None,
num_episodes: int = 100,
prediction_horizon: int = 1,
batch: int = 128):
"""offline model evaluation."""
assert eval_dir, "eval_dir is required"
assert model_dir, "model_dir is required"
assert result_dir, "result_dir is required"
assert episode_length, "episode_length is required"
if enable_train:
assert train_dir, "train_dir is required for training"
assert train_eval_iterations, ("train_eval_iterations is required for "
"training")
for i in range(train_eval_iterations):
train_fn(train_dir)
result_dir_at_step = os.path.join(result_dir, "%d" % i)
eval_once(
result_dir=result_dir_at_step,
eval_dir=eval_dir,
episode_length=episode_length,
prediction_horizon=prediction_horizon,
batch=batch,
num_episodes=num_episodes,
reset_fn=reset_fn,
observe_fn=observe_fn,
predict_fn=predict_fn)
if online_eval_episodes:
summary_dir = os.path.join(result_dir, "online_eval")
episodes, predictions, score = simulate.simulate(
online_eval_task, online_eval_planner, online_eval_episodes)
train_eval.visualize(summary_dir, i, episodes, predictions,
{"score": score})
else:
eval_once(
result_dir=result_dir,
eval_dir=eval_dir,
episode_length=episode_length,
prediction_horizon=prediction_horizon,
batch=batch,
num_episodes=num_episodes,
reset_fn=reset_fn,
observe_fn=observe_fn,
predict_fn=predict_fn)
| 20,981
|
def determine_d_atoms_without_connectivity(zmat, coords, a_atoms, n):
"""
A helper function to determine d_atoms without connectivity information.
Args:
zmat (dict): The zmat.
coords (list, tuple): Just the 'coords' part of the xyz dict.
a_atoms (list): The determined a_atoms.
n (int): The 0-index of the atom in the zmat to be added.
Returns:
list: The d_atoms.
"""
d_atoms = [atom for atom in a_atoms]
for i in reversed(range(n)):
if i not in d_atoms and i in list(zmat['map'].keys()) and (i >= len(zmat['symbols']) or not is_dummy(zmat, i)):
angle = calculate_angle(coords=coords, atoms=[zmat['map'][z_index] for z_index in d_atoms[1:] + [i]])
if not is_angle_linear(angle):
d_atoms.append(i)
break
if len(d_atoms) < 4:
# try again and consider dummies
for i in reversed(range(n)):
if i not in d_atoms and i in list(zmat['map'].keys()):
angle = calculate_angle(coords=coords, atoms=[zmat['map'][z_index] for z_index in d_atoms[1:] + [i]])
if not is_angle_linear(angle):
d_atoms.append(i)
break
return d_atoms
| 20,982
|
def plot_confusion_matrix(y_true, y_pred,
pad_index=None, ymap=None, figsize=(20, 10),
show_total=['x', 'y'], show_zeros=True, show_empty_tags=False,
plot_title='Confusion Matrix', save_name=None):
"""
Generate matrix plot of confusion matrix with pretty annotations.
The plot image is saved to disk.
Args:
y_true: true labels of the data, with shape (nsamples,)
y_pred: label predictions, with shape (nsamples,)
pad_index: if not `None` and not present in `y_pred`, `pad_index`
will not be included in the plot.
ymap: dict: index -> tag.
if not `None`, map the predictions to their categorical labels.
            if `None`, the distinct label values present in `y_true` and
            `y_pred` are used.
figsize: tuple: the size of the figure plotted.
show_total: list of `str`. Where to display total number of
class occurrences in the corpus: diagonal and/or axes.
Up to all from `['diag', 'x', 'y']` can be chosen.
Default = `['x', 'y']` (horizontal and vertical axes respectively).
If `None`, total values are not displayed on the plot.
show_zeros: bool: whether to show zeros in the confusion matrix.
        show_empty_tags: only active when `ymap` is specified.
            If `True`, all tags, including those that occur neither
            in `y_true` nor in `y_pred`, are displayed
            (filled with `0` on both axes).
            NB! If `True`, `pad_index` will also be displayed even if specified.
Default: `False`, 'empty' tags are skipped.
plot_title: str: plot title, default title - 'Confusion Matrix'.
save_name: str: filename of figure file to save.
if `None`, image is not saved to disk.
"""
# if pad_index is not None and does not exist in y_pred, it's excluded
# from labels
mpl.style.use('default')
cm = confusion_matrix(y_true, y_pred)
if ymap:
if show_empty_tags:
for i in ymap.keys():
if i not in set(y_true+y_pred):
cm = np.insert(cm, i, 0, axis=0)
cm = np.insert(cm, i, 0, axis=1)
labels = ymap.values()
if not show_empty_tags:
labels = [ymap[i] if pad_index is not None and i != pad_index
and pad_index not in y_pred else
ymap[i]
for i in set(y_true+y_pred)]
else:
labels = [i if pad_index is not None and i != pad_index
and pad_index not in y_pred else
i
for i in set(y_true+y_pred)]
cm_sum = np.sum(cm, axis=1, keepdims=True)
cm_perc = np.divide(cm, cm_sum.astype(float), where=cm_sum!=0)*100
annot = np.empty_like(cm).astype(str)
nrows, ncols = cm.shape
for i in range(nrows):
for j in range(ncols):
c = cm[i, j]
p = cm_perc[i, j]
if i == j:
if show_total and 'diag' in show_total:
s = cm_sum[i]
annot[i, j] = '%.2f%%\n%d/%d' % (p, c, s)
else:
annot[i, j] = '%.2f%%\n%d' % (p, c)
elif c == 0:
if show_zeros:
annot[i, j] = '0'
else:
annot[i, j] = ''
else:
annot[i, j] = '%.2f%%\n%d' % (p, c)
total_labels = [str(i)+'\n'+str(n[0]) for i, n in zip(labels, cm_sum)]
cm = pd.DataFrame(cm,
index=total_labels if show_total and 'y' in show_total else labels,
columns=total_labels if show_total and 'x' in show_total else labels)
cm.index.name = 'Actual'
cm.columns.name = 'Predicted'
fig, ax = plt.subplots(figsize=figsize)
sns.heatmap(cm, annot=annot, fmt='', ax=ax)
plt.title(plot_title)
if save_name:
plt.savefig(save_name, bbox_inches='tight')
plt.show()
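# Illustrative call (added sketch, not part of the original source); the label
# indices, tag map and file name are made up for the example.
# plot_confusion_matrix(y_true=[0, 1, 1, 2, 2, 2],
#                       y_pred=[0, 1, 2, 2, 2, 1],
#                       ymap={0: 'NOUN', 1: 'VERB', 2: 'ADJ'},
#                       plot_title='POS tagging confusion matrix',
#                       save_name='confusion_matrix.png')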
| 20,983
|
def choose_domain(path: Path, domain: Optional[str], dot_mlf_core: dict = None):
"""
Prompts the user for the template domain.
Creates the .mlf_core file.
Prompts the user whether or not to create a Github repository
:param path: The path, the project should be created at
:param domain: Template domain
:param dot_mlf_core: Dictionary created from the .mlf_core.yml file. None if no .mlf_core.yml file was used.
"""
if not domain:
domain = mlf_core_questionary_or_dot_mlf_core(function='select',
question='Choose the project\'s domain',
choices=['mlflow', 'package'],
default='mlflow',
dot_mlf_core=dot_mlf_core,
to_get_property='domain')
switcher = {
'mlflow': MlflowCreator,
'package': PackageCreator
}
creator_obj = switcher.get(domain.lower())()
creator_obj.create_template(path, dot_mlf_core)
| 20,984
|
def all_of_them():
"""
Return page with all products with given name from API.
"""
if 'username' in session:
return render_template('productsearch.html', username=escape(session['username']), vars=lyst)
else:
        return "You are not logged in"
| 20,985
|
def test_downsample_handle_livetime_error(uncal_spec):
"""Test bad value of handle_livetime"""
with pytest.raises(ValueError):
uncal_spec.downsample(5, handle_livetime="asdf")
| 20,986
|
def is_blank(line):
"""Determines if a selected line consists entirely of whitespace."""
return whitespace_re.match(line) is not None
| 20,987
|
def _gff_line_map(line, params):
"""Map part of Map-Reduce; parses a line of GFF into a dictionary.
Given an input line from a GFF file, this:
- decides if the file passes our filtering limits
- if so:
- breaks it into component elements
- determines the type of attribute (flat, parent, child or annotation)
- generates a dictionary of GFF info which can be serialized as JSON
"""
    gff3_kw_pat = re.compile(r"\w+=")
def _split_keyvals(keyval_str):
"""Split key-value pairs in a GFF2, GTF and GFF3 compatible way.
GFF3 has key value pairs like:
count=9;gene=amx-2;sequence=SAGE:aacggagccg
GFF2 and GTF have:
Sequence "Y74C9A" ; Note "Clone Y74C9A; Genbank AC024206"
name "fgenesh1_pg.C_chr_1000003"; transcriptId 869
"""
quals = collections.defaultdict(list)
if keyval_str is None:
return quals
# ensembl GTF has a stray semi-colon at the end
if keyval_str[-1] == ';':
keyval_str = keyval_str[:-1]
# GFF2/GTF has a semi-colon with at least one space after it.
# It can have spaces on both sides; wormbase does this.
# GFF3 works with no spaces.
# Split at the first one we can recognize as working
parts = keyval_str.split(" ; ")
if len(parts) == 1:
parts = keyval_str.split("; ")
if len(parts) == 1:
parts = keyval_str.split(";")
# check if we have GFF3 style key-vals (with =)
is_gff2 = True
if gff3_kw_pat.match(parts[0]):
is_gff2 = False
key_vals = [p.split('=') for p in parts]
# otherwise, we are separated by a space with a key as the first item
else:
pieces = [p.strip().split(" ") for p in parts]
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
for key, val in key_vals:
val = (val[1:-1] if (len(val) > 0 and val[0] == '"'
and val[-1] == '"') else val)
if val:
quals[key].extend(val.split(','))
# if we don't have a value, make this a key=True/False style
# attribute
else:
quals[key].append('true')
for key, vals in quals.items():
quals[key] = [urllib.unquote(v) for v in vals]
return quals, is_gff2
def _nest_gff2_features(gff_parts):
"""Provide nesting of GFF2 transcript parts with transcript IDs.
exons and coding sequences are mapped to a parent with a transcript_id
in GFF2. This is implemented differently at different genome centers
and this function attempts to resolve that and map things to the GFF3
way of doing them.
"""
# map protein or transcript ids to a parent
for transcript_id in ["transcript_id", "transcriptId", "proteinId"]:
try:
gff_parts["quals"]["Parent"] = \
gff_parts["quals"][transcript_id]
break
except KeyError:
pass
# case for WormBase GFF -- everything labelled as Transcript
        if "Transcript" in gff_parts["quals"]:
# parent types
if gff_parts["type"] in ["Transcript"]:
if not gff_parts["id"]:
gff_parts["id"] = gff_parts["quals"]["Transcript"][0]
# children types
elif gff_parts["type"] in ["intron", "exon", "three_prime_UTR",
"coding_exon", "five_prime_UTR", "CDS", "stop_codon",
"start_codon"]:
gff_parts["quals"]["Parent"] = gff_parts["quals"]["Transcript"]
return gff_parts
strand_map = {'+' : 1, '-' : -1, '?' : None, None: None}
line = line.strip()
if line[:2] == "##":
return [('directive', line[2:])]
elif line[0] != "#":
parts = line.split('\t')
should_do = True
if params.limit_info:
for limit_name, limit_values in params.limit_info.items():
cur_id = tuple([parts[i] for i in
params.filter_info[limit_name]])
if cur_id not in limit_values:
should_do = False
break
if should_do:
assert len(parts) >= 9, line
gff_parts = [(None if p == '.' else p) for p in parts]
gff_info = dict()
# collect all of the base qualifiers for this item
quals, is_gff2 = _split_keyvals(gff_parts[8])
gff_info["is_gff2"] = is_gff2
if gff_parts[1]:
quals["source"].append(gff_parts[1])
if gff_parts[5]:
quals["score"].append(gff_parts[5])
if gff_parts[7]:
quals["phase"].append(gff_parts[7])
gff_info['quals'] = dict(quals)
gff_info['rec_id'] = gff_parts[0]
# if we are describing a location, then we are a feature
if gff_parts[3] and gff_parts[4]:
gff_info['location'] = [int(gff_parts[3]) - 1,
int(gff_parts[4])]
gff_info['type'] = gff_parts[2]
gff_info['id'] = quals.get('ID', [''])[0]
gff_info['strand'] = strand_map[gff_parts[6]]
if is_gff2:
gff_info = _nest_gff2_features(gff_info)
# features that have parents need to link so we can pick up
# the relationship
                if 'Parent' in gff_info['quals']:
final_key = 'child'
elif gff_info['id']:
final_key = 'parent'
# Handle flat features
else:
final_key = 'feature'
# otherwise, associate these annotations with the full record
else:
final_key = 'annotation'
return [(final_key, (simplejson.dumps(gff_info) if params.jsonify
else gff_info))]
return []
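# Illustrative behaviour (added sketch, not part of the original source); the
# GFF3 line is hypothetical and 'params' stands for the parameter object used
# above (with limit_info, filter_info and jsonify attributes).
# line = "chrI\tSGD\tgene\t335\t649\t.\t+\t.\tID=YAL069W;Name=YAL069W"
# _gff_line_map(line, params)
# #=> [('parent', {'rec_id': 'chrI', 'type': 'gene', 'id': 'YAL069W',
# #                'location': [334, 649], 'strand': 1, 'is_gff2': False, ...})]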
| 20,988
|
def smoothEvolve(problem, orig_point, first_ref, second_ref):
"""Evolves using RVEA with abrupt change of reference vectors."""
pop = Population(problem, assign_type="empty", plotting=False)
try:
pop.evolve(slowRVEA, {"generations_per_iteration": 200, "iterations": 15})
except IndexError:
return pop.archive
try:
pop.evolve(
slowRVEA,
{
"generations_per_iteration": 10,
"iterations": 20,
"old_point": orig_point,
"ref_point": first_ref,
},
)
except IndexError:
return pop.archive
try:
pop.evolve(
slowRVEA,
{
"generations_per_iteration": 10,
"iterations": 20,
"old_point": first_ref,
"ref_point": second_ref,
},
)
except IndexError:
return pop.archive
return pop.archive
| 20,989
|
def combine_color_channels(discrete_rgb_images):
"""
    Combine discrete r, g, b channel images into RGB images.
:param discrete_rgb_images:
:return:
"""
color_imgs = []
for r, g, b in zip(*discrete_rgb_images):
# pca output is float64, positive and negative. normalize the images to [0, 255] rgb
r = (255 * (r - np.max(r)) / -np.ptp(r)).astype(int)
g = (255 * (g - np.max(g)) / -np.ptp(g)).astype(int)
b = (255 * (b - np.max(b)) / -np.ptp(b)).astype(int)
color_imgs.append(cv2.merge((r, g, b)))
return color_imgs
| 20,990
|
def decohere_earlier_link(tA, tB, wA, wB, T_coh):
"""Applies decoherence to the earlier generated of the two links.
Parameters
----------
    tA : float
        Waiting time of one of the links.
    tB : float
        Waiting time of the other link.
    wA : float
        Fidelity corresponding to the link with waiting time tA.
    wB : float
        Fidelity corresponding to the link with waiting time tB.
    T_coh : float
        Memory coherence time. If set to 0, there is no decay.
    Returns
    -------
    Tuple (float : wA, float : wB) after decoherence.
"""
delta_t = abs(tA - tB)
if(tA < tB):
wA = wern_after_memory_decoherence(wA, delta_t, T_coh)
elif(tB < tA):
wB = wern_after_memory_decoherence(wB, delta_t, T_coh)
return wA, wB
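# Illustrative behaviour (added sketch, not part of the original source);
# relies on wern_after_memory_decoherence(), defined elsewhere in the module.
# With tA < tB, only the earlier link (A) decays, for the time difference 3.0:
# decohere_earlier_link(tA=2.0, tB=5.0, wA=0.95, wB=0.95, T_coh=100.0)
# #=> (wern_after_memory_decoherence(0.95, 3.0, 100.0), 0.95)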
| 20,991
|
def train(
network: RNN,
data: np.ndarray,
epochs: int = 10,
_n_seqs: int = 10,
_n_steps: int = 50,
    lr: float = 0.001,
    clip: int = 5,
    val_frac: float = 0.2,
cuda: bool = True,
print_every: int = 10,
):
"""Train RNN."""
network.train()
opt = torch.optim.Adam(network.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
val_idx = int(len(data) * (1 - val_frac))
data, val_data = data[:val_idx], data[val_idx:]
if cuda:
network.cuda()
step = 0
train_loss = []
validation_loss = []
for i in range(epochs):
h = network.init_hidden(_n_seqs)
for x, y in get_batches(data, _n_seqs, _n_steps):
step += 1
# One-hot encode, make Torch tensors
x = one_hot_encode(x, network.vocab)
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
if cuda:
inputs, targets = inputs.cuda(), targets.cuda()
h = tuple([m.data for m in h])
network.zero_grad()
output, h = network.forward(inputs, h)
loss = criterion(output, targets.view(_n_seqs * _n_steps))
loss.backward()
# Avoid exploding gradients
nn.utils.clip_grad_norm_(network.parameters(), clip)
opt.step()
if step % print_every == 0:
# Validation loss
val_h = network.init_hidden(_n_seqs)
val_losses = []
for x, y in get_batches(val_data, _n_seqs, _n_steps):
x = one_hot_encode(x, network.vocab)
x, y = torch.from_numpy(x), torch.from_numpy(y)
val_h = tuple([m.data for m in val_h])
inputs, targets = x, y
if cuda:
inputs, targets = inputs.cuda(), targets.cuda()
output, val_h = network.forward(inputs, val_h)
val_loss = criterion(output, targets.view(_n_seqs * _n_steps))
val_losses.append(val_loss.item())
train_loss.append(loss.item())
validation_loss.append(np.mean(val_losses))
print(
f"Epoch: {i + 1} / {epochs},",
f"Step: {step},",
f"Loss: {loss.item():.4f},",
"Val Loss: {:.4f}".format(np.mean(val_losses)),
)
return train_loss, validation_loss
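# Illustrative call (added sketch, not part of the original source); the RNN
# constructor arguments are hypothetical and the corpus is assumed to be an
# integer-encoded numpy array prepared elsewhere.
# net = RNN(vocab, n_hidden=512, n_layers=2)
# train_loss, val_loss = train(net, encoded_text, epochs=10,
#                              _n_seqs=128, _n_steps=100, cuda=False)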
| 20,992
|
def simple_lunar_phase(jd):
"""
This just does a quick-and-dirty estimate of the Moon's phase given the date.
"""
lunations = (jd - 2451550.1) / LUNAR_PERIOD
percent = lunations - int(lunations)
phase_angle = percent * 360.
delta_t = phase_angle * LUNAR_PERIOD / 360.
moon_day = int(delta_t + 0.5)
phase = get_phase_description(phase_angle)
bgcolor = get_moon_color(delta_t)
return dict(
angle = phase_angle,
day = moon_day,
phase = phase,
days_since_new_moon = delta_t,
bgcolor = bgcolor,
)
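# Illustrative behaviour (added sketch, not part of the original source);
# relies on LUNAR_PERIOD, get_phase_description() and get_moon_color() from
# the surrounding module. 2451550.1 is the new-moon epoch used above, so about
# 7.4 days later the phase angle is near 90 degrees (roughly first quarter,
# assuming the usual synodic period of about 29.53 days):
# simple_lunar_phase(2451550.1 + 7.4)['day']  #=> 7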
| 20,993
|
def fetch_traj(data, sample_index, colum_index):
""" Returns the state sequence. It also deletes the middle index, which is
the transition point from history to future.
"""
# data shape: [sample_index, time, feature]
traj = np.delete(data[sample_index, :, colum_index:colum_index+1], history_len-1, axis=1)
return traj.flatten()
| 20,994
|
def validate_date(period: str, start: bool = False) -> pd.Timestamp:
"""Validate the format of date passed as a string.
:param period: Date in string. If None, date of today is assigned.
:type period: str
:param start: Whether argument passed is a starting date or an ending date,
defaults to False.
:type start: bool, optional
:raises IntegerDateInputError: If integer type object is passed.
:return: Date with format YYYY-MM-DD or YY-MM-DD.
:rtype: pandas.Timestamp
"""
if isinstance(period, int):
raise IntegerDateInputError('Input type of period should be in string.')
if period is None:
date = _convert_none_to_date(start)
else:
        try:
            # probe the two-digit-year format without rebinding `period`,
            # so string_to_date() below always receives the original string
            date_format = '%y-%m-%d'
            datetime.strptime(period, date_format)
except ValueError:
date_format = '%Y-%m-%d'
finally:
date = string_to_date(period, date_format)
return date
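# Illustrative calls (added sketch, not part of the original source); they rely
# on the helpers string_to_date() and _convert_none_to_date() defined elsewhere.
# validate_date('2021-01-15')  #=> Timestamp('2021-01-15 00:00:00')
# validate_date(20210115)      #=> raises IntegerDateInputError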
| 20,995
|
def rec_module_mic(echograms, mic_specs):
"""
Apply microphone directivity gains to a set of given echograms.
Parameters
----------
echograms : ndarray, dtype = Echogram
Target echograms. Dimension = (nSrc, nRec)
mic_specs : ndarray
Microphone directions and directivity factor. Dimension = (nRec, 4)
Returns
-------
rec_echograms : ndarray, dtype = Echogram
Echograms subjected to microphone gains. Dimension = (nSrc, nRec)
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
Each row of `mic_specs` is expected to be described as [x, y, z, alpha],
    with (x, y, z) being the unit vector of the mic orientation.
`alpha` must be contained in the range [0(dipole), 1(omni)],
so that directivity is expressed as: d(theta) = a + (1-a)*cos(theta).
"""
nSrc = echograms.shape[0]
nRec = echograms.shape[1]
_validate_echogram_array(echograms)
_validate_ndarray_2D('mic_specs', mic_specs, shape0=nRec, shape1=C+1)
mic_vecs = mic_specs[:,:C]
mic_coeffs = mic_specs[:,-1]
rec_echograms = copy.copy(echograms)
    # Do nothing if all mics are omnidirectional (directivity coefficient == 1)
if not np.all(mic_coeffs == 1):
for ns in range(nSrc):
for nr in range(nRec):
nRefl = len(echograms[ns, nr].value)
# Get vectors from source to receiver
rec_vecs = echograms[ns, nr].coords
rec_vecs = rec_vecs / np.sqrt(np.sum(np.power(rec_vecs,2), axis=1))[:,np.newaxis]
mic_gains = mic_coeffs[nr] + (1 - mic_coeffs[nr]) * np.sum(rec_vecs * mic_vecs[nr,:], axis=1)
rec_echograms[ns, nr].value = echograms[ns, nr].value * mic_gains[:,np.newaxis]
_validate_echogram_array(rec_echograms)
return rec_echograms
| 20,996
|
def is_string_like(obj): # from John Hunter, types-free version
"""Check if obj is string."""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
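# Minimal self-contained demo (added sketch, not part of the original source).
if __name__ == "__main__":
    assert is_string_like("abc") is True
    assert is_string_like(42) is False
    assert is_string_like(["a", "b"]) is False  # list + '' raises TypeError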
| 20,997
|
def get_firefox_start_cmd():
"""Return the command to start firefox."""
start_cmd = ""
if platform.system() == "Darwin":
start_cmd = ("/Applications/Firefox.app/Contents/MacOS/firefox-bin")
elif platform.system() == "Windows":
start_cmd = _find_exe_in_registry() or _default_windows_location()
else:
# Maybe iceweasel (Debian) is another candidate...
for ffname in ["firefox2", "firefox", "firefox-3.0", "firefox-4.0"]:
LOGGER.debug("Searching for '%s'...", ffname)
start_cmd = which(ffname)
if start_cmd is not None:
break
return start_cmd
| 20,998
|
def divide():
"""Handles division, returns a string of the answer"""
a = int(request.args["a"])
b = int(request.args["b"])
quotient = str(int(operations.div(a, b)))
return quotient
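# Illustrative request (added sketch, not part of the original source); assumes
# this function is registered as a Flask route (e.g. @app.route('/divide')) and
# that operations.div performs the division.
# GET /divide?a=10&b=4  ->  "2"   (the quotient is truncated to int before str)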
| 20,999
|