| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
import logging
import re

LOGGER = logging.getLogger(__name__)

def get_domain_from_url(url):
    """Extract the domain part from a URL."""
    domain = ''
    # url like http://a.b.com/ads/asds
    if re.search(r'://.*?/', url):
        try:
            domain = url.split('//', 1)[1].split('/', 1)[0]
        except IndexError as e:
            LOGGER.warning('Get domain error,%s,%s' % (url, e))
    # url like http://a.b.com?a=adsd
    elif re.search(r'://.*?\?', url):
        try:
            domain = url.split('//', 1)[1].split('?', 1)[0]
        except IndexError as e:
            LOGGER.warning('Get domain error,%s,%s' % (url, e))
    elif re.search(r'://.*?', url):
        try:
            domain = url.split('//', 1)[1].split('/', 1)[0]
        except IndexError as e:
            LOGGER.warning('Get domain error,%s,%s' % (url, e))
    # url like a.b.com/a/b/c, a.b.com, /a/b/c
    elif re.search(r'/', url):
        value = url.split('/', 1)[0]
        # keep the prefix only if it looks like a host name
        if value not in ('', '.', '..') and '.' in value:
            domain = value
    return domain
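
# A minimal usage sketch (not part of the original snippet): with the regex
# branches above, typical inputs resolve to the host part as shown.
if __name__ == '__main__':
    assert get_domain_from_url('http://a.b.com/ads/asds') == 'a.b.com'
    assert get_domain_from_url('http://a.b.com?a=adsd') == 'a.b.com'
    assert get_domain_from_url('a.b.com/a/b/c') == 'a.b.com'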
|
6b364a74c86337108d21539c4a5678af2e6ea48a
| 3,644,900
|
import json

import webob

def render_response(body=None, status=None, headers=None):
    """Build a WSGI response message."""
    headers = [] if headers is None else list(headers)
    if body is None:
        body = ''
        status = status or (204, 'No Content')
    else:
        body = json.dumps(body)  # Python 3 json.dumps() takes no 'encoding' argument
        headers.append(('Content-Type', 'application/json'))
        status = status or (200, 'OK')
resp = webob.Response(body=body,
status='%s %s' % status,
headerlist=headers)
return resp
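
# A minimal usage sketch, assuming webob is installed: a JSON-serialisable body
# yields a 200 response with a JSON Content-Type, no body yields 204.
resp = render_response({'status': 'ok'})
# resp.status == '200 OK' and the Content-Type header is application/json;
# render_response() with no body would produce a '204 No Content' status.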
|
b31128db57ca99a840d4adce6f3116f629d8a0b8
| 3,644,901
|
def tests(request):
"""Print a list of tests."""
test_list = Test.objects.all()
tag_list = Tag.objects.all().order_by(u'name')
# check if we need to filter the test list based on tags
# defaults to empty list because we're always passing the list to the template
tags = request.GET.get(u'tag', [])
if tags:
# plus means only those tests that are tagged with every tag
# TODO: support commas, for aggregating stuff that includes at least one tag in the list
tags = tags.split(u'+')
log.debug(u'displaying tests for search tags: {}'.format(tags))
# order the list by name if search tags are specified
# this list contains tests if they have any of the tags passed in, so it's still 'unfiltered'
test_list = test_list.filter(tags__name__in=tags).distinct().order_by(u'name')
# return only the tests that have every tag specified
test_list = filter_tags(test_list, tags)
else:
# order the list by newest -> oldest if there are no tags specified
test_list = test_list.order_by(u'-id')
paginator = Paginator(test_list, 20) # decides how many results to show per page
# https://docs.djangoproject.com/en/dev/topics/pagination/
page = request.GET.get(u'page')
try:
tests = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
tests = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
tests = paginator.page(paginator.num_pages)
return render(request, u'django_bdd/bddscenarios.html', {u'title': u'Scenarios', u'tests': tests, u'tag_list': tag_list, u'searched_tags': tags, u'label_classes': LABEL_CLASSES})
|
18c4077878f3864cf0771fd77a5ae324d9ee4630
| 3,644,902
|
def nashpobench_benchmark(params):
"""
The underlying tabulated blackbox does not have an `elapsed_time_attr`,
but only a `time_this_resource_attr`.
"""
config_space = dict(
CONFIGURATION_SPACE,
epochs=params['max_resource_level'],
dataset_name=params['dataset_name'])
return {
'script': None,
'metric': METRIC_VALID_LOSS,
'mode': 'min',
'resource_attr': RESOURCE_ATTR,
'elapsed_time_attr': METRIC_ELAPSED_TIME,
'max_resource_attr': 'epochs',
'config_space': config_space,
'cost_model': None,
'supports_simulated': True,
'blackbox_name': BLACKBOX_NAME,
}
|
74e1e619cc8c4a3201e41820f5f641c651a5f283
| 3,644,903
|
def horizontal_plate_natual_convection_2(Gr, Pr):
    """Hot side facing downward, or cold side facing upward; valid for 1e5 < Ra < 1e10."""
Ra = Gr * Pr
return 0.27 * Ra**0.25
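
# Worked example (illustrative values): for air near a plate with Gr = 2e7 and
# Pr = 0.7, Ra = 1.4e7 lies inside the stated 1e5 < Ra < 1e10 range, and the
# correlation gives Nu = 0.27 * (1.4e7)**0.25, roughly 16.5.
Nu = horizontal_plate_natual_convection_2(2e7, 0.7)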
|
bc44118e871e977a7ecb6a877f7232b837d1bf0e
| 3,644,904
|
import dill as pickle
from drivebuildclient import create_client, send_request
from config import SIM_NODE_PORT, FIRST_SIM_PORT
from typing import Tuple
def run_test_case(test_case: TestCase) -> Tuple[Simulation, Scenario, ExtThread, SimulationID]:
"""
This method starts the actual simulation in a separate thread.
Additionally it already calculates and attaches all information that is need by this node and the separate
thread before calling _start_simulation(...).
"""
sid = SimulationID()
response = send_request(create_client("localhost", SIM_NODE_PORT), b"generateSid", [])
sid.ParseFromString(response)
sim = Simulation(sid, pickle.dumps(test_case), FIRST_SIM_PORT + run_test_case.counter)
run_test_case.counter += 1 # FIXME Add a lock?
# Make sure there is no folder of previous tests having the same sid that got not propery removed
bng_scenario, thread = sim._start_simulation(test_case)
return sim, bng_scenario, thread, sid
|
b8428763cb611b57069d74f7ebb5844f5b90df9d
| 3,644,905
|
import typing

import pandas as pd
def translate_value_data(
new_values: list,
options: dict,
parent_value: str,
translate_dict: typing.Optional[dict],
values: list,
):
"""Translates value data if necessary and checks if it falls within the Castor optiongroup"""
for value in values:
if pd.isnull(parent_value):
if translate_dict:
value = translate_dict.get(str(value), "Error: no translation provided")
new_values.append(options.get(str(value), "Error: non-existent option"))
else:
if translate_dict:
value = translate_dict.get(str(value), parent_value)
new_values.append(options.get(str(value), parent_value))
return new_values
|
ccfc64e54fae868877c6852ebeeadae11bb1221b
| 3,644,906
|
def makeVocabFromText(
filelist=None,
max_size=10*10000,
least_freq=2,
trunc_len=100,
filter_len=0,
print_log=None,
vocab_file=None,
encoding_format='utf-8',
lowercase = True):
""" the core of this function is getting a word2count dict and writing it to a .txt file,then use Vocab to read it """
if print_log:
print_log("%s: the max vocab size = %d, least_freq is %d truncate length = %d" \
% ( filelist[0], max_size, least_freq , trunc_len ))
else:
print("%s: the max vocab size = %d, least_freq is %d truncate length = %d" \
% ( filelist[0], max_size, least_freq , trunc_len ))
"""tokenizing sentence and add word to word2count dict"""
word2count={}
for filename in filelist:
with open(filename, 'r', encoding = encoding_format) as f:
for sent in f:
tokens = sent.strip().split()
if 0 < filter_len < len(sent.strip().split()):
continue
if trunc_len > 0:
tokens = tokens[:trunc_len]
for word in tokens:
word = word if not lowercase else word.lower()
if word not in word2count:
word2count[word] = 1
else:
word2count[word] += 1
return makeVocabFormDict(word2count=word2count,max_size=max_size,least_freq=least_freq,\
vocab_file=vocab_file,encoding_format=encoding_format,filename=filelist[0])
|
2a3c0c42ee5c541d19bbe695c12f977fd29dfeaf
| 3,644,907
|
import numpy as np
import scipy.io as sio

def import_supplemental(file_path):
"""Get data from a supplemental file"""
data = sio.loadmat(file_path)
data['move'] = np.squeeze(data['move'])
data['rep'] = np.squeeze(data['rep'])
data['emg_time'] = np.squeeze(data['emg_time'])
return data
|
4544a0ee292cb4e323c31545009c4d1e17ca98e1
| 3,644,908
|
def _unpickle_injected_object(base_class, mixin_class, class_name=None):
"""
Callable for the pickler to unpickle objects of a dynamically created class
based on the InjectableMixin. It creates the base object from the original
base class and re-injects the mixin class when unpickling an object.
:param type base_class: The base class of the pickled object before adding
the mixin via injection.
:param type mixin_class: The :class:`InjectableMixin` subclass that was
injected into the pickled object.
:param str class_name: The class name of the pickled object's dynamically
created class.
:return: The initial unpickled object (before the pickler restores the
object's state).
"""
obj = base_class.__new__(base_class, ())
return mixin_class.inject_into_object(obj, class_name)
|
1821509506ad31dcdb21f07a2b83c544ff3c3eb3
| 3,644,909
|
from pathlib import Path
import re
def parse_endfblib(libdir):
"""Parse ENDF/B library
Parametres:
-----------
libdir : str
directory with ENDFB file structure"""
filepaths = []
nuclidnames = []
endf_dir = Path(libdir)
neutron_files = tuple((endf_dir / "neutrons").glob("*endf"))
for n in neutron_files:
filepaths.append(n.absolute())
nuclidnames.append(n.name.split('_')[1] +
re.split("^0*", n.name.split('_')[2][:-5])[-1])
return nuclidnames, filepaths
|
3587b849132e4b2eeb6ad184bf58755340473bd9
| 3,644,910
|
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
def get_parser():
"""Get parser object."""
parser = ArgumentParser(
description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-f",
"--file",
dest="filename",
help="Read this PDF file",
metavar="FILE",
required=True,
)
return parser
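
# A minimal usage sketch: parsing the required --file argument
# ('report.pdf' is a placeholder file name).
args = get_parser().parse_args(['-f', 'report.pdf'])
assert args.filename == 'report.pdf'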
|
31f973d357762fa706191caa1d76f59c6d2b3d67
| 3,644,911
|
def build_val_col_list(tableName):
"""Build and return a schema to use for the sample data."""
statement = "( SELECT column_name, data_type, case when data_type='NUMBER' THEN NVL(DATA_PRECISION,38) + DATA_SCALE ELSE DATA_LENGTH END AS ORACLE_LENGTH FROM dba_tab_columns WHERE table_name = '" + tableName + "' order by column_id asc )"
buildColTypeList = spark.read.format("jdbc") \
.option("url","jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
.option("dbtable", statement) \
.option("user","system") \
.option("password","oracle") \
.option("driver","oracle.jdbc.driver.OracleDriver") \
.load()
xList = buildColTypeList.collect()
return xList
|
d6602078a458fa3f36de3558c8044749caf7f4d5
| 3,644,912
|
from datetime import datetime
def save_image(user, filename, image_tif, process, latency, size, hist):
"""
Function that saves image to Mongo database
Args:
user: username
filename: desired file name in database
image_tif: tiff image in byte format
process: processing algorithm applied to image
latency: time to process image
size: image size
hist: histogram values of image
Returns:
outstr: Confirmation that image has been saved
"""
    time = datetime.now()
Image_Dict = {
"File": filename,
"Image": image_tif,
"Process": process,
"Timestamp": time,
"Latency": latency,
"Size": size,
"Histogram": hist,
}
Image_List = user.ImageFile
Image_List.append(Image_Dict)
user.filenames.append(filename)
user.save()
outstr = "Image saved successfully"
return outstr
|
ea416fcdc09c71aef56250a8e0b7f558e8e8a884
| 3,644,913
|
def run_simulation_with_params(
sim_params, replicate, repeats=10, should_perform_gwas=True):
"""Runs simulation with given params and returns result object.
"""
try:
simulation_result = run_simulation(
simulation_params=sim_params)
except Exception as e:
        print(sim_params)
raise e
result = {
'num_snps_considered': sim_params.num_snps_considered,
'num_samples': sim_params.num_samples,
'num_snps_with_effect': sim_params.num_snps_with_effect,
'replicate': replicate,
'total_fitness_effect': np.prod(simulation_result['snp_effect']),
'mage_cycles': sim_params.mage_cycles,
'population_size': sim_params.population_size
}
# Apply linear modeling.
lm_result = run_linear_modeling(
simulation_result['wgs_samples'],
simulation_result['wgs_samples_doubling_times'],
repeats=repeats)
lm_eval_results = evaluate_modeling_result(
simulation_result, lm_result)
lm_eval_results_df = lm_eval_results['results_df']
result.update({
'lm_pearson_r': lm_eval_results['pearson_r'],
'lm_pearson_p': lm_eval_results['p_value'],
})
result.update(
calculate_modeling_metrics(
lm_eval_results_df, 'linear_model_coef',
results_prefix='lm_'))
# Maybe perform GWAS.
if should_perform_gwas:
gwas_results_df = run_gwas(
simulation_result['wgs_samples'],
simulation_result['wgs_samples_doubling_times'])
gwas_eval_results = evaluate_gwas_result(
gwas_results_df, lm_eval_results_df)
gwas_eval_results_df = gwas_eval_results['results_df']
result.update({
'gwas_pearson_r': gwas_eval_results['pearson_r'],
'gwas_pearson_p': gwas_eval_results['p_value'],
})
result.update(
calculate_modeling_metrics(
gwas_eval_results_df, 'gwas_coef', results_prefix='gwas_'))
# Perform enrichment analysis on final timepoint.
enrichment_result_df = run_enrichment_analysis(simulation_result)
result.update(
calculate_enrichment_metrics(
enrichment_result_df))
return result
|
a7a1383708c1b6e69c975488b03704698f9b1066
| 3,644,914
|
import colorsys
def hsl_to_rgb(hsl):
"""Convert hsl colorspace values to RGB."""
# Convert hsl to 0-1 ranges.
h = hsl[0] / 359.
s = hsl[1] / 100.
l = hsl[2] / 100.
hsl = (h, s, l)
    # colorsys expects HLS argument order and returns values between 0 and 1
    tmp = colorsys.hls_to_rgb(h, l, s)
# convert to 0 to 255
r = int(round(tmp[0] * 255))
g = int(round(tmp[1] * 255))
b = int(round(tmp[2] * 255))
return (r, g, b)
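
# Quick sanity check: pure red is (0, 100, 50) in this HSL convention.
assert hsl_to_rgb((0, 100, 50)) == (255, 0, 0)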
|
4417ce8468e71b7139b57fe270809c7030b2c3df
| 3,644,915
|
import os
def init_celery(app=None):
"""
Initialize celery.
"""
app = app or create_app(os.environ.get('APP_MODE'))
celery.conf.update(app.config.get("CELERY", {}))
class ContextTask(celery.Task):
"""
Make celery tasks work with Flask app context
"""
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
celery.Task = ContextTask
return celery
|
8d0c6f6bc4882976812abe9967b56720b745b46a
| 3,644,916
|
import itertools
import typing as ty
async def test_filterfalse_matches_itertools_filterfalse(
arange: ty.Type[ty.AsyncIterator[int]], stop: int
):
"""Ensure that our async filterfalse implementation follows the standard
implementation.
"""
async def _pair(x):
return (x % 2) == 0
target = list(itertools.filterfalse(lambda x: (x % 2) == 0, range(stop)))
result = [x async for x in none.collection.a.filterfalse(_pair, arange(stop))]
assert result == target
|
59fd932f3906eb411e21207d920f752f7f78df44
| 3,644,917
|
def extract_buffer_info(mod, param_dict):
"""
This function is to read the tvm.IRModule that
contains Relay to TIR compiled IRModule. Thereafter,
this will extract the buffer information as the shape
and constant data (if any).
Parameters
----------
mod : tvm.IRModule
The NPU TIR IRModule.
param_dict : dict
A dictionary containing param idx --> const numpy.NDArray
Returns
-------
dict
a dictionary of buffer names --> BufferInfo
"""
buffer_info = dict()
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
for idx, const_data in param_dict.items():
param = primfunc.params[idx]
buffer_info[primfunc.buffer_map[param].data] = BufferInfo(
const_data, const_data.shape, const_data.dtype, BufferType.constant
)
for param in primfunc.params:
if primfunc.buffer_map[param].data not in buffer_info.keys():
buffer_info[primfunc.buffer_map[param].data] = BufferInfo(
None,
primfunc.buffer_map[param].shape,
primfunc.buffer_map[param].dtype,
BufferType.input_or_output,
)
def populate_allocate_buffer_info(stmt):
if isinstance(stmt, tvm.tir.stmt.Allocate):
allocate = stmt
buffer_info[allocate.buffer_var] = BufferInfo(
None,
allocate.extents,
allocate.dtype,
BufferType.scratch,
)
tvm.tir.stmt_functor.post_order_visit(primfunc.body, populate_allocate_buffer_info)
return buffer_info
|
291f091d06aa768ceb28f2738823f5eeb336c47e
| 3,644,918
|
def find_external_nodes(digraph):
"""Return a set of external nodes in a directed graph.
External nodes are node that are referenced as a dependency not defined as
a key in the graph dictionary.
"""
external_nodes = set()
for ni in digraph:
for nj in digraph[ni]:
if nj not in digraph:
external_nodes.add(nj)
return external_nodes
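
# A minimal usage sketch: 'd' is referenced as a dependency but never defined
# as a key, so it is the only external node.
assert find_external_nodes({'a': ['b', 'd'], 'b': ['c'], 'c': []}) == {'d'}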
|
de63af1b649e450214907dd704bde782820d393d
| 3,644,919
|
import six
def strip(val):
"""
Strip val, which may be str or iterable of str.
For str input, returns stripped string, and for iterable input,
returns list of str values without empty str (after strip) values.
"""
if isinstance(val, six.string_types):
return val.strip()
try:
return list(filter(None, map(strip, val)))
except TypeError:
return val
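
# A minimal usage sketch of both accepted input shapes.
assert strip('  hi  ') == 'hi'
assert strip([' a ', '   ', 'b ']) == ['a', 'b']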
|
893986e69f6d64167f45daf30dacb72f4b7f2bff
| 3,644,920
|
import os
from datetime import datetime

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
def durations_histo(filename: str, v1_2_1, v1_5_2):
"""Generate all the figures for the histograms.
Returns a dictionary of dict with dict containing the full filename.
"""
ensure_path(os.path.dirname(filename))
filenames = dict()
filenames['dev'] = dict()
filenames['train'] = dict()
labels_in_the_datasets = set(list(
v1_2_1['events_counts_dev'].keys()) + list(
v1_2_1['events_counts_train'].keys()) + list(
v1_5_2['events_counts_dev'].keys()) + list(
v1_5_2['events_counts_train'].keys()))
labels_in_the_datasets = sorted(
list(set([label.lower() for label in labels_in_the_datasets])))
dev_seizures_durations_by_type_v1_2_1 = {
seizure_type: np.array(
[event['duration'] for event in v1_2_1['events_dev']
if event['event'] == seizure_type])
for seizure_type in list(v1_2_1['events_counts_dev'].keys())}
train_seizures_durations_by_type_v1_2_1 = {
seizure_type: np.array(
[event['duration'] for event in v1_2_1['events_train']
if event['event'] == seizure_type])
for seizure_type in list(v1_2_1['events_counts_train'].keys())}
dev_seizures_durations_by_type_v1_5_2 = v1_5_2[
'dev_seizures_durations_by_type']
train_seizures_durations_by_type_v1_5_2 = v1_5_2[
'train_seizures_durations_by_type']
with PdfPages(filename + ".pdf") as pdf:
d = pdf.infodict()
d['Title'] = 'Seizures duration histograms'
d['Author'] = 'Vincent Stragier'
d['Subject'] = 'Compilation of all the duration histograms'
d['Keywords'] = 'seizures epilepsy histogram TUSZ EEG'
        d['CreationDate'] = datetime(2020, 10, 21)
        d['ModDate'] = datetime.today()
for seizure_type in labels_in_the_datasets:
plt.figure(
"Histograms for {0} seizures ('{1}') - dev set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()], seizure_type),
figsize=(16/2.54*2, 9/2.54*2))
pdf.attach_note(
"Histograms for {0} seizures ('{1}') - dev set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()],
seizure_type))
plt.rc('text', usetex=True)
plt.rcParams["font.family"] = 'sans-serif'
plt.rcParams['font.sans-serif'] = "Calibri"
plt.suptitle(
"\\Huge Histograms for {0} seizures ('{1}') - dev set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()],
seizure_type),
fontsize=15)
if seizure_type.upper()\
in dev_seizures_durations_by_type_v1_2_1.keys():
data = dev_seizures_durations_by_type_v1_2_1[
seizure_type.upper()]
mu = data.mean()
median = np.median(data)
sigma = data.std()
minimum = data.min()
maximum = data.max()
plt.subplot(2, 2, 1)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#6eb055ff')
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum, "\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Dev set v1.2.1')
plt.subplot(2, 2, 3)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#6eb055ff')
plt.xlim(0, mu)
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Dev set v1.2.1 [0, %.2f]' % mu)
if seizure_type in dev_seizures_durations_by_type_v1_5_2.keys():
data = dev_seizures_durations_by_type_v1_5_2[seizure_type]
mu = data.mean()
median = np.median(data)
sigma = data.std()
minimum = data.min()
maximum = data.max()
plt.subplot(2, 2, 2)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#575656ff')
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Dev set v1.5.2')
plt.subplot(2, 2, 4)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#575656ff')
plt.xlim(0, mu)
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Dev set v1.5.2 [0, %.2f]' % mu)
# tight_layout docs: [left, bottom, right, top]
# in normalized (0, 1) figure
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(
"_".join([filename, seizure_type, 'dev.pdf']),
format="PDF",
transparent=True)
filenames['dev'][seizure_type] = "_".join(
[filename, seizure_type, 'dev.pdf']).replace('\\', '/')
pdf.savefig(transparent=True)
plt.close()
plt.figure(
"Histograms for {0} seizures ('{1}') - train set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()],
seizure_type),
figsize=(16/2.54*2, 9/2.54*2))
plt.rc('text', usetex=True)
plt.rcParams["font.family"] = 'sans-serif'
plt.rcParams['font.sans-serif'] = "Calibri"
plt.suptitle(
"\\Huge Histograms for {0}"
" seizures ('{1}') - train set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()],
seizure_type),
fontsize=15)
if seizure_type.upper()\
in train_seizures_durations_by_type_v1_2_1.keys():
data = train_seizures_durations_by_type_v1_2_1[
seizure_type.upper()]
mu = data.mean()
median = np.median(data)
sigma = data.std()
minimum = data.min()
maximum = data.max()
plt.subplot(2, 2, 1)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#6eb055ff')
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Train set v1.2.1')
plt.subplot(2, 2, 3)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#6eb055ff')
plt.xlim(0, mu)
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Train set v1.2.1 [0, %.2f]' % mu)
if seizure_type in train_seizures_durations_by_type_v1_5_2.keys():
data = train_seizures_durations_by_type_v1_5_2[seizure_type]
mu = data.mean()
median = np.median(data)
sigma = data.std()
minimum = data.min()
maximum = data.max()
plt.subplot(2, 2, 2)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#575656ff')
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Train set v1.5.2')
plt.subplot(2, 2, 4)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#575656ff')
plt.xlim(0, mu)
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Train set v1.5.2 [0, %.2f]' % mu)
# tight_layout docs: [left, bottom, right, top]
# in normalized (0, 1) figure
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig("_".join(
[filename, seizure_type, 'train.pdf']),
format="PDF",
transparent=True)
filenames['train'][seizure_type] = "_".join(
[filename, seizure_type, 'train.pdf']).replace('\\', '/')
pdf.savefig(transparent=True)
plt.close()
return filenames
|
6d05a7af348f4b88bf0437feb81a0f5172514d87
| 3,644,921
|
import iris.cube

def construct_area_cube(var_name, area_data, global_atts, dim_coords):
"""Construct the new area cube """
dim_coords_list = []
for i, coord in enumerate(dim_coords):
dim_coords_list.append((coord, i))
if var_name == 'areacello':
long_name = 'Grid-Cell Area for Ocean Variables'
else:
long_name = 'Grid-Cell Area for Atmospheric Grid Variables'
area_cube = iris.cube.Cube(area_data,
standard_name='cell_area',
long_name=long_name,
var_name=var_name,
units='m2',
attributes=global_atts,
dim_coords_and_dims=dim_coords_list)
return area_cube
|
07c01610f800202ccbdebf834648840b77d47fb7
| 3,644,922
|
import numpy as np
import pandas as pd

def _switch_obs_2_time_dim(ds):
"""Function to create a single time variable that is the midpoint of the
ObsPack averaging interval, and make it the xarray coordinate. """
# Get the midpoint of the average pulled from the model:
midpoint = pd.to_datetime(ds.averaging_interval_start.data) + \
np.asarray(ds.averaging_interval.data) / 2
# Make it the time midpoint a new variable in the dataset.
t = midpoint.to_series().reset_index(drop=True)
ds['time'] = ("obs", t)
# Tell xarray that we want time to be a coordinate.
ds = ds.set_coords('time')
# And tell it to replace Obs # with time as the preferred dimension.
ds = ds.swap_dims({"obs": "time"})
return ds
|
6fa53b3f1a0472f45fa59c11b5d869786b5a9f4f
| 3,644,923
|
def bitfield_v(val, fields, col=15):
"""
return a string of bit field components formatted vertically
val: the value to be split into bit fields
fields: a tuple of (name, output_function, (bit_hi, bit_lo)) tuples
"""
fmt = '%%-%ds: %%s' % col
s = []
for (name, func, field) in fields:
s.append(fmt % (name, func(bits(val, field))))
return '\n'.join(s)
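
# A minimal usage sketch. `bits` is not shown in this snippet; the helper below
# is a plausible stand-in that extracts an inclusive (hi, lo) bit field.
def bits(val, field):
    hi, lo = field
    return (val >> lo) & ((1 << (hi - lo + 1)) - 1)

print(bitfield_v(0xA5, (('low nibble', hex, (3, 0)), ('high nibble', hex, (7, 4)))))
# low nibble     : 0x5
# high nibble    : 0xa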
|
139b9328190f61a1cd649826bfde806e565d4201
| 3,644,924
|
from typing import Tuple
from typing import Iterable
def split_housenumber_line(line: str) -> Tuple[str, bool, bool, str, Tuple[int, str], str,
Tuple[int, str], Iterable[str], Tuple[int, str]]:
"""
Augment TSV Overpass house numbers result lines to aid sorting.
It prepends two bools to indicate whether an entry is missing either a house number, a house name
or a conscription number.
Entries lacking either a house number or all of the above IDs come first.
The following fields are interpreted numerically: oid, house number, conscription number.
"""
field = line.split('\t')
oid = get_array_nth(field, 0)
street = get_array_nth(field, 1)
housenumber = get_array_nth(field, 2)
postcode = get_array_nth(field, 3)
housename = get_array_nth(field, 4)
cons = get_array_nth(field, 5)
tail = field[6:] if len(field) > 6 else []
have_housenumber = housenumber != ''
have_houseid = have_housenumber or housename != '' or cons != ''
return (postcode, have_houseid, have_housenumber, street,
split_house_number(housenumber),
housename, split_house_number(cons), tail, split_house_number(oid))
|
c3d93d459c9b004d199725b11e1b92340e6154b9
| 3,644,925
|
import math
def tau_polinomyal_coefficients(z):
"""
Coefficients (z-dependent) for the log(tau) formula from
Raiteri C.M., Villata M. & Navarro J.F., 1996, A&A 315, 105-115
"""
log_z = math.log10(z)
log_z_2 = log_z ** 2
a0 = 10.13 + 0.07547 * log_z - 0.008084 * log_z_2
a1 = -4.424 - 0.7939 * log_z - 0.1187 * log_z_2
a2 = 1.262 + 0.3385 * log_z + 0.05417 * log_z_2
return [a0, a1, a2]
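
# A usage sketch, assuming (as in Raiteri et al. 1996) that the coefficients
# feed a quadratic in log10 of the stellar mass:
# log10(tau) = a0 + a1*x + a2*x**2 with x = log10(M/Msun).
a0, a1, a2 = tau_polinomyal_coefficients(0.02)  # roughly solar metallicity
x = math.log10(1.0)                             # 1 solar mass
log_tau = a0 + a1 * x + a2 * x ** 2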
|
ebef7d773eeb400ef87553fc5838ee2cb97d0669
| 3,644,926
|
from typing import Optional
# NOTE: `this` below refers to kopf's sub-handler decorator factory (kopf.on.this),
# defined alongside this function in the original source; the stray `import this`
# (the Zen of Python Easter egg) has been removed.
def register( # lgtm[py/similar-function]
fn: callbacks.ResourceHandlerFn,
*,
id: Optional[str] = None,
errors: Optional[errors_.ErrorsMode] = None,
timeout: Optional[float] = None,
retries: Optional[int] = None,
backoff: Optional[float] = None,
cooldown: Optional[float] = None, # deprecated, use `backoff`
registry: Optional[registries.ResourceChangingRegistry] = None,
labels: Optional[bodies.Labels] = None,
annotations: Optional[bodies.Annotations] = None,
when: Optional[callbacks.WhenHandlerFn] = None,
) -> callbacks.ResourceHandlerFn:
"""
Register a function as a sub-handler of the currently executed handler.
Example::
@kopf.on.create('zalando.org', 'v1', 'kopfexamples')
def create_it(spec, **kwargs):
for task in spec.get('tasks', []):
def create_single_task(task=task, **_):
pass
kopf.register(id=task, fn=create_single_task)
This is efficiently an equivalent for::
@kopf.on.create('zalando.org', 'v1', 'kopfexamples')
def create_it(spec, **kwargs):
for task in spec.get('tasks', []):
@kopf.on.this(id=task)
def create_single_task(task=task, **_):
pass
"""
decorator = this(
id=id, registry=registry,
errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,
labels=labels, annotations=annotations, when=when,
)
return decorator(fn)
|
d2e539c97a4946f819616d0f596e68e190a68c78
| 3,644,927
|
def pd_read_csv_using_metadata(filepath_or_buffer, table_metadata, ignore_partitions=False, *args, **kwargs):
"""
Use pandas to read a csv imposing the datatypes specified in the table_metadata
Passes through kwargs to pandas.read_csv
If ignore_partitions=True, assume that partitions are not columns in the dataset
"""
if ignore_partitions:
table_metadata = _remove_paritions_from_table_metadata(table_metadata)
dtype = _pd_dtype_dict_from_metadata(table_metadata, ignore_partitions)
parse_dates = _pd_date_parse_list_from_metadatadata(table_metadata)
return pd.read_csv(filepath_or_buffer, dtype = dtype, parse_dates = parse_dates, *args, **kwargs)
|
bddc8da985c7e252effe566c640bca25acd01d6a
| 3,644,928
|
def read_parfile_dirs_props(filename):
"""Reads BRUKER parfile-dirs.prop file to in order to get correct mapping
of the topspin parameters.
Args:
filename: input Bruker parfile-dirs.prop file
Returns:
A dict mapping parameter classes to the their respective directory.
E.g. {'PY_DIRS': ['py/user', 'py']}
"""
    with open(filename) as fh:
        dirs = fh.read()
par_dc = {}
dirs = dirs.replace('\\\n', '').replace(';', ' ')
for line in dirs.split('\n'):
if len(line) > 0 and line[0] != '#':
key, values = line.split('=')
par_dc[key] = values.split()
    if verbose_level > 1:
        print('Dictionary for BRUKER search paths:')
        for key in par_dc.keys():
            print(key, par_dc[key])
return par_dc
|
ca54dc948923826bb81af94e41be42caadfe6004
| 3,644,929
|
def get_all_playlist_items(playlist_id, yt_client):
"""
Get a list of video ids of videos currently in playlist
"""
return yt_client.get_playlist_items(playlist_id)
|
c7a8cc806b552b1853eba1d8223aa00225d5539e
| 3,644,930
|
def _get_last_measurement(object_id: int):
"""
Get the last measurement of object with given ID.
Args:
object_id (int): Object ID whose last measurement to look for.
Returns:
(GamMeasurement): The last measurement of the object, or None if it doesn't exist.
"""
    # .first() returns None when no row matches, unlike .get(), which raises,
    # so the documented "None if it doesn't exist" behaviour actually holds.
    last_mea = (GamMeasurement.select()
                .where(GamMeasurement.mea_object == object_id)
                .order_by(GamMeasurement.mea_id.desc())
                .first())
    return last_mea if last_mea else None
|
a5ee460f57912bb885ae0cb534f6195c92983aad
| 3,644,931
|
def get_library_isotopes(acelib_path):
"""
Returns the isotopes in the cross section library
Parameters
----------
acelib_path : str
Path to the cross section library
(i.e. '/home/luke/xsdata/endfb7/sss_endfb7u.xsdata')
Returns
-------
iso_array: array
array of isotopes in cross section library:
"""
lib_isos_list = []
with open(acelib_path, 'r') as f:
lines = f.readlines()
for line in lines:
iso = line.split()[0]
lib_isos_list.append(iso)
return lib_isos_list
|
d93d319b84c02b8156c5bad0998f5943a5bbe8ae
| 3,644,932
|
from typing import Mapping
def read_wires(data: str) -> Mapping[int, Wire]:
"""Read the wiring information from data."""
wires = {}
for line in data.splitlines():
wire_name, wire = get_wire(line)
wires[wire_name] = wire
return wires
|
87c8b82bceab0252204ababf842ca0b00ab6a059
| 3,644,933
|
from math import pi, sin

def back_ease_out(p):
"""Modeled after overshooting cubic y = 1-((1-x)^3-(1-x)*sin((1-x)*pi))"""
f = 1 - p
return 1 - (f * f * f - f * sin(f * pi))
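
# Quick sanity check of the easing curve: it starts at ~0, ends at exactly 1,
# and overshoots slightly above 1 in between (the "back" behaviour).
assert abs(back_ease_out(0.0)) < 1e-9
assert back_ease_out(1.0) == 1.0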
|
9946b8929211df4624ecc201ce026b981ffb3d0c
| 3,644,934
|
def configure_estimator_params(init_args, train_args):
"""Validates the initialization and training arguments and constructs a
`params` dictionary for creating a TensorFlow Estimator object."""
params = {}
init_val = ArgumentsValidator(init_args, "Initialization arguments")
with init_val:
params["rm_dir_on_init"] = init_val.get("rm_dir", ATYPE_BOOL, True)
params["use_ortho_weights"] = init_val.get("use_ortho_weights", ATYPE_BOOL, True)
params["max_lsuv_iters"] = init_val.get("max_lsuv_iters", [ATYPE_NONE, ATYPE_INT], True)
params["lsuv_tolerance"] = init_val.get("lsuv_tolerance", ATYPE_FLOAT, True)
params["init_alpha"] = init_val.get("init_alpha", ATYPE_FLOAT, True)
train_val = ArgumentsValidator(train_args, "Training arguments")
with train_val:
params["save_time"] = train_val.get("save_time", ATYPE_FLOAT, True)
params["val_throttle_time"] = train_val.get("val_throttle_time", ATYPE_FLOAT, True)
params["learning_rate"] = train_val.get("learning_rate", ATYPE_FLOAT, True)
params["sgd_momentum"] = train_val.get("sgd_momentum", [ATYPE_NONE, ATYPE_FLOAT], True)
params["sgd_use_nesterov"] = train_val.get("sgd_use_nesterov", ATYPE_BOOL, True)
params["use_rmsprop"] = train_val.get("use_rmsprop", ATYPE_BOOL, True)
params["rmsprop_decay"] = train_val.get("rmsprop_decay", ATYPE_FLOAT, True)
params["rmsprop_momentum"] = train_val.get("rmsprop_momentum", ATYPE_FLOAT, True)
params["rmsprop_epsilon"] = train_val.get("rmsprop_epsilon", ATYPE_FLOAT, True)
params["reg_weight_decay"] = train_val.get("reg_weight_decay", [ATYPE_NONE, ATYPE_FLOAT], True)
params["cost_type"] = train_val.get("cost_type", ATYPE_STRING, True).lower()
params["max_grad_norm"] = train_val.get("max_grad_norm", [ATYPE_NONE, ATYPE_FLOAT], True)
params["parallel_grad_gate"] = train_val.get("parallel_grad_gate", ATYPE_BOOL, True)
return params
|
f132eaa4077dd197faed72d6805f15255b7dd680
| 3,644,935
|
def bit_lshift(bin_name, bit_offset, bit_size, shift, policy=None):
"""Creates a bit_lshift_operation to be used with operate or operate_ordered.
Server left shifts bitmap starting at bit_offset for bit_size by shift bits.
No value is returned.
Args:
bin_name (str): The name of the bin containing the map.
bit_offset (int): The offset where the bits will start being shifted.
bit_size (int): The number of bits that will be shifted by shift places.
shift (int): How many bits to shift by.
policy (dict, optional): The bit_policy policy dictionary. See: See :ref:`aerospike_bit_policies`. default: None
Returns:
A dictionary usable in operate or operate_ordered. The format of the dictionary
should be considered an internal detail, and subject to change.
"""
return {
OP_KEY: aerospike.OP_BIT_LSHIFT,
BIN_KEY: bin_name,
BIT_OFFSET_KEY: bit_offset,
BIT_SIZE_KEY: bit_size,
VALUE_KEY: shift,
POLICY_KEY: policy
}
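
# Illustrative only: the returned dict is meant to be passed to the Aerospike
# client's operate()/operate_ordered() calls. `client` and `key` below are
# hypothetical names, and the aerospike module and this helper's OP_KEY/BIN_KEY
# constants are assumed to be importable.
ops = [bit_lshift('bits_bin', bit_offset=0, bit_size=8, shift=3)]
# client.operate(key, ops)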
|
3e8224a3f48eade9ee01a43819b4c6aa88ef308e
| 3,644,936
|
import numpy as np

def compute_ccas(sigma_xx, sigma_xy, sigma_yx, sigma_yy, epsilon,
verbose=True):
"""Main cca computation function, takes in variances and crossvariances.
This function takes in the covariances and cross covariances of X, Y,
preprocesses them (removing small magnitudes) and outputs the raw results of
the cca computation, including cca directions in a rotated space, and the
cca correlation coefficient values.
Args:
sigma_xx: 2d numpy array, (num_neurons_x, num_neurons_x)
variance matrix for x
sigma_xy: 2d numpy array, (num_neurons_x, num_neurons_y)
crossvariance matrix for x,y
sigma_yx: 2d numpy array, (num_neurons_y, num_neurons_x)
crossvariance matrix for x,y (conj) transpose of sigma_xy
sigma_yy: 2d numpy array, (num_neurons_y, num_neurons_y)
variance matrix for y
epsilon: small float to help with stabilizing computations
verbose: boolean on whether to print intermediate outputs
Returns:
[ux, sx, vx]: [numpy 2d array, numpy 1d array, numpy 2d array]
ux and vx are (conj) transposes of each other, being
the canonical directions in the X subspace.
sx is the set of canonical correlation coefficients-
how well corresponding directions in vx, Vy correlate
with each other.
[uy, sy, vy]: Same as above, but for Y space
invsqrt_xx: Inverse square root of sigma_xx to transform canonical
directions back to original space
invsqrt_yy: Same as above but for sigma_yy
x_idxs: The indexes of the input sigma_xx that were pruned
by remove_small
y_idxs: Same as above but for sigma_yy
"""
(sigma_xx, sigma_xy, sigma_yx, sigma_yy,
x_idxs, y_idxs) = remove_small(sigma_xx, sigma_xy, sigma_yx, sigma_yy, epsilon)
numx = sigma_xx.shape[0]
numy = sigma_yy.shape[0]
if numx == 0 or numy == 0:
return ([0, 0, 0], [0, 0, 0], np.zeros_like(sigma_xx),
np.zeros_like(sigma_yy), x_idxs, y_idxs)
if verbose:
print("adding eps to diagonal and taking inverse")
sigma_xx += epsilon * np.eye(numx)
sigma_yy += epsilon * np.eye(numy)
inv_xx = np.linalg.pinv(sigma_xx)
inv_yy = np.linalg.pinv(sigma_yy)
if verbose:
print("taking square root")
invsqrt_xx = positivedef_matrix_sqrt(inv_xx)
invsqrt_yy = positivedef_matrix_sqrt(inv_yy)
if verbose:
print("dot products...")
arr = np.dot(invsqrt_xx, np.dot(sigma_xy, invsqrt_yy))
if verbose:
print("trying to take final svd")
u, s, v = np.linalg.svd(arr)
if verbose:
print("computed everything!")
return [u, np.abs(s), v], invsqrt_xx, invsqrt_yy, x_idxs, y_idxs
|
67827220cdbdd41250a8a40f140c8c21e0625df7
| 3,644,937
|
import numpy as np

def generate_samples(
segment_mask: np.ndarray, num_of_samples: int = 64, p: float = 0.5
) -> np.ndarray:
"""Generate samples by randomly selecting a subset of the segments.
Parameters
----------
segment_mask : np.ndarray
The mask generated by `create_segments()`: An array of shape (image_width, image_height).
num_of_samples : int
The number of samples to generate.
p : float
The probability for each segment to be removed from a sample.
Returns
-------
samples : np.ndarray
A two-dimensional array of size (num_of_samples, num_of_segments).
"""
num_of_segments = int(np.max(segment_mask) + 1)
return np.random.binomial(n=1, p=p, size=(num_of_samples, num_of_segments))
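
# A minimal usage sketch: a 4x4 mask with two segments (ids 0 and 1) yields an
# (8, 2) binary matrix, one row of keep/drop flags per sample.
toy_mask = np.repeat([[0, 1]], 4, axis=0).repeat(2, axis=1)
samples = generate_samples(toy_mask, num_of_samples=8, p=0.5)
assert samples.shape == (8, 2)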
|
99ee42abf95bd338714e42beee42610e3ac2f09d
| 3,644,938
|
def get_mix_bandpassed(bp_list, comp, param_dict_file=None,bandpass_shifts=None,
ccor_cen_nus=None, ccor_beams=None, ccor_exps = None,
normalize_cib=True,param_dict_override=None,bandpass_exps=None,nus_ghz=None,btrans=None,
dust_beta_param_name='beta_CIB',
radio_beta_param_name='beta_radio',
override_lbeam_bnus=None):
"""
Get mixing factors for a given component that have "color corrections" that account for
a non-delta-function bandpass and for possible variation of the beam within the bandpass.
If the latter is provided, the resulting output is of shape [Nfreqs,nells], otherwise
the output is of shape [Nfreqs,].
Parameters
----------
bp_list : list of strings
a list of strings of length Nfreqs where each string is the filename for a file
containing a specification of the bandpass for that frequency channel. For each
file, the first column is frequency in GHz and the second column is the transmission
whose overall normalization does not matter.
comp : string
a string specifying the component whose mixing is requested. Currently, the following are
supported (1) CMB or kSZ (considered identical, and always returns ones)
(2) tSZ (3) mu (4) rSZ (5) CIB (6) radio
param_dict_file : string, optional
filename of a YAML file used to create a dictionary of SED parameters and values
(only needed for some SEDs). If None, defaults to parameters specified in
input/fg_SEDs_default_params.yml.
bandpass_shifts : list of floats, optional
A list of floats of length [Nfreqs,] specifying how much in GHz to shift the
entire bandpass. Each value can be positive (shift right) or negative (shift left).
If None, no shift is applied and the bandpass specified in the files is used as is.
ccor_cen_nus : list of floats, optional
If not None, this indicates that the dependence of the beam on frequency with the
bandpass should be taken into account. ccor_cen_nus will then be interpreted as a
[Nfreqs,] length list of the "central frequencies" of each bandpass in GHz.
The provided beams in ccor_beams for each channel are then scaled by
(nu/nu_central)**ccor_exp where ccor_exp defaults to -1.
ccor_beams : list of array_like, optional
Only used if ccor_cen_nus is not None. In that mode, ccor_beams is interpreted as
an [Nfreqs,] length list where each element is a 1d numpy array specifying the
beam transmission starting from ell=0 and normalized to one at ell=0.
The provided beams for each channel are then scaled by
(nu/nu_central)**ccor_exp where ccor_exp defaults to -1 and nu_central is specified
through ccor_cen_nus. If any list element is None, no scale dependent color correction
is applied for that frequency channel. See get_scaled_beams for more information.
ccor_exps : list of floats, optional
        Only used if ccor_cen_nus is not None. Defaults to -1 for each frequency channel.
This controls how the beam specified in ccor_beams for the central frequencies
specified in ccor_cen_nus is scaled to other frequencies.
"""
if bandpass_shifts is not None and np.any(np.array(bandpass_shifts)!=0):
print("WARNING: shifted bandpasses provided.")
assert (comp is not None)
assert (bp_list is not None)
N_freqs = len(bp_list)
if ccor_cen_nus is not None:
assert len(ccor_cen_nus)==N_freqs
assert len(ccor_beams)==N_freqs
lmaxs = []
for i in range(N_freqs):
if ccor_beams[i] is not None:
assert ccor_beams[i].ndim==1
lmaxs.append( ccor_beams[i].size )
if len(lmaxs)==0:
ccor_cen_nus = None
shape = N_freqs
else:
lmax = max(lmaxs)
shape = (N_freqs,lmax)
if ccor_exps is None: ccor_exps = [-1]*N_freqs
elif override_lbeam_bnus is not None:
lbeam,bnus = override_lbeam_bnus
lmax = lbeam.size
shape = (N_freqs,lmax)
else:
shape = N_freqs
if (comp == 'CIB' or comp == 'rSZ' or comp == 'radio'):
if param_dict_file is None:
p = default_dict
else:
p = read_param_dict_from_yaml(param_dict_file)
if (comp == 'CMB' or comp == 'kSZ'): #CMB (or kSZ)
output = np.ones(shape) #this is unity by definition, since we're working in Delta T units [uK_CMB]; output ILC map will thus also be in uK_CMB
for i in range(N_freqs):
if(bp_list[i] == None): #this case is appropriate for HI or other maps that contain no CMB-relevant signals (and also no CIB); they're assumed to be denoted by None in bp_list
output[i] = 0.
return output
else:
output = np.zeros(shape)
for i,bp in enumerate(bp_list):
if (bp_list[i] is not None):
if nus_ghz is None:
nu_ghz, trans = np.loadtxt(bp, usecols=(0,1), unpack=True)
else:
nu_ghz = nus_ghz
trans = btrans
if bandpass_shifts is not None: nu_ghz = nu_ghz + bandpass_shifts[i]
if bandpass_exps is not None: trans = trans * nu_ghz**bandpass_exps[i]
lbeam = 1
bnus = 1
# It turns out scaling the beam is actually the slowest part of the calculation
# so we allow pre-calculated ones to be provided
if override_lbeam_bnus is not None:
lbeam,bnus = override_lbeam_bnus
else:
if ccor_cen_nus is not None:
if ccor_beams[i] is not None:
lbeam = ccor_beams[i]
ells = np.arange(lbeam.size)
cen_nu_ghz = ccor_cen_nus[i]
bnus = get_scaled_beams(ells,lbeam,cen_nu_ghz,nu_ghz,ccor_exp=ccor_exps[i]).swapaxes(0,1)
assert np.all(np.isfinite(bnus))
if (comp == 'tSZ' or comp == 'mu' or comp == 'rSZ'):
# Thermal SZ (y-type distortion) or mu-type distortion or relativistic tSZ
# following Sec. 3.2 of https://arxiv.org/pdf/1303.5070.pdf
# -- N.B. IMPORTANT TYPO IN THEIR EQ. 35 -- see https://www.aanda.org/articles/aa/pdf/2014/11/aa21531-13.pdf
mixs = get_mix(nu_ghz, comp,
param_dict_file=param_dict_file, param_dict_override=param_dict_override,
dust_beta_param_name=dust_beta_param_name,radio_beta_param_name=radio_beta_param_name)
val = np.trapz(trans * dBnudT(nu_ghz) * bnus * mixs, nu_ghz) / np.trapz(trans * dBnudT(nu_ghz), nu_ghz) / lbeam
# this is the response at each frequency channel in uK_CMB for a signal with y=1 (or mu=1)
elif (comp == 'CIB'):
# following Sec. 3.2 of https://arxiv.org/pdf/1303.5070.pdf
# -- N.B. IMPORTANT TYPO IN THEIR EQ. 35 -- see https://www.aanda.org/articles/aa/pdf/2014/11/aa21531-13.pdf
# CIB SED parameter choices in dict file: Tdust_CIB [K], beta_CIB, nu0_CIB [GHz]
# N.B. overall amplitude is not meaningful here; output ILC map (if you tried to preserve this component) would not be in sensible units
mixs = get_mix(nu_ghz, 'CIB_Jysr',
param_dict_file=param_dict_file, param_dict_override=param_dict_override,
dust_beta_param_name=dust_beta_param_name,radio_beta_param_name=radio_beta_param_name)
vnorm = np.trapz(trans * dBnudT(nu_ghz), nu_ghz)
val = (np.trapz(trans * mixs * bnus , nu_ghz) / vnorm) / lbeam
# N.B. this expression follows from Eqs. 32 and 35 of
# https://www.aanda.org/articles/aa/pdf/2014/11/aa21531-13.pdf ,
# and then noting that one also needs to first rescale the CIB emission
# in Jy/sr from nu0_CIB to the "nominal frequency" nu_c that appears in
# those equations (i.e., multiply by get_mix(nu_c, 'CIB_Jysr')).
# The resulting cancellation leaves this simple expression which has no dependence on nu_c.
elif (comp == 'radio'):
# same logic/formalism as used for CIB component immediately above this
# radio SED parameter choices in dict file: beta_radio, nu0_radio [GHz]
mixs = get_mix(nu_ghz, 'radio_Jysr',
param_dict_file=param_dict_file, param_dict_override=param_dict_override,
dust_beta_param_name=dust_beta_param_name,radio_beta_param_name=radio_beta_param_name)
val = (np.trapz(trans * mixs * bnus , nu_ghz) / np.trapz(trans * dBnudT(nu_ghz), nu_ghz)) / lbeam
else:
print("unknown component specified")
raise NotImplementedError
if (ccor_cen_nus is not None) and (ccor_beams[i] is not None): val[lbeam==0] = 0
output[i] = val
assert np.all(np.isfinite(val))
elif (bp_list[i] is None):
#this case is appropriate for HI or other maps that contain no CMB-relevant signals (and also no CIB); they're assumed to be denoted by None in bp_list
output[i] = 0.
if (comp == 'CIB' or comp == 'radio') and normalize_cib:
#overall amplitude not meaningful, so divide by max to get numbers of order unity;
# output gives the relative conversion between CIB (or radio) at different frequencies, for maps in uK_CMB
omax = output.max(axis=0)
ret = output / omax
if (ccor_cen_nus is not None): ret[:,omax==0] = 0
else:
ret = output
assert np.all(np.isfinite(ret))
return ret
|
d4693e41c755dd1067c371bfa740ce1436dfc85a
| 3,644,939
|
import numpy as np

def partition(data, label_name, ratio):
    """ Partitions data set according to a provided ratio.
    params:
    data - The data set in a pandas data frame
    label_name - the name of the column in the data set that contains the labels
    ratio - the training/total data ratio
    returns:
    training_data - The data set to train on
    training_labels - Indexed labels for training set
    testing_data - The data set to test on
    testing_labels - Indexed labels for testing set """
data = data.loc[np.random.permutation(data.index)]
partition_idx = int(data.shape[0] * ratio)
train, test = np.split(data, [partition_idx])
def splitDataLabels(data):
"""Separates labels from data."""
labels = data[label_name].to_frame()
data = data.drop(columns = [label_name])
return data , labels
train_data, train_label = splitDataLabels(train)
test_data, test_label = splitDataLabels(test)
return train_data, train_label, test_data, test_label
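
# A minimal usage sketch on a tiny frame (pandas assumed available): an 80/20
# split of 10 rows gives 8 training rows and 2 testing rows, with the label
# column separated out.
import pandas as pd
df = pd.DataFrame({'x': range(10), 'label': [0, 1] * 5})
tr_x, tr_y, te_x, te_y = partition(df, 'label', 0.8)
assert len(tr_x) == 8 and len(te_x) == 2 and 'label' not in tr_x.columns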
|
6f00c8df9e5fb42f4e3fb01744215214e732f441
| 3,644,940
|
import logging
import os

import matplotlib.cm as cmap
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import numpy as np
from scipy.ndimage import median_filter

logger = logging.getLogger(__name__)

def find_background2(data, mask, channels, apertureset_lst,
method='poly', scale='linear', scan_step=200,
xorder=2, yorder=2, maxiter=5, upper_clip=3, lower_clip=3,
extend=True, display=True, fig_file=None, reg_file=None):
"""Subtract the background for an input FITS image.
Args:
data (:class:`numpy.ndarray`): Input data image.
mask (:class:`numpy.ndarray`): Mask of input data image.
channels (list): List of channels as strings.
apertureset_lst (dict): Dict of :class:`~edrs.echelle.trace.ApertureSet`
at different channels.
method (str): Method of finding background light.
scale (str): Scale of the 2-D polynomial fitting. If 'log', fit the
polynomial in the logrithm scale.
scan_step (int): Steps of scan in pixels.
xorder (int): Order of 2D polynomial along the main dispersion
direction (only applicable if **method** = "poly").
yorder (int): Order of 2D polynomial along the cross-dispersion
direction (only applicable if **method** = "poly").
maxiter (int): Maximum number of iteration of 2D polynomial fitting
(only applicable if **method** = "poly").
upper_clip (float): Upper sigma clipping threshold (only applicable if
**method** = "poly").
lower_clip (float): Lower sigma clipping threshold (only applicable if
**method** = "poly").
extend (bool): Extend the grid to the whole CCD image if *True*.
display (bool): Display figures on the screen if *True*.
fig_file (str): Name of the output figure. No image file generated if
*None*.
reg_file (string): Name of the output DS9 region file. No file generated
if *None*.
Returns:
:class:`numpy.ndarray`: Image of background light.
"""
plot = (display or fig_file is not None)
plot_paper_fig = False
h, w = data.shape
meddata = median_filter(data, size=(3,3), mode='reflect')
xnodes, ynodes, znodes = [], [], []
# find the minimum and maximum aperture number
min_aper = min([min(apertureset_lst[ch].keys()) for ch in channels])
max_aper = max([max(apertureset_lst[ch].keys()) for ch in channels])
# generate the horizontal scan list
x_lst = np.arange(0, w-1, scan_step)
# add the last column to the list
if x_lst[-1] != w-1:
x_lst = np.append(x_lst, w-1)
# find intra-order pixels
_message_lst = ['Column, N (between), N (extend), N (removed), N (total)']
for x in x_lst:
xsection = meddata[:,x]
inter_aper = []
prev_newy = None
# loop for every aperture
for aper in range(min_aper, max_aper+1):
# for a new aperture, initialize the count of channel
count_channel = 0
for ich, channel in enumerate(channels):
# check every channel in this frame
if aper in apertureset_lst[channel]:
count_channel += 1
this_newy = apertureset_lst[channel][aper].position(x)
if count_channel == 1 and prev_newy is not None:
# this channel is the first channel in this aperture and
# there is a previous y
mid_newy = (prev_newy + this_newy)//2
i1 = min(h-1, max(0, int(prev_newy)))
i2 = min(h-1, max(0, int(this_newy)))
#if len(inter_aper)==0 or \
# abs(mid_newy - inter_aper[-1])>scan_step*0.7:
# if i2-i1>0:
if i2-i1>0:
mid_newy = i1 + xsection[i1:i2].argmin()
inter_aper.append(mid_newy)
prev_newy = this_newy
inter_aper = np.array(inter_aper)
# count how many nodes found between detected orders
n_nodes_inter = inter_aper.size
# if extend = True, expand the grid with polynomial fitting to
# cover the whole CCD area
n_nodes_extend = 0
if extend:
if x==2304:
_fig = plt.figure(dpi=150)
_ax = _fig.gca()
for _x in inter_aper:
_ax.axvline(x=_x,color='g', ls='--',lw=0.5, alpha=0.6)
_ax.plot(data[:, x],'b-',lw=0.5)
_fig2 = plt.figure(dpi=150)
_ax2 = _fig2.gca()
print(inter_aper)
coeff = np.polyfit(np.arange(inter_aper.size), inter_aper, deg=3)
if x== 2304:
_ax2.plot(np.arange(inter_aper.size), inter_aper,'go', alpha=0.6)
_newx = np.arange(0, inter_aper.size, 0.1)
_ax2.plot(_newx, np.polyval(coeff, _newx),'g-')
# find the points after the end of inter_aper
ii = inter_aper.size-1
new_y = inter_aper[-1]
while(new_y<h-1):
ii += 1
new_y = int(np.polyval(coeff,ii))
inter_aper = np.append(inter_aper,new_y)
n_nodes_extend += 1
# find the points before the beginning of order_mid
ii = 0
new_y = inter_aper[0]
while(new_y>0):
ii -= 1
new_y = int(np.polyval(coeff,ii))
inter_aper = np.insert(inter_aper,0,new_y)
n_nodes_extend += 1
if x==2304:
#for _x in np.polyval(coeff, np.arange(0,25)):
# _ax.axvline(x=_x, color='r',ls='--',lw=0.5)
#_newx = np.arange(0, 25)
#_ax2.plot(_newx, np.polyval(coeff, _newx), 'ro', alpha=0.6)
plt.show()
# remove those points with y<0 or y>h-1
m1 = inter_aper > 0
m2 = inter_aper < h-1
inter_aper = inter_aper[np.nonzero(m1*m2)[0]]
# filter those masked pixels
m = mask[inter_aper, x]==0
inter_aper = inter_aper[m]
# remove backward points
tmp = np.insert(inter_aper,0,0.)
m = np.diff(tmp)>0
inter_aper = inter_aper[np.nonzero(m)[0]]
# count how many nodes removed
n_nodes_removed = (n_nodes_inter + n_nodes_extend) - inter_aper.size
# pack infos into message list
_message_lst.append('| %6d | %6d | %6d | %6d | %6d |'%(
x, n_nodes_inter, n_nodes_extend, n_nodes_removed, inter_aper.size))
# pack all nodes
for y in inter_aper:
xnodes.append(x)
ynodes.append(y)
znodes.append(meddata[y,x])
# extrapolate
#if extrapolate:
if False:
_y0, _y1 = inter_aper[0], inter_aper[1]
newy = _y0 - (_y1 - _y0)
newz = meddata[_y0, x] - (meddata[_y1, x] - meddata[_y0, x])
xnodes.append(x)
ynodes.append(newy)
znodes.append(newz)
_y1, _y2 = inter_aper[-2], inter_aper[-1]
newy = _y2 + (_y2 - _y1)
newz = meddata[_y2, x] + (meddata[_y2, x] - meddata[_y1, x])
xnodes.append(x)
ynodes.append(newy)
znodes.append(newz)
# convert to numpy array
xnodes = np.array(xnodes)
ynodes = np.array(ynodes)
znodes = np.array(znodes)
# write to running log
_message_lst.append('Total: %4d'%xnodes.size)
logger.info((os.linesep+' ').join(_message_lst))
# if scale='log', filter the negative values
if scale=='log':
pmask = znodes > 0
znodes[~pmask] = znodes[pmask].min()
znodes = np.log10(znodes)
if plot:
# initialize figures
fig = plt.figure(figsize=(10,10), dpi=150)
ax11 = fig.add_axes([0.07, 0.54, 0.39, 0.39])
ax12 = fig.add_axes([0.52, 0.54, 0.39, 0.39])
ax13 = fig.add_axes([0.94, 0.54, 0.015, 0.39])
ax21 = fig.add_axes([0.07, 0.07, 0.39, 0.39], projection='3d')
ax22 = fig.add_axes([0.52, 0.07, 0.39, 0.39], projection='3d')
fig.suptitle('Background')
ax11.imshow(data, cmap='gray')
# plot nodes
for ax in [ax11, ax12]:
ax.set_xlim(0,w-1)
ax.set_ylim(h-1,0)
ax.set_xlabel('X (pixel)', fontsize=10)
ax.set_ylabel('Y (pixel)', fontsize=10)
for ax in [ax21, ax22]:
ax.set_xlim(0,w-1)
ax.set_ylim(0,h-1)
ax.set_xlabel('X (pixel)', fontsize=10)
ax.set_ylabel('Y (pixel)', fontsize=10)
for ax in [ax11, ax12]:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for ax in [ax21, ax22]:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for tick in ax.zaxis.get_major_ticks():
tick.label1.set_fontsize(9)
if display:
plt.show(block=False)
# plot the figure used in paper
if plot_paper_fig:
figp1 = plt.figure(figsize=(6,6), dpi=150)
axp1 = figp1.add_axes([0.00, 0.05, 1.00, 0.95], projection='3d')
figp2 = plt.figure(figsize=(6.5,6), dpi=150)
axp2 = figp2.add_axes([0.12, 0.1, 0.84, 0.86])
if method=='poly':
background_data, fitmask = fit_background(data.shape,
xnodes, ynodes, znodes, xorder=xorder, yorder=yorder,
maxiter=maxiter, upper_clip=upper_clip, lower_clip=lower_clip)
elif method=='interp':
background_data, fitmask = interpolate_background(data.shape,
xnodes, ynodes, znodes)
else:
        raise ValueError('Unknown method: %s' % method)
m = (ynodes >= 0)*(ynodes <= h-1)
xnodes = xnodes[m]
ynodes = ynodes[m]
znodes = znodes[m]
fitmask = fitmask[m]
if scale=='log':
background_data = np.power(10, background_data)
# save nodes to DS9 region file
if reg_file is not None:
outfile = open(reg_file, 'w')
outfile.write('# Region file format: DS9 version 4.1'+os.linesep)
outfile.write('global color=green dashlist=8 3 width=1 ')
outfile.write('font="helvetica 10 normal roman" select=1 highlite=1 ')
outfile.write('dash=0 fixed=0 edit=1 move=1 delete=1 include=1 ')
outfile.write('source=1'+os.linesep)
outfile.write('physical'+os.linesep)
for x, y in zip(xnodes, ynodes):
text = ('point(%4d %4d) # point=circle'%(x+1, y+1))
outfile.write(text+os.linesep)
outfile.close()
# write nodes to running log
message = ['Background Nodes:', ' x, y, value, mask']
for x,y,z,m in zip(xnodes, ynodes, znodes, fitmask):
message.append('| %4d | %4d | %+10.8e | %1d |'%(x,y,z,m))
logger.info((os.linesep+' '*4).join(message))
residual = znodes - background_data[ynodes, xnodes]
if plot:
# prepare for plotting the fitted surface with a loose grid
yy, xx = np.meshgrid(np.linspace(0,h-1,32), np.linspace(0,w-1,32))
yy = np.int16(np.round(yy))
xx = np.int16(np.round(xx))
zz = background_data[yy, xx]
# plot 2d fitting in a 3-D axis in fig2
# plot the linear fitting
ax21.set_title('Background fitting (%s Z)'%scale, fontsize=10)
ax22.set_title('residuals (%s Z)'%scale, fontsize=10)
ax21.plot_surface(xx, yy, zz, rstride=1, cstride=1, cmap='jet',
linewidth=0, antialiased=True, alpha=0.5)
ax21.scatter(xnodes[fitmask], ynodes[fitmask], znodes[fitmask],
color='C0', linewidth=0)
ax22.scatter(xnodes[fitmask], ynodes[fitmask], residual[fitmask],
color='C0', linewidth=0)
if (~fitmask).sum()>0:
ax21.scatter(xnodes[~fitmask], ynodes[~fitmask], znodes[~fitmask],
color='none', edgecolor='C0', linewidth=1)
ax22.scatter(xnodes[~fitmask], ynodes[~fitmask], residual[~fitmask],
color='none', edgecolor='C0', linewidth=1)
# plot the logrithm fitting in another fig
#if scale=='log':
# ax23.plot_surface(xx, yy, log_zz, rstride=1, cstride=1, cmap='jet',
# linewidth=0, antialiased=True, alpha=0.5)
# ax23.scatter(xnodes[fitmask], ynodes[fitmask], zfit[fitmask], linewidth=0)
# ax24.scatter(xnodes[fitmask], ynodes[fitmask], log_residual[fitmask], linewidth=0)
for ax in [ax21, ax22]:
ax.xaxis.set_major_locator(tck.MultipleLocator(500))
ax.xaxis.set_minor_locator(tck.MultipleLocator(100))
ax.yaxis.set_major_locator(tck.MultipleLocator(500))
ax.yaxis.set_minor_locator(tck.MultipleLocator(100))
if display: fig.canvas.draw()
# plot figure for paper
if plot_paper_fig:
axp1.plot_surface(xx, yy, zz, rstride=1, cstride=1, cmap='jet',
linewidth=0, antialiased=True, alpha=0.5)
axp1.scatter(xnodes[fitmask], ynodes[fitmask], znodes[fitmask], linewidth=0)
axp1.xaxis.set_major_locator(tck.MultipleLocator(500))
axp1.xaxis.set_minor_locator(tck.MultipleLocator(100))
axp1.yaxis.set_major_locator(tck.MultipleLocator(500))
axp1.yaxis.set_minor_locator(tck.MultipleLocator(100))
axp1.set_xlim(0, w-1)
axp1.set_ylim(0, h-1)
axp1.set_xlabel('X')
axp1.set_ylabel('Y')
axp1.set_zlabel('Count')
if plot:
# plot the accepted nodes in subfig 1
ax11.scatter(xnodes[fitmask], ynodes[fitmask],
c='r', s=6, linewidth=0, alpha=0.8)
# plot the rejected nodes
if (~fitmask).sum()>0:
ax11.scatter(xnodes[~fitmask], ynodes[~fitmask],
c='none', s=6, edgecolor='r', linewidth=0.5)
# plot subfig 2
cnorm = colors.Normalize(vmin = background_data.min(),
vmax = background_data.max())
scalarmap = cmap.ScalarMappable(norm=cnorm, cmap=cmap.jet)
# plot the background light
image = ax12.imshow(background_data, cmap=scalarmap.get_cmap())
# plot the accepted nodes
ax12.scatter(xnodes[fitmask], ynodes[fitmask],
c='k', s=6, linewidth=0.5)
# plot the rejected nodes
if (~fitmask).sum()>0:
ax12.scatter(xnodes[~fitmask], ynodes[~fitmask],
c='none', s=6, edgecolor='k', linewidth=0.5)
# set colorbar
plt.colorbar(image, cax=ax13)
# set font size of colorbar
for tick in ax13.get_yaxis().get_major_ticks():
tick.label2.set_fontsize(9)
if display: fig.canvas.draw()
# plot for figure in paper
if plot_paper_fig:
pmask = data>0
logdata = np.zeros_like(data)-1
logdata[pmask] = np.log(data[pmask])
axp2.imshow(logdata, cmap='gray')
axp2.scatter(xnodes, ynodes, c='b', s=8, linewidth=0, alpha=0.8)
        cs = axp2.contour(background_data, linewidths=1, cmap='jet')
axp2.clabel(cs, inline=1, fontsize=11, fmt='%d', use_clabeltext=True)
axp2.set_xlim(0, w-1)
axp2.set_ylim(h-1, 0)
axp2.set_xlabel('X')
axp2.set_ylabel('Y')
figp1.savefig('fig_background1.png')
figp2.savefig('fig_background2.png')
figp1.savefig('fig_background1.pdf')
figp2.savefig('fig_background2.pdf')
plt.close(figp1)
plt.close(figp2)
if fig_file is not None:
fig.savefig(fig_file)
plt.close(fig)
return background_data
|
20ed1499ed23252fb5d24cf892e3cd169b0027b9
| 3,644,941
|
def get_piesocket_api_key():
"""
Retrieves user's Piesocket API key.
Returns:
(str) Piesocket API key.
Raises:
(ImproperlyConfigured) if the Piesocket API key isn't specified in settings.
"""
return get_setting_or_raise(
setting="PIESOCKET_API_KEY", setting_str="PieSocket API Key"
)
|
657bba650a914ed1a15d54b9d0000f37b99568d0
| 3,644,942
|
def downsample(myarr,factor,estimator=np.mean):
"""
Downsample a 2D array by averaging over *factor* pixels in each axis.
Crops upper edge if the shape is not a multiple of factor.
This code is pure numpy and should be fast.
keywords:
estimator - default to mean. You can downsample by summing or
something else if you want a different estimator
(e.g., downsampling error: you want to sum & divide by sqrt(n))
"""
ys,xs = myarr.shape
crarr = myarr[:ys-(ys % int(factor)),:xs-(xs % int(factor))]
dsarr = estimator(np.concatenate([[crarr[i::factor,j::factor]
for i in range(factor)]
for j in range(factor)]), axis=0)
return dsarr
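
# Usage sketch (illustrative): downsample a 4x4 array by a factor of 2.
import numpy as np

arr = np.arange(16, dtype=float).reshape(4, 4)
print(downsample(arr, 2))                    # 2x2 array of block means
print(downsample(arr, 2, estimator=np.sum))  # 2x2 array of block sums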
|
45b6422cb7f9b01512bc4860229164b043201675
| 3,644,943
|
def getActiveWindow():
"""Returns a Window object of the currently active Window."""
# Source: https://stackoverflow.com/questions/5286274/front-most-window-using-cgwindowlistcopywindowinfo
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
for win in windows:
if win['kCGWindowLayer'] == 0:
return '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')) # Temporary. For now, we'll just return the title of the active window.
raise Exception('Could not find an active window.')
|
ca1c810525f0a49cd9f4b53d0d621cb39b3b733e
| 3,644,944
|
def _derivative_log(x):
"""Chain rule on natural log = (1/x)*(dx/dr)"""
return _protected_inverse(x[0])[:, :, np.newaxis, np.newaxis]*x[1]
|
5f4bf5416575126cd93adaee6ccfca942ad6218f
| 3,644,945
|
def svn_wc_merge_props(*args):
"""
svn_wc_merge_props(svn_wc_notify_state_t state, char path, svn_wc_adm_access_t adm_access,
apr_hash_t baseprops, apr_array_header_t propchanges,
svn_boolean_t base_merge,
svn_boolean_t dry_run, apr_pool_t pool) -> svn_error_t
"""
return _wc.svn_wc_merge_props(*args)
|
54187e010f71798bee90eb179a10da11bf410fce
| 3,644,946
|
def is_paused():
"""
Return True if is_paused is set in the global settings table of the database.
"""
try:
is_paused_val = Settings.objects.get().is_paused
except ObjectDoesNotExist:
is_paused_val = False
return is_paused_val
|
59b99d4a4842e14205376d7923d3e5c8b52c30a6
| 3,644,947
|
def info_request(request):
"""Information request form."""
if request.method == 'POST':
form = InfoRequestForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
# create out recipient list
to = []
# cleaned_data converts the data to a list so we do not
# need to use getlist()
for program in cd.get('academic_programs'):
to = to + settings.CONTINUING_STUDIES_INFOREQUEST_RECIPIENTS[program]
if settings.DEBUG:
cd['to'] = to
to = [settings.MANAGERS[0][1]]
subject = 'OCS Information Request'
send_mail(
request,
to,
subject,
cd['email'],
'admissions/inforequest.txt',
cd,
)
return HttpResponseRedirect(reverse_lazy('info_request_success'))
else:
form = InfoRequestForm()
return render(request, 'admissions/inforequest.html', {'form': form})
|
124d2fc36823fb4e1c7b569c0c6d9c75a93297e8
| 3,644,948
|
import itertools
import numpy as np
def get_accurate(clustering_res_df, cluster_number, error=False):
"""
:param clustering_res_df: a pandas DataFrame about clustering result
:param cluster_number: the number of the cluster
(the first column is the index,
the second column is the right information,
the third column is the clustering information)
:param error: if error=True, then return the error rate, else, return the accuracy rate
:return: the clustering accuracy
"""
if clustering_res_df.shape[1] != 3:
raise Exception("Shape Error: the input DataFrame's column number is not 3")
real_dict = {}
clustering_dict = {}
for i in range(cluster_number):
real_df = clustering_res_df.loc[clustering_res_df['ClusterInfo'] == i]
clustering_df = clustering_res_df.loc[clustering_res_df['ClusterExp'] == i]
real_dict[i] = real_df['IndexNum'].tolist()
clustering_dict[i] = clustering_df['IndexNum'].tolist()
accuracy_matrix = np.zeros((cluster_number, cluster_number))
for i in range(cluster_number):
for j in range(cluster_number):
accuracy_matrix[i][j] = len(set(real_dict[i]).intersection(set(clustering_dict[j])))
# for test
# print("The accuracy matrix is: \n", accuracy_matrix)
case_iterator = itertools.permutations(range(cluster_number), cluster_number)
accurate = 0
for item in case_iterator:
acc = sum([accuracy_matrix[i][item[i]] for i in range(cluster_number)])
if acc > accurate:
accurate = acc
if not error:
return accurate / clustering_res_df.shape[0]
else:
return 1 - accurate / clustering_res_df.shape[0]
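
# Illustrative usage with a tiny clustering result; column names follow the
# function above ('IndexNum', 'ClusterInfo' = ground truth, 'ClusterExp' = clustering labels).
import pandas as pd

example_df = pd.DataFrame({
    'IndexNum':    [0, 1, 2, 3, 4, 5],
    'ClusterInfo': [0, 0, 0, 1, 1, 1],
    'ClusterExp':  [1, 1, 0, 0, 0, 0],
})
print(get_accurate(example_df, 2))              # ~0.833 after the best label permutation
print(get_accurate(example_df, 2, error=True))  # ~0.167 error rate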
|
7ba71bcd82e70d9344994f9b6a2133676d58f683
| 3,644,949
|
import json
def odict_to_json(odict):
"""
    Serialize an OrderedDict into a JSON string
"""
json_series = json.dumps(odict)
return json_series
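
# Quick usage check (illustrative):
from collections import OrderedDict

print(odict_to_json(OrderedDict([("a", 1), ("b", 2)])))  # -> {"a": 1, "b": 2}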
|
d18a4e0f0d11a2c529edb395671052f15ad8071d
| 3,644,950
|
import ast
def parenthesize(node: ast.AST, _nl_able: bool = False) -> str:
"""Wrap the un-parsed node in parentheses."""
return f"({unparse(node, True)})"
|
d61d6be6e5466559cdaba0602a95f3169d74aa36
| 3,644,951
|
def stack_nested_arrays(nested_arrays):
"""Stack/batch a list of nested numpy arrays.
Args:
nested_arrays: A list of nested numpy arrays of the same shape/structure.
Returns:
A nested array containing batched items, where each batched item is obtained
by stacking corresponding items from the list of nested_arrays.
"""
nested_arrays_flattened = [tf.nest.flatten(a) for a in nested_arrays]
batched_nested_array_flattened = [
np.stack(a) for a in zip(*nested_arrays_flattened)
]
return tf.nest.pack_sequence_as(nested_arrays[0],
batched_nested_array_flattened)
|
bf1e4bd35be871b9098d9789b189c48f36329646
| 3,644,952
|
from packaging import version
def temporal_statistics(da, stats):
"""
Obtain generic temporal statistics using the hdstats temporal library:
https://github.com/daleroberts/hdstats/blob/master/hdstats/ts.pyx
last modified June 2020
Parameters
----------
da : xarray.DataArray
DataArray should contain a 3D time series.
stats : list
list of temporal statistics to calculate.
Options include:
'discordance' =
'f_std' = std of discrete fourier transform coefficients, returns
three layers: f_std_n1, f_std_n2, f_std_n3
'f_mean' = mean of discrete fourier transform coefficients, returns
three layers: f_mean_n1, f_mean_n2, f_mean_n3
'f_median' = median of discrete fourier transform coefficients, returns
three layers: f_median_n1, f_median_n2, f_median_n3
'mean_change' = mean of discrete difference along time dimension
'median_change' = median of discrete difference along time dimension
'abs_change' = mean of absolute discrete difference along time dimension
'complexity' =
'central_diff' =
'num_peaks' : The number of peaks in the timeseries, defined with a local
window of size 10. NOTE: This statistic is very slow
Outputs
-------
xarray.Dataset containing variables for the selected
temporal statistics
"""
# if dask arrays then map the blocks
if dask.is_dask_collection(da):
if version.parse(xr.__version__) < version.parse("0.16.0"):
raise TypeError(
"Dask arrays are only supported by this function if using, "
+ "xarray v0.16, run da.compute() before passing dataArray."
)
# create a template that matches the final datasets dims & vars
arr = da.isel(time=0).drop("time")
# deal with the case where fourier is first in the list
if stats[0] in ("f_std", "f_median", "f_mean"):
template = xr.zeros_like(arr).to_dataset(name=stats[0] + "_n1")
template[stats[0] + "_n2"] = xr.zeros_like(arr)
template[stats[0] + "_n3"] = xr.zeros_like(arr)
for stat in stats[1:]:
if stat in ("f_std", "f_median", "f_mean"):
template[stat + "_n1"] = xr.zeros_like(arr)
template[stat + "_n2"] = xr.zeros_like(arr)
template[stat + "_n3"] = xr.zeros_like(arr)
else:
template[stat] = xr.zeros_like(arr)
else:
template = xr.zeros_like(arr).to_dataset(name=stats[0])
for stat in stats:
if stat in ("f_std", "f_median", "f_mean"):
template[stat + "_n1"] = xr.zeros_like(arr)
template[stat + "_n2"] = xr.zeros_like(arr)
template[stat + "_n3"] = xr.zeros_like(arr)
else:
template[stat] = xr.zeros_like(arr)
try:
template = template.drop('spatial_ref')
except:
pass
# ensure the time chunk is set to -1
da_all_time = da.chunk({"time": -1})
# apply function across chunks
lazy_ds = da_all_time.map_blocks(
temporal_statistics, kwargs={"stats": stats}, template=template
)
try:
crs = da.geobox.crs
lazy_ds = assign_crs(lazy_ds, str(crs))
except:
pass
return lazy_ds
# If stats supplied is not a list, convert to list.
stats = stats if isinstance(stats, list) else [stats]
# grab all the attributes of the xarray
x, y, time, attrs = da.x, da.y, da.time, da.attrs
# deal with any all-NaN pixels by filling with 0's
mask = da.isnull().all("time")
da = da.where(~mask, other=0)
# complete timeseries
print("Completing...")
da = fast_completion(da)
# ensure dim order is correct for functions
da = da.transpose("y", "x", "time").values
stats_dict = {
"discordance": lambda da: hdstats.discordance(da, n=10),
"f_std": lambda da: hdstats.fourier_std(da, n=3, step=5),
"f_mean": lambda da: hdstats.fourier_mean(da, n=3, step=5),
"f_median": lambda da: hdstats.fourier_median(da, n=3, step=5),
"mean_change": lambda da: hdstats.mean_change(da),
"median_change": lambda da: hdstats.median_change(da),
"abs_change": lambda da: hdstats.mean_abs_change(da),
"complexity": lambda da: hdstats.complexity(da),
"central_diff": lambda da: hdstats.mean_central_diff(da),
"num_peaks": lambda da: hdstats.number_peaks(da, 10),
}
print(" Statistics:")
# if one of the fourier functions is first (or only)
# stat in the list then we need to deal with this
if stats[0] in ("f_std", "f_median", "f_mean"):
print(" " + stats[0])
stat_func = stats_dict.get(str(stats[0]))
zz = stat_func(da)
n1 = zz[:, :, 0]
n2 = zz[:, :, 1]
n3 = zz[:, :, 2]
        # initialise dataset with first statistic
ds = xr.DataArray(
n1, attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"]
).to_dataset(name=stats[0] + "_n1")
# add other datasets
for i, j in zip([n2, n3], ["n2", "n3"]):
ds[stats[0] + "_" + j] = xr.DataArray(
i, attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"]
)
else:
# simpler if first function isn't fourier transform
first_func = stats_dict.get(str(stats[0]))
print(" " + stats[0])
ds = first_func(da)
# convert back to xarray dataset
ds = xr.DataArray(
ds, attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"]
).to_dataset(name=stats[0])
# loop through the other functions
for stat in stats[1:]:
print(" " + stat)
# handle the fourier transform examples
if stat in ("f_std", "f_median", "f_mean"):
stat_func = stats_dict.get(str(stat))
zz = stat_func(da)
n1 = zz[:, :, 0]
n2 = zz[:, :, 1]
n3 = zz[:, :, 2]
for i, j in zip([n1, n2, n3], ["n1", "n2", "n3"]):
ds[stat + "_" + j] = xr.DataArray(
i, attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"]
)
else:
# Select a stats function from the dictionary
# and add to the dataset
stat_func = stats_dict.get(str(stat))
ds[stat] = xr.DataArray(
stat_func(da), attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"]
)
# try to add back the geobox
try:
crs = da.geobox.crs
ds = assign_crs(ds, str(crs))
except:
pass
return ds
|
b3be6d8260a70dcb697de39dd04d146e53907997
| 3,644,953
|
def encode_data(data):
"""
Helper that converts :class:`str` or :class:`bytes` to :class:`bytes`.
:class:`str` are encoded with UTF-8.
"""
# Expect str or bytes, return bytes.
if isinstance(data, str):
return data.encode('utf-8')
elif isinstance(data, bytes):
return data
else:
raise TypeError("data must be bytes or str")
|
3cd54389719439e8f18cf02b110af07799c946b5
| 3,644,954
|
def get_apps_final(the_apps_dummy):
"""
计算出:
1.每个用户安装app的数量;
2.每个用户安装小众app的数量;
3.每个用户安装大众app的数量;
4.根据每个用户安装app的向量进行Mean-shift聚类的结果
"""
core_data = the_apps_dummy.drop(['id'], axis=1)
the_apps_final = get_minor_major(core_data, 'apps', 5, 90)
# new_core_data = col_cluster(core_data, n_cluster, 'app')
# the_apps_final = pd.concat([apps_minor_major, new_core_data], axis=1)
the_apps_final['id'] = the_apps_dummy['id']
return the_apps_final
|
afe92cf7fb79ac73464f7e9189bb56608e0fd424
| 3,644,955
|
from indico.modules.events.abstracts.util import (get_events_with_abstract_reviewer_convener,
get_events_with_abstract_persons)
from indico.modules.events.contributions.util import get_events_with_linked_contributions
from indico.modules.events.papers.util import get_events_with_paper_roles
from indico.modules.events.registration.util import get_events_registered
from indico.modules.events.sessions.util import get_events_with_linked_sessions
from indico.modules.events.surveys.util import get_events_with_submitted_surveys
from indico.modules.events.util import (get_events_managed_by, get_events_created_by,
get_events_with_linked_event_persons)
def get_linked_events(user, dt, limit=None, load_also=()):
"""Get the linked events and the user's roles in them
:param user: A `User`
:param dt: Only include events taking place on/after that date
    :param limit: Max number of events
    :param load_also: Additional `Event` attributes to load
"""
links = OrderedDict()
for event_id in get_events_registered(user, dt):
links.setdefault(event_id, set()).add('registration_registrant')
for event_id in get_events_with_submitted_surveys(user, dt):
links.setdefault(event_id, set()).add('survey_submitter')
for event_id in get_events_managed_by(user, dt):
links.setdefault(event_id, set()).add('conference_manager')
for event_id in get_events_created_by(user, dt):
links.setdefault(event_id, set()).add('conference_creator')
for event_id, principal_roles in get_events_with_linked_sessions(user, dt).iteritems():
links.setdefault(event_id, set()).update(principal_roles)
for event_id, principal_roles in get_events_with_linked_contributions(user, dt).iteritems():
links.setdefault(event_id, set()).update(principal_roles)
for event_id, role in get_events_with_linked_event_persons(user, dt).iteritems():
links.setdefault(event_id, set()).add(role)
for event_id, roles in get_events_with_abstract_reviewer_convener(user, dt).iteritems():
links.setdefault(event_id, set()).update(roles)
for event_id, roles in get_events_with_abstract_persons(user, dt).iteritems():
links.setdefault(event_id, set()).update(roles)
for event_id, roles in get_events_with_paper_roles(user, dt).iteritems():
links.setdefault(event_id, set()).update(roles)
if not links:
return OrderedDict()
query = (Event.query
.filter(~Event.is_deleted,
Event.id.in_(links))
.options(joinedload('series'),
joinedload('label'),
load_only('id', 'category_id', 'title', 'start_dt', 'end_dt',
'series_id', 'series_pos', 'series_count', 'label_id', 'label_message',
*load_also))
.order_by(Event.start_dt, Event.id))
if limit is not None:
query = query.limit(limit)
return OrderedDict((event, links[event.id]) for event in query)
|
145f33cec0b79d74ea1550d22fc8599484d2b16d
| 3,644,956
|
import time
import os
def get_run_callback(run_text, output_dir):
"""
    function to generate a tf.keras TensorBoard callback for this run
    """
    root_logdir = f'{output_dir.rstrip("/")}/tensorboard_logs/'
    run_id = time.strftime(f'{run_text}_%Y_%m_%d-%H-%M-%S')
    log_path = os.path.join(root_logdir, run_id)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_path)
    return tensorboard_callback
|
f5275515b061446fd2be0cd79ed4fa583a23f80a
| 3,644,957
|
def bert_keyword_expansion(keywords: str,
num_similar: int,
bert_model,
tokenizer,
bert_embedding_dict):
"""
Keyword expansion outputs top N most similar words from vocabulary.
    @param keywords: string of space-separated words to find keywords for
@param num_similar: number of similar words to return
@param bert_model: BERT embedding model
@param tokenizer: BERT tokenizer object
@param bert_embedding_dict: KeyedVectors object storing BERT-generated embeddings
@return: list of top N most similar words in order of descending similarity
"""
assert isinstance(keywords, str)
keywords = utils.rm_punct(keywords)
keywords = utils.lower(keywords)
# Assuming input is a string of space separated words
keyword_list = set(keywords.split())
# Dictionary used to store similarity scores for top keywords
scores_dict = defaultdict(int)
for keyword in keyword_list:
# Check if keyword is in the BERT embedding dictionary
# If not, we create a vector representation of it first
if keyword not in bert_embedding_dict.vocab:
keyword = bert_embedding(keyword, tokenizer, bert_model, to_numpy=True)
# Returns a list of tuples in the form (word, similarity score)
result = utils.find_similar_keyword_in_vocab(keyword=keyword, num_similar=num_similar,
model=bert_embedding_dict, similarity_score=True)
for word, score in result:
            # Skipping similar words that are already in the list of keywords provided by the user
if word in keyword_list:
continue
else:
# Keeping the maximum similarity score for each word
scores_dict[word] = max(scores_dict[word], score)
sorted_results = sorted(scores_dict.items(), key=lambda kv: kv[1], reverse=True)[:num_similar]
return [word for word, score in sorted_results]
|
5976a52cfcdd8d80f1b3b0fcc0d32b92f40a1f62
| 3,644,958
|
def utc_to_tt_offset(jday=None):
"""Returns the offset in seconds from a julian date in Terrestrial Time (TT)
to a Julian day in Coordinated Universal Time (UTC)"""
if use_numpy:
return utc_to_tt_offset_numpy(jday)
else:
return utc_to_tt_offset_math(jday)
|
f40c82840253cc9b9f1f9a2685134b9313cb42d1
| 3,644,959
|
from typing import Any
import aiohttp
async def get_async_request(url: str) -> [int, Any]:
"""Get the data from the url provided.
Parameters
----------
url: str
url to get the data from
Returns
-------
[int, Any]
        List with the response status code and the JSON data returned from the request
"""
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
data = await response.json()
return [response.status, data]
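
# Minimal usage sketch; the URL is only a placeholder and the call needs network access.
import asyncio

async def _demo():
    status, data = await get_async_request("https://api.github.com")
    print(status, type(data))

# asyncio.run(_demo())  # uncomment to run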
|
2ad07bf7394d4d7804802d201fab0027171bea3d
| 3,644,960
|
def scattering_probability(H, psi0, n_emissions, c_ops, tlist,
system_zero_state=None,
construct_effective_hamiltonian=True):
"""
Compute the integrated probability of scattering n photons in an arbitrary
system. This function accepts a nonlinearly spaced array of times.
Parameters
----------
H : :class: qutip.Qobj or list
System-waveguide(s) Hamiltonian or effective Hamiltonian in Qobj or
list-callback format. If construct_effective_hamiltonian is not
specified, an effective Hamiltonian is constructed from H and
`c_ops`.
psi0 : :class: qutip.Qobj
Initial state density matrix :math:`\\rho(t_0)` or state vector
:math:`\\psi(t_0)`.
n_emissions : int
Number of photons emitted by the system (into any combination of
waveguides).
c_ops : list
List of collapse operators for each waveguide; these are assumed to
include spontaneous decay rates, e.g.
:math:`\\sigma = \\sqrt \\gamma \\cdot a`.
tlist : array_like
List of times for :math:`\\tau_i`. tlist should contain 0 and exceed
the pulse duration / temporal region of interest; tlist need not be
linearly spaced.
system_zero_state : :class: qutip.Qobj
State representing zero excitations in the system. Defaults to
`basis(systemDims, 0)`.
construct_effective_hamiltonian : bool
Whether an effective Hamiltonian should be constructed from H and c_ops:
:math:`H_{eff} = H - \\frac{i}{2} \\sum_n \\sigma_n^\\dagger \\sigma_n`
Default: True.
Returns
-------
scattering_prob : float
The probability of scattering n photons from the system over the time
range specified.
"""
phi_n = temporal_scattered_state(H, psi0, n_emissions, c_ops, tlist,
system_zero_state,
construct_effective_hamiltonian)
T = len(tlist)
W = len(c_ops)
# Compute <omega_tau> for all combinations of tau
all_emission_indices = combinations_with_replacement(range(T), n_emissions)
probs = np.zeros([T] * n_emissions)
# Project scattered state onto temporal basis
for emit_indices in all_emission_indices:
# Consider unique emission time partitionings
partition = tuple(set(set_partition(emit_indices, W)))
# wg_indices_list = list(set_partition(indices, W))
for wg_indices in partition:
projector = temporal_basis_vector(wg_indices, T)
amplitude = projector.dag() * phi_n
probs[emit_indices] += np.real(amplitude.conjugate() * amplitude)
# Iteratively integrate to obtain single value
while probs.shape != ():
probs = np.trapz(probs, x = tlist)
return np.abs(probs)
|
d4509371303a729a160d1b3d92f5c5f2f8ac3339
| 3,644,961
|
def analytical_value_cond_i_shannon(distr, par):
""" Analytical value of the conditional Shannon mutual information.
Parameters
----------
distr : str-s
Names of the distributions; 'normal'.
par : dictionary
Parameters of the distribution. If distr is 'normal':
par["cov"] and par["ds"] are the (joint) covariance matrix and
the vector of subspace dimensions.
Returns
-------
cond_i : float
Analytical value of the conditional Shannon mutual
information.
"""
# initialization:
ds = par['ds']
len_ds = len(ds)
# 0,d_1,d_1+d_2,...,d_1+...+d_M; starting indices of the subspaces:
cum_ds = cumsum(hstack((0, ds[:-1])))
idx_condition = range(cum_ds[len_ds - 1],
cum_ds[len_ds - 1] + ds[len_ds - 1])
if distr == 'normal':
c = par['cov']
# h_joint:
h_joint = analytical_value_h_shannon(distr, par)
# h_cross:
h_cross = 0
for m in range(len_ds-1): # non-conditioning subspaces
idx_m = range(cum_ds[m], cum_ds[m] + ds[m])
idx_m_and_condition = hstack((idx_m, idx_condition))
par = {"cov": c[ix_(idx_m_and_condition, idx_m_and_condition)]}
h_cross += analytical_value_h_shannon(distr, par)
# h_condition:
par = {"cov": c[ix_(idx_condition, idx_condition)]}
h_condition = analytical_value_h_shannon(distr, par)
cond_i = -h_joint + h_cross - (len_ds - 2) * h_condition
else:
raise Exception('Distribution=?')
return cond_i
|
a9a91d77863829de7f818aa6dcfe0216eb9a70af
| 3,644,962
|
import os

# Module-level debug flag (assumed); when True the constructed command line is printed
DEBUG = False
def zexp(input, i=None):
"""
Point-wise complex exponential.
:param input array:
:param i bool: imaginary
"""
usage_string = "zexp [-i] input output"
cmd_str = f'{BART_PATH} '
cmd_str += 'zexp '
flag_str = ''
opt_args = f''
multituples = []
if i is not None:
flag_str += f'-i '
cmd_str += flag_str + opt_args + ' '
cmd_str += f"{' '.join([' '.join([str(x) for x in arg]) for arg in zip(*multituples)]).strip()} {NAME}input {NAME}output "
cfl.writecfl(NAME + 'input', input)
if DEBUG:
print(cmd_str)
os.system(cmd_str)
outputs = cfl.readcfl(NAME + 'output')
return outputs
|
0a392013e3e7c5ac51d32f4fb7d228ecc3cf0250
| 3,644,963
|
def ekin2wl(ekin):
"""Convert neutron kinetic energy in electronvolt to wavelength in Angstrom"""
if _np and hasattr(ekin,'__len__'):
#reciprocals without zero division:
ekinnonzero = ekin != 0.0
        ekininv = 1.0 / _np.where( ekinnonzero, ekin, 1.0)  # fallback 1.0 won't be used
return _c_ekin2wl * _np.sqrt(_np.where( ekinnonzero, ekininv, _np.inf))
else:
return _rawfct['ncrystal_ekin2wl'](ekin)
|
873ae25e10dfb3e6d9c6ca8ab1c56efa0066544a
| 3,644,964
|
def show_subscription(conn, customer):
"""
Retrieves authenticated user's plan and prints it.
- Return type is a tuple, 1st element is a boolean and 2nd element is the response message from messages.py.
- If the operation is successful; print the authenticated customer's plan and return tuple (True, CMD_EXECUTION_SUCCESS).
- If any exception occurs; return tuple (False, CMD_EXECUTION_FAILED).
Output should be like:
#|Name|Resolution|Max Sessions|Monthly Fee
1|Basic|720P|2|30
"""
try:
cursor = conn.cursor()
cursor.execute("SELECT plan_id "
"FROM customer "
"WHERE customer_id = %s",
(customer.customer_id,))
        queryCustomerPlanId = cursor.fetchone()
if queryCustomerPlanId is None:
# Fail if no such customer exists (somehow)
return False, CMD_EXECUTION_FAILED
else:
cursor.execute("SELECT plan_id, plan_name, resolution, max_parallel_sessions, monthly_fee "
"FROM plan "
"WHERE plan_id = %s",
(queryCustomerPlanId[0],))
queryPlan = cursor.fetchone()
if queryPlan is None:
# Fail if no such plan exists
return False, CMD_EXECUTION_FAILED
print("#|Name|Resolution|Max Sessions|Monthly Fee")
print("{0}|{1}|{2}|{3}|{4}".format(queryPlan[0], queryPlan[1], queryPlan[2], queryPlan[3], queryPlan[4]))
cursor.close()
return True, CMD_EXECUTION_SUCCESS
except Exception as e:
return False, CMD_EXECUTION_FAILED
|
93a99075ea98697782845c2751362f9b319cea43
| 3,644,965
|
def maybe_gen_fake_data_based_on_real_data(
image, label, reso, min_fake_lesion_ratio, gen_fake_probability):
"""Remove real lesion and synthesize lesion."""
# TODO(lehou): Replace magic numbers with flag variables.
gen_prob_indicator = tf.random_uniform(
shape=[], minval=0.0, maxval=1.0, dtype=tf.float32)
background_mask = tf.less(label, 0.5)
lesion_mask = tf.greater(label, 1.5)
liver_mask = tf.logical_not(tf.logical_or(background_mask, lesion_mask))
liver_intensity = tf.boolean_mask(image, liver_mask)
lesion_intensity = tf.boolean_mask(image, lesion_mask)
intensity_diff = tf.reduce_mean(liver_intensity) - (
tf.reduce_mean(lesion_intensity))
intensity_diff *= 1.15
intensity_diff = tf.cond(tf.is_nan(intensity_diff),
lambda: 0.0, lambda: intensity_diff)
lesion_liver_ratio = 0.0
lesion_liver_ratio += tf.random.normal(shape=[], mean=0.01, stddev=0.01)
lesion_liver_ratio += tf.random.normal(shape=[], mean=0.0, stddev=0.05)
lesion_liver_ratio = tf.clip_by_value(
lesion_liver_ratio, min_fake_lesion_ratio, min_fake_lesion_ratio + 0.20)
fake_lesion_mask = tf.logical_and(
_gen_rand_mask(ratio_mean=lesion_liver_ratio, ratio_stddev=0.0,
scale=reso // 32, shape=label.shape,
smoothness=reso // 32),
tf.logical_not(background_mask))
liver_mask = tf.logical_not(tf.logical_or(background_mask, fake_lesion_mask))
# Blur the masks
lesion_mask_blur = tf.squeeze(tf.nn.conv3d(
tf.expand_dims(tf.expand_dims(tf.cast(lesion_mask, tf.float32), -1), 0),
filter=tf.ones([reso // 32] * 3 + [1, 1], tf.float32) / (reso // 32) ** 3,
strides=[1, 1, 1, 1, 1],
padding='SAME'))
fake_lesion_mask_blur = tf.squeeze(tf.nn.conv3d(
tf.expand_dims(tf.expand_dims(
tf.cast(fake_lesion_mask, tf.float32), -1), 0),
filter=tf.ones([reso // 32] * 3 + [1, 1], tf.float32) / (reso // 32) ** 3,
strides=[1, 1, 1, 1, 1],
padding='SAME'))
# Remove real lesion and add fake lesion.
# If the intensitify is too small (maybe no liver or lesion region labeled),
# do not generate fake data.
gen_prob_indicator = tf.cond(
tf.greater(intensity_diff, 0.0001),
lambda: gen_prob_indicator, lambda: 0.0)
# pylint: disable=g-long-lambda
image = tf.cond(
tf.greater(gen_prob_indicator, 1 - gen_fake_probability),
lambda: image + intensity_diff * lesion_mask_blur \
- intensity_diff * fake_lesion_mask_blur,
lambda: image)
label = tf.cond(
tf.greater(gen_prob_indicator, 1 - gen_fake_probability),
lambda: tf.cast(background_mask, tf.float32) * 0 + \
tf.cast(liver_mask, tf.float32) * 1 + \
tf.cast(fake_lesion_mask, tf.float32) * 2,
lambda: label)
# pylint: enable=g-long-lambda
return image, label
|
fd3f1557930d63652f4dd39b2ba8031410bda711
| 3,644,966
|
def is_primitive_type (v) :
""" Check to see if v is primitive. Primitive in this context
means NOT a container type (str is the exception):
primitives type are: int, float, long, complex, bool, None, str
"""
    return type(v) in {int, float, long, complex, bool, type(None), str}
|
2d72e03aa1ec62f214b2a3b468948ff2354508dd
| 3,644,967
|
def _get_bit(h, i):
"""Return specified bit from string for subsequent testing"""
h1 = int.from_bytes(h, 'little')
return (h1 >> i) & 0x01
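
# Illustrative check: 0x05 is 0b101, so bits 0 and 2 are set.
print(_get_bit(b'\x05', 0), _get_bit(b'\x05', 1), _get_bit(b'\x05', 2))  # -> 1 0 1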
|
b9b672c87b35369dc86abec7005dfeed3e99eb67
| 3,644,968
|
def go_down_right_reward(nobs, high_pos, agent_num, act):
"""
Return a reward for going to the low or right side of the board
:param nobs: The current observation
:param high_pos: Tuple of lowest and most-right position
:param agent_num: The id of the agent to check (0-3)
:return: The reward for going down or right
"""
# only give rewards if a new highest point is reached
bomb_bonus = 0
if act[agent_num] == 5:
bomb_bonus = 0.00
if nobs[agent_num]['position'][0] > high_pos[0]:
return 1 + bomb_bonus, (nobs[agent_num]['position'][0], high_pos[1])
elif nobs[agent_num]['position'][1] > high_pos[1]:
return 1 + bomb_bonus, (high_pos[0], nobs[agent_num]['position'][1])
else:
return 0 + bomb_bonus, high_pos
|
bd8c6f01b55e14cc498cc251b1c0cc92340506c7
| 3,644,969
|
def getChannelBoxMenu():
"""
Get ChannelBox Menu, convert the main channel box to QT and return the
Edit QMenu which is part of the channel box' children.
:return: Maya's main channel box menu
:rtype: QMenu
"""
channelBox = getChannelBox()
# find widget
menus = channelBox.findChildren(QMenu)
# find Edit menu
for menu in menus:
if menu.menuAction().text() == "Edit":
return menu
|
38957701044f4552cd355c8856abb8c3b486479b
| 3,644,970
|
def make_sid_cookie(sid, uri):
"""Given a sid (from a set-cookie) figure out how to send it back"""
# sometime near 0.92, port got dropped...
# uritype, uribody = urllib.splittype(uri)
# host, path = urllib.splithost(uribody)
# host, port = urllib.splitnport(host)
# if port == -1:
# port = dict(http=80, https=443)[uritype] # we want to throw here
cookiename = "JIFTY_SID_HIVEMINDER"
return "%s=%s" % (cookiename, sid)
|
d194bcb8f47acfbbab9d7405ff9a23069b74f077
| 3,644,971
|
def identity_filter(element_tuple):
"""
    element_tuple consists of the (name, attrs) of each XML element retrieved by the startElement method
"""
return element_tuple
|
c50208f345f40acce58df86cdae4432aae24cf4b
| 3,644,972
|
def EoZ(N2, w0, f, ):
"""
Wave ray energy when variations can only occur in the vertical (i.e. N2 and
flow only vary with depth not horizontally) - Olbers 1981
"""
Ez = np.squeeze((w0**2 * (N2 - f**2))
/ ((w0**2 - f**2)**(3 / 2) * (N2 - w0**2)**(1 / 2)))
return Ez
|
7dfbcf3c0e29463ccf1663922486ffaad99b1ea5
| 3,644,973
|
from typing import Any
def safe_string(value: Any) -> str:
"""
Consistently converts a value to a string.
:param value: The value to stringify.
"""
if isinstance(value, bytes):
return value.decode()
return str(value)
|
0ba8dcfe028ac6c45e0c17f9ba02014c2f746c4d
| 3,644,974
|
def handle_older_version(upstream_version: Box) -> bool:
"""
Checks if the current version (local) is older than the upstream one
and provides a message to the end-user.
:return:
:py:class:`True` if local is older. :py:class:`False` otherwise.
"""
version_utility = VersionUtility(PyFunceble.storage.PROJECT_VERSION)
if PyFunceble.facility.ConfigLoader.is_already_loaded():
if PyFunceble.storage.CONFIGURATION.cli_testing.display_mode.quiet:
message = "New version available."
elif PyFunceble.storage.CONFIGURATION.cli_testing.display_mode.colour:
message = (
f"{colorama.Style.BRIGHT}{colorama.Fore.GREEN}Please take the "
"time to "
f"update {PyFunceble.storage.PROJECT_NAME}!"
f"{colorama.Style.RESET_ALL}\n"
f"{colorama.Style.BRIGHT}Your version:{colorama.Style.RESET_ALL} "
f"{PyFunceble.storage.PROJECT_VERSION}\n"
f"{colorama.Style.BRIGHT}Upstream version:{colorama.Style.RESET_ALL} "
f"{upstream_version.current_version}\n"
)
else:
message = (
f"Please take the time to update "
f"{PyFunceble.storage.PROJECT_NAME}!\n"
f"Your version: {PyFunceble.storage.PROJECT_VERSION}\n"
f"Upstream version: {upstream_version.current_version}"
)
else:
message = (
"Please take the time to "
f"update {PyFunceble.storage.PROJECT_NAME}!\n"
f"Your version: {PyFunceble.storage.PROJECT_VERSION}\n"
f"Upstream version: {upstream_version.current_version}"
)
if version_utility.is_older_than(upstream_version.current_version):
print(message)
return True
return False
|
130f825a59ca27a2e3d7d63e7d917f837153a2be
| 3,644,975
|
from typing import Union
def tp(selector:Union[str, tuple]="@s", selector2:Union[str, tuple]=("~", "~", "~")):
"""
selector:Union[str, tuple] -> The position to be moved from
selector2:Union[str, tuple] -> The position to be moved to
"""
if not ((isinstance(selector, str) or isinstance(selector, tuple)) and (isinstance(selector2, str) or isinstance(selector2, tuple))):
return "## Tp command hasn't been configured properly ##"
if isinstance(selector, tuple):
if len(selector) < 3:
selector = ("~", "~", "~")
return f"tp {selector[0]} {selector[1]} {selector[2]}\n"
else:
if isinstance(selector2, tuple):
if len(selector2) < 3:
selector2 = ("~", "~", "~")
return f"tp {selector} {selector2[0]} {selector2[1]} {selector2[2]}\n"
else:
return f"tp {selector} {selector2}\n"
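
# Illustrative calls covering the three accepted argument shapes:
print(tp(), end="")                   # -> "tp @s ~ ~ ~"
print(tp("@p", (0, 64, 0)), end="")   # -> "tp @p 0 64 0"
print(tp((100, "~", -50)), end="")    # -> "tp 100 ~ -50"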
|
81b3baf308f412bae3718fe165028a970fe56bda
| 3,644,976
|
def new_product() -> Product:
"""Generates an instance of Product with default values."""
return Product(
product_id='',
desc='',
display_name='',
capacity=0,
image='')
|
ef50c2e90d5f512b2ae53c1111f501c13bdbca5d
| 3,644,977
|
def _rng_bit_generator_batching_rule(batched_args, batch_dims, *, shape, dtype, algorithm):
"""Calls RBG in a loop and stacks the results."""
key, = batched_args
bd, = batch_dims
if bd is batching.not_mapped:
return lax.rng_bit_generator_p.bind(key, shape=shape, dtype=dtype,
algorithm=algorithm), (None, None)
key = batching.moveaxis(key, bd, 0)
map_body = lambda k: lax.rng_bit_generator_p.bind(k, shape=shape, dtype=dtype, algorithm=algorithm)
  stacked_keys, stacked_bits = lax.map(map_body, key)
return (stacked_keys, stacked_bits), (0, 0)
|
6fdcdadb5a303a7f1f38033f7e69be781eee4a49
| 3,644,978
|
from typing import List
def count_short_tail_keywords(keywords: List[str]) -> int:
"""
Returns the count of short tail keywords in a list of keywords.
Parameters:
keywords (List[str]): list with all keywords as strings.
Returns:
total (int): count of short tail keywords (1 o 2 words per keyword).
"""
total = 0
for keyword in keywords:
keyword_list = keyword.split()
        if 1 <= len(keyword_list) <= 2:
total += 1
return total
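
# Illustrative run; with the 1-2 word definition above, three keywords qualify.
example_keywords = ["seo", "seo tools", "best free seo tools", "keyword research"]
print(count_short_tail_keywords(example_keywords))  # -> 3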
|
1af42d71be75d9279584a8c3edc090a39ec6cf77
| 3,644,979
|
def is_odd(number):
"""Determine if a number is odd."""
if number % 2 == 0:
return False
else:
return True
|
4efe5114f2e25431808492c768abc0f750e63225
| 3,644,980
|
def fmt_quil_str(raw_str):
"""Format a raw Quil program string
Args:
raw_str (str): Quil program typed in by user.
Returns:
str: The Quil program with leading/trailing whitespace trimmed.
"""
raw_quil_str = str(raw_str)
raw_quil_str_arr = raw_quil_str.split('\n')
trimmed_quil_str_arr = [qs.strip() for qs in raw_quil_str_arr]
    trimmed_quil_str = '\n'.join(trimmed_quil_str_arr)
return trimmed_quil_str
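
# Illustrative usage on a small user-typed Quil program:
raw_program = """
    H 0
    CNOT 0 1
    MEASURE 0 ro[0]
"""
print(fmt_quil_str(raw_program))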
|
e95c26f3de32702d6e44dc09ebbd707da702d964
| 3,644,981
|
from typing import Optional
from typing import Collection
from typing import cast
def get_source_plate_uuid(barcode: str) -> Optional[str]:
"""Attempt to get a UUID for a source plate barcode.
Arguments:
barcode {str} -- The source plate barcode.
Returns:
{str} -- The source plate UUID; otherwise None if it cannot be determined.
"""
try:
source_plates_collection: Collection = cast(Eve, app).data.driver.db.source_plates
source_plate: Optional[SourcePlateDoc] = source_plates_collection.find_one({FIELD_BARCODE: barcode})
if source_plate is None:
return None
return source_plate.get(FIELD_LH_SOURCE_PLATE_UUID)
except Exception as e:
logger.error(f"An error occurred attempting to determine the UUID of source plate '{barcode}'")
logger.exception(e)
return None
|
b834d7740bfda86037d1d266c53f8618ff8e0bd3
| 3,644,982
|
def find_largest_digit(n):
"""
    :param n: an integer
    :return: the largest digit in n
"""
n = abs(n) # absolute the value
if n < 10:
return n
else:
return find_helper(n, 0)
|
f18af5c32263254e132ca405ad40c217083bd568
| 3,644,983
|
def extractChrononTranslations(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
item['title'] = item['title'].replace('’', '')
if 'Weapons cheat'.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, 'Modern weapons cheat in another world', vol, chp, frag=frag, postfix=postfix)
if 'Heavenly Tribulation'.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, 'Heavenly Tribulation', vol, chp, frag=frag, postfix=postfix)
if 'I can speak'.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, 'I Can Speak with Animals and Demons', vol, chp, frag=frag, postfix=postfix)
if 'I Bought a Girl'.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, 'I Bought a Girl', vol, chp, frag=frag, postfix=postfix)
if 'Girl Corps'.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, 'Girl Corps', vol, chp, frag=frag, postfix=postfix)
if 'Modern Weapons'.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, 'Modern weapons cheat in another world', vol, chp, frag=frag, postfix=postfix)
if 'Upper World'.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, 'Reincarnation ~ From the lower world to the upper world', vol, chp, frag=frag, postfix=postfix)
if 'I work as a healer'.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, "I Work As A Healer In Another World's Labyrinth City", vol, chp, frag=frag, postfix=postfix)
return False
|
3c4768745d54257d5654dac74b3278e95611288e
| 3,644,984
|
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
import numpy as np
def run_feature_selection(X, y, select_k_features):
    """Use a random forest regressor as a proxy for finding
the k most important features in X, returning indices for those
features as output."""
clf = RandomForestRegressor(n_estimators=100, max_depth=3, random_state=0)
clf.fit(X, y)
selector = SelectFromModel(
clf, threshold=-np.inf, max_features=select_k_features, prefit=True
)
return selector.get_support(indices=True)
|
0c730e2d2015b3ad0777cd493e2f5235bc9682a3
| 3,644,985
|
from typing import Optional
from typing import Callable
from typing import Literal
def _not_json_encodable(message: str, failure_callback: Optional[Callable[[str], None]]) -> Literal[False]:
""" Utility message to fail (return `False`) by first calling an optional failure callback. """
if failure_callback:
failure_callback(message)
return False
|
6979261a5f14a32c1ae34d01bad346344f38ed14
| 3,644,986
|
import functools
def requires(*commands: str) -> RequiresT:
"""Decorator to require the given commands."""
def inner(func: ReturnT) -> ReturnT:
"""Decorates the function and checks for the commands."""
for command in commands:
if not check_availability(command):
raise errors.MissingShellCommand(
f"ipq requires the {command!r} command, please install it."
)
@functools.wraps(func)
def wrapper(*args: t.Any, **kwargs: t.Any) -> str:
"""Wraps and executes the decorated function."""
return func(*args, **kwargs)
return wrapper
return inner
|
a84199cfff7bf29a3a2c3ccb46cf65b3a09a7136
| 3,644,987
|
def _build_model(input_dim, num_classes, num_hidden_layers=0,
hidden_dimension=128,
normalize_inputs=False, dropout=0):
"""
Macro to generate a Keras classification model
"""
    inpt = tf.keras.layers.Input((input_dim,))
net = inpt
# if we're normalizing inputs:
if normalize_inputs:
norm = tf.keras.layers.Lambda(lambda x:K.l2_normalize(x,axis=1))
net = norm(net)
# for each hidden layer
for _ in range(num_hidden_layers):
if dropout > 0:
net = tf.keras.layers.Dropout(dropout)(net)
net = tf.keras.layers.Dense(hidden_dimension, activation="relu")(net)
# final layer
if dropout > 0:
net = tf.keras.layers.Dropout(dropout)(net)
net = tf.keras.layers.Dense(num_classes, activation="relu")(net)
return tf.keras.Model(inpt, net)
|
07386dbf2649463963d754959f7d389c1d2aae90
| 3,644,988
|
def get_client(
project_id, cloud_region, registry_id, device_id, private_key_file,
algorithm, ca_certs, mqtt_bridge_hostname, mqtt_bridge_port):
"""Create our MQTT client. The client_id is a unique string that identifies
this device. For Google Cloud IoT Core, it must be in the format below."""
client = mqtt.Client(
client_id=('projects/{}/locations/{}/registries/{}/devices/{}'
.format(
project_id,
cloud_region,
registry_id,
device_id)))
# With Google Cloud IoT Core, the username field is ignored, and the
# password field is used to transmit a JWT to authorize the device.
client.username_pw_set(
username='unused',
password=create_jwt(
project_id, private_key_file, algorithm))
# Enable SSL/TLS support.
client.tls_set(ca_certs=ca_certs)
# Register message callbacks. https://eclipse.org/paho/clients/python/docs/
# describes additional callbacks that Paho supports. In this example, the
# callbacks just print to standard out.
client.on_connect = on_connect
client.on_publish = on_publish
client.on_disconnect = on_disconnect
client.on_message = on_message
client.on_subscribe = on_subscribe
# Connect to the Google MQTT bridge.
client.connect(mqtt_bridge_hostname, mqtt_bridge_port)
return client
|
6fa945892e7b0174e4fbfebc7e5f2f985982c6f0
| 3,644,989
|
def fake_image_sct_custom(data):
"""
:return: an Image (3D) in RAS+ (aka SCT LPI) space
"""
i = fake_image_custom(data)
img = msct_image.Image(i.get_data(), hdr=i.header,
orientation="LPI",
dim=i.header.get_data_shape(),
)
return img
|
9593240af25c3c4fc2cb8602dda902713721ebfb
| 3,644,990
|
def _get_property_header(resource, resource_type):
"""
Create a dictionary representing resources properties
:param resource: The name of the resource for which to create a
property header
:param resource_type: The type of the resource (model, seed, etc.)
:return: A dictionary representing resource properties
"""
header_dict = {
'version': 2,
_SUPPORTED_RESOURCE_TYPES[resource_type]: [
{'name': resource, 'description': "", 'columns': []}
],
}
return header_dict
|
e9622282a83cfd1b0be5af54def817ed07ad8e21
| 3,644,991
|
def create_metapaths_parameters(filename, folder):
""" creates a parameters file from the default """
default_filename = folder + PATHDELIM + 'resources'+ PATHDELIM + "template_param.txt"
try:
filep = open(default_filename, 'r')
except:
eprintf("ERROR: cannot open the default parameter file " + sQuote(default_filename) )
exit_process("ERROR: cannot open the default parameter file " + sQuote(default_filename), errorCode = 0 )
lines = filep.readlines()
with open(filename, 'w') as newfile:
for line in lines:
            fprintf(newfile, "%s", line)
filep.close()
#result['filename'] = filename
return True
|
b859ee0791b3213cc48f253943122290ad5a67fa
| 3,644,992
|
def dist_prune(DELTA, prune=True):
""" transform similarity matrix to distance matrix
- prune matrix by removing edges that have a distance larger
than condition cond (default mean distance)
"""
w = np.max(DELTA)
DELTA = np.abs(DELTA - w)
np.fill_diagonal(DELTA, 0.)
if prune:
        cond = np.mean(DELTA)  # + np.std(DELTA)  # TODO: transform to parameter with choice between models
        for i in range(DELTA.shape[0]):
            for j in range(DELTA.shape[1]):
                if DELTA[i, j] > cond:
                    DELTA[i, j] = 0.
return DELTA
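
# Illustrative run on a tiny similarity matrix:
import numpy as np

S = np.array([[1.0, 0.9, 0.1],
              [0.9, 1.0, 0.2],
              [0.1, 0.2, 1.0]])
print(dist_prune(S.copy()))                # distances, edges above the mean distance zeroed
print(dist_prune(S.copy(), prune=False))   # plain similarity-to-distance transform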
|
524c4f743ced455d67b276dfe4f66e9bcd2ca313
| 3,644,993
|
def bitwise_dot(x, y):
"""Compute the dot product of two integers bitwise."""
def bit_parity(i):
n = bin(i).count("1")
return int(n % 2)
return bit_parity(x & y)
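
# Illustrative checks: the dot product is the parity of the shared set bits.
print(bitwise_dot(0b1011, 0b1110))  # shared bits at positions 1 and 3 -> parity 0
print(bitwise_dot(0b1011, 0b0110))  # shared bit at position 1 -> parity 1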
|
074b09a92e3e697eb08b8aaefa6ffd05d58698f4
| 3,644,994
|
import itertools
import numpy as np
def fit (samples, degree, sample_weights=None):
"""
Fit a univariate polynomial function to the 2d points given in samples, where the rows of
samples are the points. The return value is the vector of coefficients of the polynomial
(see p below) which minimizes the squared error of the polynomial at the given samples.
Denote the components of samples as
samples = [
[x[0], y[0]],
[x[1], y[1]],
...
]
and let
p(coefficients)(t) = sum(coefficient*t**i for i,coefficient in enumerate(coefficients))
noting that coefficients[0] is the constant term, coefficients[1] is the linear coefficient, etc.
"""
assert len(samples.shape) == 2
assert samples.shape[1] == 2, 'Expected the rows of samples to be (x,y) pairs.'
A = np.zeros((degree+1,degree+1), dtype=float)
B = np.zeros((degree+1,), dtype=float)
weight_iterator = sample_weights if sample_weights is not None else itertools.cycle([1.0])
    for (x,y),weight in zip(samples,weight_iterator):
g = geometric(x, degree)
A += weight*np.outer(g,g)
B += weight*y*g
coefficients,_,_,_ = np.linalg.lstsq(A,B)
return coefficients
|
4b7a7e77c5e55641a1a666f52abef32088bf680a
| 3,644,995
|
import os
from importlib import import_module
def get_engines():
""" Returns a list of all engines for tests """
engines = []
base_dir = os.getcwd()
engines_dir = os.path.join(base_dir, 'search_engine_parser', 'core', 'engines')
for filename in os.listdir(engines_dir):
if os.path.isfile(os.path.join(engines_dir, filename)) and filename.endswith('.py') \
and filename != '__init__.py':
engine = filename.split('.py')[0]
module = import_module("search_engine_parser.core.engines.{}".format(engine.lower()))
engine_class = getattr(module, "Search")
engines.append([engine, engine_class(),])
return engines
|
0b23da44ebd14c39f2e5b3ba240c37f3f3500cf9
| 3,644,996
|
import os
def IsEncryptedCoredump(path):
""" Function to find if the coredump is encrypted or not. """
if not os.path.exists('/bin/vmkdump_extract'):
raise Exception('vmkdump_extract not present.')
result, rc = RunCmd("/bin/vmkdump_extract -E {0}".format(path))
if rc != 0:
raise Exception(
'RunCmd failed when trying to check for encrypted coredump')
return result.strip() == "YES"
|
cbd15d8df2d9172a32c4f8e6f6cdd04e5d42c57f
| 3,644,997
|
def get_ahead_mask(tokens, i_pad=0):
"""
ahead mask 계산하는 함수
:param tokens: tokens (bs, n_seq)
:param i_pad: id of pad
:return mask: ahead and pad mask (ahead or pad: 1, other: 0)
"""
n_seq = tf.shape(tokens)[1]
ahead_mask = 1 - tf.linalg.band_part(tf.ones((n_seq, n_seq)), -1, 0)
ahead_mask = tf.expand_dims(ahead_mask, axis=0)
pad_mask = get_pad_mask(tokens, i_pad)
mask = tf.maximum(ahead_mask, pad_mask)
return mask
|
31a7bd2710cd86075f753227fa2e4c97b1948e19
| 3,644,998
|
def check_containment(row, query_index, reference_index, percent_identity=PERCENT_IDENTITY, covered_length=COVERED_LENGTH):
"""Checks if a row from a blast out format 6 file is a containment
Takes in a row from a blast out format 6 table, a DataFrames with query sequence and reference sequence data.
"""
if (row['qId'] != row['tId']) and (row['seqIdentity'] >= percent_identity):
query_covered = row['alnLen']/float(query_index.loc[row['qId'], 'LENGTH'])
reference_covered = row['alnLen']/float(reference_index.loc[row['tId'], 'LENGTH'])
if query_covered >= covered_length or reference_covered >= covered_length:
return True
else:
return False
else:
return False
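
# Illustrative usage with hypothetical index tables; thresholds passed explicitly.
import pandas as pd

query_index = pd.DataFrame({'LENGTH': [1000]}, index=['q1'])
reference_index = pd.DataFrame({'LENGTH': [5000]}, index=['r1'])
row = pd.Series({'qId': 'q1', 'tId': 'r1', 'seqIdentity': 0.98, 'alnLen': 950})
print(check_containment(row, query_index, reference_index,
                        percent_identity=0.95, covered_length=0.9))  # -> True (query is 95% covered)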
|
a868d9c15abd5c73bc9483cc9a58d7c92875d15a
| 3,644,999
|