| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def parse_into_tree(abbr, doc_type = 'html'):
"""
Преобразует аббревиатуру в дерево элементов
@param abbr: Аббревиатура
@type abbr: str
@param doc_type: Тип документа (xsl, html)
@type doc_type: str
@return: Tag
"""
root = Tag('', 1, doc_type)
parent = root
last = None
token = re.compile(r'([\+>])?([a-z][a-z0-9:\!\-]*)(#[\w\-\$]+)?((?:\.[\w\-\$]+)*)(?:\*(\d+))?', re.IGNORECASE)
def expando_replace(m):
ex = m.group(1)
if 'expandos' in zen_settings[doc_type] and ex in zen_settings[doc_type]['expandos']:
return zen_settings[doc_type]['expandos'][ex]
else:
return ex
# replace expandable ("expando") elements
abbr = re.sub(r'([a-z][a-z0-9]*)\+$', expando_replace, abbr)
def token_expander(operator, tag_name, id_attr, class_name, multiplier):
multiplier = multiplier and int(multiplier) or 1
current = is_snippet(tag_name, doc_type) and Snippet(tag_name, multiplier, doc_type) or Tag(tag_name, multiplier, doc_type)
if id_attr:
current.add_attribute('id', id_attr[1:])
if class_name:
current.add_attribute('class', class_name[1:].replace('.', ' '))
# move deeper into the tree
if operator == '>' and token_expander.last:
token_expander.parent = token_expander.last
token_expander.parent.add_child(current)
token_expander.last = current
return ''
token_expander.parent = root
token_expander.last = None
abbr = re.sub(token, lambda m: token_expander(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)), abbr)
# if abbr is now an empty string, the whole abbreviation was successfully
# converted into a tree; otherwise, the abbreviation was invalid
return root if not abbr else None
| 5,337,600
|
def map_pao1_genes(gene_list):
"""Takes a list of PAO1 genes and returns the corresponding PA14 names."""
pa14_pao1_mapping = dict()
mapping_path = os.path.join(os.getcwd(), 'data', 'ortholuge_pa14_to_pao1_20190708.tsv')
with open(mapping_path) as mapping:
reader = csv.reader(mapping, delimiter='\t')
for row in reader:
pa14_pao1_mapping[row[4]] = row[10]
pa14_genes = [pa14_pao1_mapping[gene] for gene in gene_list if gene in pa14_pao1_mapping]
return pa14_genes
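# Usage sketch with a toy in-memory mapping standing in for the Ortholuge TSV
# (gene names below are made up; only the lookup pattern matters).
toy_mapping = {"geneA": "orthologA", "geneB": "orthologB"}
mapped = [toy_mapping[g] for g in ["geneA", "geneC"] if g in toy_mapping]
print(mapped)  # ['orthologA'] -- genes without a mapping entry are silently dropped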
| 5,337,601
|
def test_set_profile_data_empty_last_name(api_client, user):
"""Test profile data cannot be set last name is blank."""
api_client.login(username=user.username, password="test_password")
response = api_client.patch(
"/v1/users/set_profile/", {"first_name": "John", "last_name": ""},
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
| 5,337,602
|
def banner():
"""Verify banner in HTML file match expected."""
def match(path, expected_url=None, expected_base=None):
"""Assert equals and return file contents.
:param py.path.local path: Path to file to read.
:param str expected_url: Expected URL in <a href="" /> link.
:param str expected_base: Expected base message.
:return: File contents.
:rtype: str
"""
contents = path.read()
actual = RE_BANNER.findall(contents)
if not expected_url and not expected_base:
assert not actual
else:
assert actual == [(expected_url, expected_base)]
return contents
return match
| 5,337,603
|
def harvester():
"""Harvests remotely mastered metadata.
"""
pass
| 5,337,604
|
def terminate_execution(
execution: t.Union[CurrentExecutionId, ExecutionId],
domain: str,
reason: str = None,
details: str = None,
child_execution_policy: ChildExecutionTerminationPolicy = None,
client: "botocore.client.BaseClient" = None,
) -> None:
"""Terminate (immediately close) a workflow execution.
Args:
execution: workflow execution to close
domain: domain of execution
reason: termination reason, usually for classification
details: termination details, usually for explanation
child_execution_policy: how to handle open child workflow
executions, default: use default for workflow type
client: SWF client
"""
client = _common.ensure_client(client)
kw = {}
if isinstance(execution, ExecutionId):
kw["runId"] = execution.run_id
if reason or reason == "":
kw["reason"] = reason
if details or details == "":
kw["details"] = details
if child_execution_policy:
kw["childPolicy"] = child_execution_policy.value
client.terminate_workflow_execution(
domain=domain,
workflowId=execution.id,
**kw,
)
| 5,337,605
|
def rerank(x2ys, x2cnt, x2xs, width, n_trans):
"""Re-rank word translations by computing CPE scores.
See paper for details about the CPE method."""
x2ys_cpe = dict()
for x, ys in tqdm(x2ys.items()):
cntx = x2cnt[x]
y_scores = []
for y, cnty in sorted(ys.items(), key=operator.itemgetter(1), reverse=True)[:width]:
ts = cnty / float(cntx) # translation score: initial value
if x in x2xs:
for x2, cntx2 in x2xs[x].items(): # Collocates
p_x_x2 = cntx2 / float(cntx)
p_x2_y2 = 0
if x2 in x2ys:
p_x2_y2 = x2ys[x2].get(y, 0) / float(x2cnt[x2])
ts -= (p_x_x2 * p_x2_y2)
y_scores.append((y, ts))
_ys_ = sorted(y_scores, key=lambda y_score: y_score[1], reverse=True)[:n_trans]
_ys_ = [each[0] for each in _ys_]
x2ys_cpe[x] = _ys_
return x2ys_cpe
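# Toy-sized illustration of the translation score computed inside the loop above
# (all counts are invented; see the paper referenced in the docstring for the CPE method).
x2cnt = {"chat": 10, "petit": 8}                    # source-word counts
x2ys = {"chat": {"cat": 6, "small": 2},             # candidate translations with counts
        "petit": {"small": 5, "cat": 1}}
x2xs = {"chat": {"petit": 4}}                       # collocates of "chat"
x, y = "chat", "small"
ts = x2ys[x][y] / float(x2cnt[x])                   # initial score: 2/10 = 0.2
for x2, cntx2 in x2xs[x].items():                   # subtract what collocates already explain
    p_x_x2 = cntx2 / float(x2cnt[x])                # 4/10
    p_x2_y = x2ys[x2].get(y, 0) / float(x2cnt[x2])  # 5/8
    ts -= p_x_x2 * p_x2_y                           # 0.2 - 0.4 * 0.625 = -0.05
print(round(ts, 3))                                 # -0.05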
| 5,337,606
|
def ParallelTempering(num_sweeps=10000, num_replicas=10,
max_iter=None, max_time=None, convergence=3):
"""Parallel tempering workflow generator.
Args:
num_sweeps (int, optional):
Number of sweeps in the fixed temperature sampling.
num_replicas (int, optional):
Number of replicas (parallel states / workflow branches).
max_iter (int/None, optional):
Maximum number of iterations of the update/swaps loop.
max_time (int/None, optional):
Maximum wall clock runtime (in seconds) allowed in the update/swaps
loop.
convergence (int/None, optional):
Number of times best energy of the coldest replica has to repeat
before we terminate.
Returns:
Workflow (:class:`~hybrid.core.Runnable` instance).
"""
# expand single input state into `num_replicas` replica states
preprocess = SpawnParallelTemperingReplicas(num_replicas=num_replicas)
# fixed temperature sampling on all replicas in parallel
update = hybrid.Map(FixedTemperatureSampler(num_sweeps=num_sweeps))
# replica exchange step: do the top-down sweep over adjacent pairs
# (good hot samples sink to bottom)
swap = SwapReplicasDownsweep()
# loop termination key function
def key(states):
if states is not None:
return states[-1].samples.first.energy
# replicas update/swap until Loop termination criteria reached
loop = hybrid.Loop(
update | swap,
max_iter=max_iter, max_time=max_time, convergence=convergence, key=key)
# collapse all replicas (although the bottom one should be the best)
postprocess = hybrid.MergeSamples(aggregate=True)
workflow = preprocess | loop | postprocess
return workflow
| 5,337,607
|
def get_document_info(file):
"""
Scrape document information using ChemDataExtractor Scrapers
:param file: file path to target article
:type file: str
:return: list of dicts containing the document information
"""
if file.endswith('.html'):
file_type = 'html'
elif file.endswith('.xml'):
file_type = 'xml'
else:
return
print("file type", file_type)
with open(file, 'rb') as fh:
    f = fh.read()
sel = Selector.from_text(f)
# Determine publishers, use the RSC scraper by default
publisher = detect_publisher(f)
if publisher == 'acs':
document_info = AcsHtmlDocument(sel)
elif publisher == 'rsc':
document_info = RscHtmlDocument(sel)
elif publisher == 'elsevier' and file_type == 'html':
document_info = ElsevierHtmlDocument(sel)
elif publisher == 'elsevier' and file_type == 'xml':
document_info = ElsevierXmlDocument(sel)
elif publisher == 'springer' and file_type == 'html':
document_info = SpringerHtmlDocument(sel)
else:
print('Unknown journal for file ' + file + ', using RSC HTML formatting by default')
document_info = RscHtmlDocument(sel)
return document_info
| 5,337,608
|
def qlog(q):
"""
Compute logarithm of a unit quaternion (unit norm is important here).
Let q = [a, qv], where a is the scalar part and qv is the vector part.
qv = sin(phi/2)*nv, where nv is a unit vector. Then
ln(q) = ln(||q||) + qv / ||qv|| * arccos(a / ||q||)
Therefore for a unit quaternion, the scalar part of ln(q) is zero
and the vector part of ln(q) is 1/2 * phi * nv,
i.e. half of rotation vector rv = phi * nv because
a = cos(phi/2) in attitude quaternion (see quatRotVec())
Reference: https://en.wikipedia.org/wiki/Quaternion
NOTE 1: due to existing implementation in C++, this function
returns just the vector part of ln(q)
NOTE 2: According to Wiki description, ln(q)_v should be a
half of rotation vector. However the previous
implementation computed the full rotation vector.
So, using the rotation vector for now until cleared up.
"""
rv = quatRotVec(q)
return rv
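# Standalone NumPy sketch of the rotation-vector extraction described above; an
# illustration only, not the project's quatRotVec (assumes q = [a, qx, qy, qz], unit norm).
import numpy as np

def rotation_vector(q):
    a, qv = q[0], np.asarray(q[1:], dtype=float)
    norm_qv = np.linalg.norm(qv)
    if norm_qv < 1e-12:                  # no rotation
        return np.zeros(3)
    phi = 2.0 * np.arctan2(norm_qv, a)   # rotation angle, since a = cos(phi/2)
    return phi * qv / norm_qv            # full rotation vector = 2 * vector part of ln(q)

q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])  # 90 deg about z
print(rotation_vector(q))  # ~[0, 0, 1.5708]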
| 5,337,609
|
def trx():
"""Response from ADN about current transaction APPROVED/DECLINED and showing Receipt of transaction"""
trx = web.trxs[-1]
trx.shoppingCartUuid = request.args.get('shoppingCartUuid', default = "", type = str)
trx.mediaType = request.args.get('mediaType', default = "", type = str)
trx.correlationId = request.args.get('correlationId', default = "", type = str)
trx.trxId = request.args.get('payId', default = "", type = str)
trx.maskedMediaId = request.args.get('maskedMediaId', default = "", type = str)
trx.status = request.args.get('status', default = "", type = str)
trx.author_time = datetime.now().strftime("%d.%m.%Y %H:%M:%S")
web.logger.info(f"ShoppingCart {trx.shoppingCartUuid} Transaction {trx.trxId} {trx.mediaType} {trx.maskedMediaId} {trx.status}")
return render_template('trx.html', trx=trx)
| 5,337,610
|
def test_return_topological_neighbors_hexagonal():
"""Test the topological neighbors of a neuron for hexagonal grid type."""
som = SOM(random_state=RANDOM_STATE, gridtype='hexagonal').fit(X)
assert set(som._return_topological_neighbors(0, 0)).issubset(set([(1, 0), (0, 1)]))
assert set(som._return_topological_neighbors(1, 1)).issubset(
set([(0, 1), (2, 1), (1, 0), (1, 2), (2, 2), (2, 0)])
)
| 5,337,611
|
def get_counts_by_domain(df):
"""
Parameters:
df (pandas.Dataframe) - form of `get_counts_df` output
Returns:
pandas.Dataframe
"""
columns = ['study', 'study_label', 'domain_code', 'domain_label']
df2 = df.groupby(columns, as_index=False)[["count", "subjects"]].max()
return df2
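# Small self-contained example of the groupby/max above (values are invented;
# only the column names match what the function expects).
import pandas as pd
df = pd.DataFrame({
    "study": ["S1", "S1", "S1"],
    "study_label": ["Study 1"] * 3,
    "domain_code": ["DM", "DM", "LB"],
    "domain_label": ["Demographics", "Demographics", "Labs"],
    "count": [10, 25, 7],
    "subjects": [5, 9, 3],
})
print(get_counts_by_domain(df))
#   study study_label domain_code  domain_label  count  subjects
# 0    S1     Study 1          DM  Demographics     25         9
# 1    S1     Study 1          LB          Labs      7         3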
| 5,337,612
|
def raise_value_exception(method_name, expected_value_range, val):
"""Utility function for raising value exceptions."""
raise ValueError("Method '{0}' expects a value in the range {{{1}}} but got {{{2}}} instead!".format(method_name, expected_value_range, val))
| 5,337,613
|
def batch_cosine_similarity(x1, x2):
""" https://en.wikipedia.org/wiki/Cosine_similarity """
mul = np.multiply(x1, x2)
s = np.sum(mul, axis=1)
return s
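# Note: the sum of element-wise products equals cosine similarity only when the rows of
# x1 and x2 are already L2-normalized; a quick NumPy check of that assumption:
import numpy as np
a = np.random.rand(4, 8)
b = np.random.rand(4, 8)
cos = np.sum(a * b, axis=1) / (np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1))
an = a / np.linalg.norm(a, axis=1, keepdims=True)
bn = b / np.linalg.norm(b, axis=1, keepdims=True)
assert np.allclose(cos, batch_cosine_similarity(an, bn))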
| 5,337,614
|
def plot(ax, x, y):
"""Plot """
return ax._plot(x, y)
| 5,337,615
|
def plot_character(character_name, movie_name, character_sentiments):
"""
This functions takes the name of the character, and plot their sentiment in a ring chart
Parameters:
character_name: str: The name of the character who emotions are to be displayed\
movie_name: str: the movie which the character belongs to
character_sentiments: dict: having the name of the character and sentiments
Returns:
Nothing
"""
if not character_sentiments.get(movie_name, {}).get(character_name):
print("The character does not exist")
return
plt.figure(figsize=(10,10))
plt.pie(character_sentiments[movie_name][character_name].values(),
labels=character_sentiments[movie_name][character_name].keys(),
# ax=axs[i, 1],
wedgeprops={'width':0.5},
startangle=90,
autopct='%1.1f%%')
plt.title(character_name)
| 5,337,616
|
async def test_list_photos(
client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
"""Should return OK and a valid json body."""
ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"photo_service.adapters.photos_adapter.PhotosAdapter.get_all_photos",
return_value=[{"id": ID, "DateTime": "2020:02:29 13:00:34"}],
)
headers = MultiDict(
{
hdrs.AUTHORIZATION: f"Bearer {token}",
},
)
resp = await client.get("/photos", headers=headers)
assert resp.status == 200
assert "application/json" in resp.headers[hdrs.CONTENT_TYPE]
photos = await resp.json()
assert type(photos) is list
assert len(photos) > 0
| 5,337,617
|
def concatenatePDFs(filelist, pdfname, pdftk='pdftk', gs='gs', cleanup=False,
quiet=False):
"""
Takes a list or a string list of PDF filenames (space-delimited), and an
output name, and concatenates them.
It first tries pdftk (better quality), and if that fails, it tries
ghostscript (more commonly installed).
Todd Hunter
"""
if (type(filelist) == list):
filelist = ' '.join(filelist)
cmd = '%s %s cat output %s' % (pdftk, filelist, pdfname)
if not quiet: print "Running command = %s" % (cmd)
mystatus = os.system(cmd)
if (mystatus != 0):
print "status = ", mystatus
cmd = '%s -q -sPAPERSIZE=letter -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=%s %s' % (gs,pdfname,filelist)
print "Running command = %s" % (cmd)
mystatus = os.system(cmd)
if (mystatus != 0):
gs = '/opt/local/bin/gs'
cmd = '%s -q -sPAPERSIZE=letter -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=%s %s' % (gs,pdfname,filelist)
print "Running command = %s" % (cmd)
mystatus = os.system(cmd)
if (mystatus != 0):
print "Both pdftk and gs are missing, no PDF created."
cleanup = False
if (cleanup):
os.system('rm %s' % filelist)
return (mystatus)
| 5,337,618
|
def get_module_offset(
process_id: int,
process_name: str
) -> Address:
"""Returns an Adress with the base offset of the process.
Args:
process_id (int): PID
process_name (str): Name of the process. Case does not matter.
Returns:
Address: Address with the base offset of the process.
"""
flag = TH32CS_SNAPMODULE | TH32CS_SNAPMODULE32
snap = CreateToolhelp32Snapshot(flag, process_id)
me32 = MODULEENTRY32()
me32.dwSize = sizeof(MODULEENTRY32)
Module32First(snap, byref(me32))
while True:
name = me32.szModule.decode("ascii")
if process_name.lower() in name.lower():
base_addr = me32.modBaseAddr
addr = Address(addressof(base_addr.contents))
CloseHandle(snap)
return addr
if not Module32Next(snap, byref(me32)):
break
CloseHandle(snap)
| 5,337,619
|
def sequence_to_header(sequence: Sequence[Bytes]) -> Header:
"""
Build a Header object from a sequence of bytes. The sequence must
contain exactly 15 byte sequences.
Parameters
----------
sequence :
The sequence of bytes which is supposed to form the Header
object.
Returns
-------
header : `Header`
The obtained `Header` object.
"""
ensure(len(sequence) == 15)
ensure(len(sequence[12]) <= 32)
return Header(
parent_hash=Hash32(sequence[0]),
ommers_hash=Hash32(sequence[1]),
coinbase=Address(sequence[2]),
state_root=Root(sequence[3]),
transactions_root=Root(sequence[4]),
receipt_root=Root(sequence[5]),
bloom=Bloom(sequence[6]),
difficulty=Uint.from_be_bytes(sequence[7]),
number=Uint.from_be_bytes(sequence[8]),
gas_limit=Uint.from_be_bytes(sequence[9]),
gas_used=Uint.from_be_bytes(sequence[10]),
timestamp=U256.from_be_bytes(sequence[11]),
extra_data=sequence[12],
mix_digest=Hash32(sequence[13]),
nonce=Bytes8(sequence[14]),
)
| 5,337,620
|
def cache_tree_children(queryset):
"""
For all items in the queryset, set the '_cached_children' attribute to a
list. This attribute is in turn used by the 'get_children' method on the
item, which would otherwise (if '_cached_children' is not set) cause a
database query.
The queryset must be ordered by 'path', or the function will put the children
in the wrong order.
"""
parents_dict = {}
# Loop through the queryset twice, so that the function works even if the
# mptt tree is broken. Since django caches querysets internally, the extra
# computation time is minimal.
for obj in queryset:
parents_dict[obj.pk] = obj
obj._cached_children = []
for obj in queryset:
parent = parents_dict.get(obj.parent_id)
if parent:
parent._cached_children.append(obj)
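# Plain-Python sketch of the same two-pass caching pattern, with simple objects standing
# in for the MPTT queryset (names and pks below are illustrative only).
from types import SimpleNamespace
nodes = [  # ordered by path: parents come before their children
    SimpleNamespace(pk=1, parent_id=None, name="root"),
    SimpleNamespace(pk=2, parent_id=1, name="child a"),
    SimpleNamespace(pk=3, parent_id=1, name="child b"),
    SimpleNamespace(pk=4, parent_id=2, name="grandchild"),
]
cache_tree_children(nodes)
print([c.name for c in nodes[0]._cached_children])  # ['child a', 'child b']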
| 5,337,621
|
def makeASdef(isd_id, as_id_tail, label, public_ip, is_core=False, is_ap=False):
""" Helper for readable ASdef declaration """
return ASdef(isd_id, _expand_as_id(as_id_tail), label, public_ip, is_core, is_ap)
| 5,337,622
|
def _get_fs_subjid(subject_id, subjects_dir=None):
"""
Gets fsaverage version `subject_id`, fetching if required
Parameters
----------
subject_id : str
FreeSurfer subject ID
subjects_dir : str, optional
Path to FreeSurfer subject directory. If not set, will inherit from
the environmental variable $SUBJECTS_DIR. Default: None
Returns
-------
subject_id : str
FreeSurfer subject ID
subjects_dir : str
Path to subject directory with `subject_id`
"""
from netneurotools.utils import check_fs_subjid
# check for FreeSurfer install w/fsaverage; otherwise, fetch required
try:
subject_id, subjects_dir = check_fs_subjid(subject_id, subjects_dir)
except FileNotFoundError:
if 'fsaverage' not in subject_id:
raise ValueError('Provided subject {} does not exist in provided '
'subjects_dir {}'
.format(subject_id, subjects_dir))
from netneurotools.datasets import fetch_fsaverage
from netneurotools.datasets.utils import _get_data_dir
fetch_fsaverage(subject_id)
subjects_dir = os.path.join(_get_data_dir(), 'tpl-fsaverage')
subject_id, subjects_dir = check_fs_subjid(subject_id, subjects_dir)
return subject_id, subjects_dir
| 5,337,623
|
def EnableTrt(mod, params=None, trt_version=None):
"""Converts the "main" function in the module into one that can be executed using
TensorRT. If any of the operators are not supported by the TensorRT
conversion, the unmodified program will be returned instead.
Parameters
----------
mod: Module
The original module.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
trt_version : Optional[Tuple[int]]
Which version of TensorRT to target for partitioning as a tuple of
(major, minor, patch). If not specified, will attempt to get using
GetTrtVersion.
Returns
-------
mod: Module
The modified module which will use the TensorRT runtime if compatible.
"""
if not trt_version:
trt_version = GetTrtVersion()
# If TVM wasn't built against TRT, default to target TRT 6. Since the
# actual conversion to TRT is done at runtime, building against TRT is
# not required for compilation.
if not trt_version:
trt_version = (6, 0, 1)
assert isinstance(trt_version, (list, tuple))
assert len(trt_version) == 3
# Apply passes required for TRT
mod = relay.transform.RemoveUnusedFunctions()(mod)
mod = relay.transform.InferType()(mod)
mod = relay.transform.ConvertLayout('NCHW')(mod)
mod = PreprocessForTrt(mod)
if params:
# Bind params so that we can use FoldConstant.
mod['main'] = _bind_params(mod['main'], params)
mod = relay.transform.FoldConstant()(mod)
return _transform.EnableTrt(*trt_version)(mod)
| 5,337,624
|
def _setup_cuda_fft_resample(n_jobs, W, new_len):
"""Set up CUDA FFT resampling.
Parameters
----------
n_jobs : int | str
If n_jobs == 'cuda', the function will attempt to set up for CUDA
FFT resampling.
W : array
The filtering function to be used during resampling.
If n_jobs='cuda', this function will be shortened (since CUDA
assumes FFTs of real signals are half the length of the signal)
and turned into a gpuarray.
new_len : int
The size of the array following resampling.
Returns
-------
n_jobs : int
Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
original n_jobs is passed.
cuda_dict : dict
Dictionary with the following CUDA-related variables:
use_cuda : bool
Whether CUDA should be used.
fft_plan : instance of FFTPlan
FFT plan to use in calculating the FFT.
ifft_plan : instance of FFTPlan
FFT plan to use in calculating the IFFT.
x_fft : instance of gpuarray
Empty allocated GPU space for storing the result of the
frequency-domain multiplication.
x : instance of gpuarray
Empty allocated GPU space for the data to resample.
Notes
-----
This function is designed to be used with fft_resample().
"""
cuda_dict = dict(use_cuda=False, rfft=rfft, irfft=irfft)
rfft_len_x = len(W) // 2 + 1
# fold the window onto itself (should be symmetric) and truncate
W = W.copy()
W[1:rfft_len_x] = (W[1:rfft_len_x] + W[::-1][:rfft_len_x - 1]) / 2.
W = W[:rfft_len_x]
if n_jobs == 'cuda':
n_jobs = 1
init_cuda()
if _cuda_capable:
try:
import cupy
# do the IFFT normalization now so we don't have to later
W = cupy.array(W)
logger.info('Using CUDA for FFT resampling')
except Exception:
logger.info('CUDA not used, could not instantiate memory '
'(arrays may be too large), falling back to '
'n_jobs=1')
else:
cuda_dict.update(use_cuda=True,
rfft=_cuda_upload_rfft,
irfft=_cuda_irfft_get)
else:
logger.info('CUDA not used, CUDA could not be initialized, '
'falling back to n_jobs=1')
cuda_dict['W'] = W
return n_jobs, cuda_dict
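# Short NumPy illustration of the "fold the window onto itself" step above, using a
# symmetric toy frequency response (for a truly symmetric W the kept half is unchanged).
import numpy as np
N = 8
freqs = np.fft.fftfreq(N)
W = 1.0 / (1.0 + (freqs / 0.25) ** 2)       # even in frequency, so W[k] == W[N - k]
rfft_len = len(W) // 2 + 1
W_folded = W.copy()
W_folded[1:rfft_len] = (W_folded[1:rfft_len] + W_folded[::-1][:rfft_len - 1]) / 2.
W_folded = W_folded[:rfft_len]
assert np.allclose(W_folded, W[:rfft_len])  # the fold is a no-op for a symmetric window
print(len(W), len(W_folded))                # 8 5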
| 5,337,625
|
def sentensize(text):
"""Break a text into sentences.
Args:
text (str): A text containing sentence(s).
Returns:
list of str: A list of sentences.
"""
return nltk.tokenize.sent_tokenize(text)
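# Usage sketch (assumes NLTK is installed; sent_tokenize needs the "punkt" models,
# called "punkt_tab" on newer NLTK releases, downloadable once via nltk.download).
import nltk
nltk.download("punkt", quiet=True)
print(sentensize("This is one sentence. Here is another one."))
# ['This is one sentence.', 'Here is another one.']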
| 5,337,626
|
def tokenize(texts, context_length=77):
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = tokenizer.encoder["<|startoftext|>"]
eot_token = tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] +
tokenizer.encode(text) + [eot_token] for text in texts]
result = paddle.zeros((len(all_tokens), context_length), dtype='int64')
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
raise RuntimeError(
f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = paddle.to_tensor(tokens)
return result
| 5,337,627
|
def _int64_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto,
e.g, sentence in list of ints
"""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
| 5,337,628
|
def read_relative_pose(object_frame_data: dict) -> tf.Transform:
"""
Read the pose of an object relative to the camera, from the frame data.
For reasons (known only to the developer), these poses are in OpenCV convention.
So x is right, y is down, z is forward.
Scale is still 1cm, so we divide by 100 again.
see
https://github.com/jskinn/Dataset_Synthesizer/blob/local-devel/Source/Plugins/NVSceneCapturer/Source/NVSceneCapturer/Private/NVSceneFeatureExtractor_DataExport.cpp#L143
:param object_frame_data: The frame data dict from the matching object in the objects array
:return: The relative pose of the object, as a Transform
"""
tx, ty, tz = object_frame_data['location']
qx, qy, qz, qw = object_frame_data['quaternion_xyzw']
return tf.Transform(
location=(tz / 100, -tx / 100, -ty / 100),
rotation=(qw, qz, -qx, -qy),
w_first=True
)
| 5,337,629
|
def upsert_task(task_uuid: str, task: Task) -> Task:
"""Upsert a task.
It creates the task in the database if it does not already exist;
otherwise it updates the existing one.
Args:
task_uuid:
The uuid of the task to upsert.
task:
The task data.
Returns:
The upserted task.
"""
with Session(engine) as session:
# check if the task exists
statement = select(Task).where(Task.uuid == task_uuid)
result = session.exec(statement).first()
# if not, create it
if result is None:
result = task
# sync the data
for key, value in task.dict(exclude_unset=True).items():
setattr(result, key, value)
# persist the data to the database
session.add(result)
session.commit()
session.refresh(result)
return result
| 5,337,630
|
def shuffle_entries(x, entry_cls, config=None, value_type=sgf2n, reverse=False, perm_size=None):
""" Shuffle a list of ORAM entries.
Randomly permutes the first "perm_size" entries, leaving the rest (empty
entry padding) in the same position. """
n = len(x)
l = len(x[0])
if n & (n-1) != 0:
raise CompilerError('Entries must be padded to power of two length.')
if perm_size is None:
perm_size = n
xarrays = [Array(n, value_type.reg_type) for i in range(l)]
for i in range(n):
for j,value in enumerate(x[i]):
if isinstance(value, MemValue):
xarrays[j][i] = value.read()
else:
xarrays[j][i] = value
if config is None:
config = config_shuffle(perm_size, value_type)
for xi in xarrays:
shuffle(xi, config, value_type, reverse)
for i in range(n):
x[i] = entry_cls(xarrays[j][i] for j in range(l))
return config
| 5,337,631
|
def file(input_file):
"""Import colorscheme from json file."""
theme_name = ".".join((input_file, "json"))
user_theme_file = os.path.join(CONF_DIR, "colorschemes", theme_name)
theme_file = os.path.join(MODULE_DIR, "colorschemes", theme_name)
util.create_dir(os.path.dirname(user_theme_file))
# Find the theme file.
if os.path.isfile(input_file):
theme_file = input_file
elif os.path.isfile(user_theme_file):
theme_file = user_theme_file
elif input_file == "random":
themes = [theme.path for theme in list_themes()]
random.shuffle(themes)
theme_file = themes[0]
# Parse the theme file.
if os.path.isfile(theme_file):
logging.info("Set theme to \033[1;37m%s\033[0m.",
os.path.basename(theme_file))
return parse(theme_file)
else:
logging.error("No colorscheme file found.")
sys.exit(1)
| 5,337,632
|
def entities(request):
"""Get entities for the specified project, locale and paths."""
try:
project = request.GET['project']
locale = request.GET['locale']
paths = json.loads(request.GET['paths'])
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
try:
project = Project.objects.get(slug=project)
except Project.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
try:
locale = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
search = None
if request.GET.get('keyword', None):
search = request.GET
entities = Entity.for_project_locale(project, locale, paths, search)
return JsonResponse(entities, safe=False)
| 5,337,633
|
def info(request, token):
"""
Return the HireFire json data needed to scale worker dynos
"""
if not settings.HIREFIRE_TOKEN:
return HttpResponseBadRequest(
"Hirefire not configured. Set the HIREFIRE_TOKEN environment variable on the app to use Hirefire for dyno scaling"
)
if token != settings.HIREFIRE_TOKEN:
raise PermissionDenied("Invalid token")
current_tasks = 0
queues = []
for index, config in enumerate(QUEUES_LIST):
queue = get_queue_by_index(index)
connection = queue.connection
# Only look at the default queue
if queue.name != "default":
continue
queue_data = {
"name": queue.name,
"jobs": queue.count,
"index": index,
"connection_kwargs": connection.connection_pool.connection_kwargs,
}
connection = get_connection(queue.name)
all_workers = Worker.all(connection=connection)
queue_workers = [worker for worker in all_workers if queue in worker.queues]
queue_data["workers"] = len(queue_workers)
finished_job_registry = FinishedJobRegistry(queue.name, connection)
started_job_registry = StartedJobRegistry(queue.name, connection)
deferred_job_registry = DeferredJobRegistry(queue.name, connection)
queue_data["finished_jobs"] = len(finished_job_registry)
queue_data["started_jobs"] = len(started_job_registry)
queue_data["deferred_jobs"] = len(deferred_job_registry)
current_tasks += queue_data["jobs"]
current_tasks += queue_data["started_jobs"]
queues.append(queue_data)
payload = [{"quantity": current_tasks, "name": "worker"}]
payload = json.dumps(payload)
return HttpResponse(payload, content_type="application/json")
| 5,337,634
|
def get_domain_from_url(url):
"""get domain from url"""
domain=''
# url is http://a.b.com/ads/asds
if re.search(r'://.*?/',url):
try:
domain = url.split('//', 1)[1].split('/', 1)[0]
except IndexError as e:
LOGGER.warn('Get domain error,%s,%s' % (url, e))
# http://a.b.com?a=adsd
elif re.search(r'://.*?\?',url):
try:
domain = url.split('//', 1)[1].split('?', 1)[0]
except IndexError as e:
LOGGER.warn('Get domain error,%s,%s' % (url, e))
elif re.search(r'://.*?',url):
try:
domain = url.split('//', 1)[1].split('/', 1)[0]
except IndexError as e:
LOGGER.warn('Get domain error,%s,%s' % (url, e))
# url is a.b.com/a/b/c, a.b.com, /a/b/c,
elif re.search(r'/',url):
value = url.split('/', 1)[0]
if value == '' or value == '.' or value == '..' or '.' not in value:
pass  # not a usable domain (empty or a relative path component)
else:
domain = value
return domain
| 5,337,635
|
def render_response(body=None, status=None, headers=None):
"""生成WSGI返回消息"""
headers = [] if headers is None else list(headers)
if body is None:
body = ''
status = status or (204, 'No Content')
else:
body = json.dumps(body)
headers.append(('Content-Type', 'application/json'))
status = status or (200, 'OK')
resp = webob.Response(body=body,
status='%s %s' % status,
headerlist=headers)
return resp
| 5,337,636
|
def sanic_client():
"""Fixture for using sanic async HTTP server rather than a asgi async server used by test client"""
env = os.environ.copy()
env["SANIC_PORT"] = str(SERVER_PORT)
args = ["ddtrace-run", "python", RUN_SERVER_PY]
subp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, env=env)
client = Client("http://0.0.0.0:{}".format(SERVER_PORT))
client.wait(path="/hello")
try:
yield client
finally:
resp = client.get_ignored("/shutdown-tracer")
assert resp.status_code == 200
subp.terminate()
try:
# Give the server 3 seconds to shutdown, then kill it
subp.wait(3)
except subprocess.TimeoutExpired:
subp.kill()
| 5,337,637
|
def tests(request):
"""Print a list of tests."""
test_list = Test.objects.all()
tag_list = Tag.objects.all().order_by(u'name')
# check if we need to filter the test list based on tags
# defaults to empty list because we're always passing the list to the template
tags = request.GET.get(u'tag', [])
if tags:
# plus means only those tests that are tagged with every tag
# TODO: support commas, for aggregating stuff that includes at least one tag in the list
tags = tags.split(u'+')
log.debug(u'displaying tests for search tags: {}'.format(tags))
# order the list by name if search tags are specified
# this list contains tests if they have any of the tags passed in, so it's still 'unfiltered'
test_list = test_list.filter(tags__name__in=tags).distinct().order_by(u'name')
# return only the tests that have every tag specified
test_list = filter_tags(test_list, tags)
else:
# order the list by newest -> oldest if there are no tags specified
test_list = test_list.order_by(u'-id')
paginator = Paginator(test_list, 20) # decides how many results to show per page
# https://docs.djangoproject.com/en/dev/topics/pagination/
page = request.GET.get(u'page')
try:
tests = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
tests = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
tests = paginator.page(paginator.num_pages)
return render(request, u'django_bdd/bddscenarios.html', {u'title': u'Scenarios', u'tests': tests, u'tag_list': tag_list, u'searched_tags': tags, u'label_classes': LABEL_CLASSES})
| 5,337,638
|
def nashpobench_benchmark(params):
"""
The underlying tabulated blackbox does not have an `elapsed_time_attr`,
but only a `time_this_resource_attr`.
"""
config_space = dict(
CONFIGURATION_SPACE,
epochs=params['max_resource_level'],
dataset_name=params['dataset_name'])
return {
'script': None,
'metric': METRIC_VALID_LOSS,
'mode': 'min',
'resource_attr': RESOURCE_ATTR,
'elapsed_time_attr': METRIC_ELAPSED_TIME,
'max_resource_attr': 'epochs',
'config_space': config_space,
'cost_model': None,
'supports_simulated': True,
'blackbox_name': BLACKBOX_NAME,
}
| 5,337,639
|
def test_application():
"""Test application running"""
app = use_app()
print(app) # __repr__ without app
app.create()
wrong = 'glfw' if app.backend_name.lower() != 'glfw' else 'pyqt5'
assert_raises(RuntimeError, use_app, wrong)
app.process_events()
print(app) # test __repr__
assert_raises(ValueError, Canvas, keys='foo')
assert_raises(TypeError, Canvas, keys=dict(escape=1))
assert_raises(ValueError, Canvas, keys=dict(escape='foo')) # not an attr
pos = [0, 0] if app.backend_module.capability['position'] else None
size = (100, 100)
# Use "with" statement so failures don't leave open window
# (and test context manager behavior)
title = 'default'
with Canvas(title=title, size=size, app=app, show=True,
position=pos) as canvas:
context = canvas.context
assert_true(canvas.create_native() is None) # should be done already
assert_is(canvas.app, app)
assert_true(canvas.native)
assert_equal('swap_buffers', canvas.events.draw.callback_refs[-1])
canvas.measure_fps(0.001)
sleep(0.002)
canvas.update()
app.process_events()
assert_true(canvas.fps > 0)
# Other methods
print(canvas) # __repr__
assert_equal(canvas.title, title)
canvas.title = 'you'
with use_log_level('warning', record=True, print_msg=False):
if app.backend_module.capability['position']:
# todo: disable more tests based on capability
canvas.position = pos
canvas.size = size
canvas.connect(on_mouse_move)
assert_raises(ValueError, canvas.connect, _on_mouse_move)
if sys.platform != 'darwin': # XXX knownfail, prob. needs warmup
canvas.show(False)
canvas.show()
app.process_events()
assert_raises(ValueError, canvas.connect, on_nonexist)
# deprecation of "paint"
with use_log_level('info', record=True, print_msg=False) as log:
olderr = sys.stderr
try:
fid = StringIO()
sys.stderr = fid
@canvas.events.paint.connect
def fake(event):
pass
finally:
sys.stderr = olderr
assert_equal(len(log), 1)
assert_in('deprecated', log[0])
# screenshots
gl.glViewport(0, 0, *size)
ss = _screenshot()
assert_array_equal(ss.shape, size + (4,))
assert_equal(len(canvas._backend._vispy_get_geometry()), 4)
if sys.platform != 'win32': # XXX knownfail for windows
assert_array_equal(canvas.size, size)
assert_equal(len(canvas.position), 2) # XXX knownfail, doesn't "take"
# GLOO: should have an OpenGL context already, so these should work
vert = "void main (void) {gl_Position = pos;}"
frag = "void main (void) {gl_FragColor = pos;}"
program = Program(vert, frag)
assert_raises(RuntimeError, program.glir.flush, context.shared.parser)
vert = "uniform vec4 pos;\nvoid main (void) {gl_Position = pos;}"
frag = "uniform vec4 pos;\nvoid main (void) {gl_FragColor = pos;}"
program = Program(vert, frag)
# uniform = program.uniforms[0]
program['pos'] = [1, 2, 3, 4]
vert = "attribute vec4 pos;\nvoid main (void) {gl_Position = pos;}"
frag = "void main (void) {}"
program = Program(vert, frag)
# attribute = program.attributes[0]
program["pos"] = [1, 2, 3, 4]
# use a real program
program._glir.clear()
vert = ("uniform mat4 u_model;"
"attribute vec2 a_position; attribute vec4 a_color;"
"varying vec4 v_color;"
"void main (void) {v_color = a_color;"
"gl_Position = u_model * vec4(a_position, 0.0, 1.0);"
"v_color = a_color;}")
frag = "void main() {gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);}"
n, p = 250, 50
T = np.random.uniform(0, 2 * np.pi, n)
position = np.zeros((n, 2), dtype=np.float32)
position[:, 0] = np.cos(T)
position[:, 1] = np.sin(T)
color = np.ones((n, 4), dtype=np.float32) * (1, 1, 1, 1)
data = np.zeros(n * p, [('a_position', np.float32, 2),
('a_color', np.float32, 4)])
data['a_position'] = np.repeat(position, p, axis=0)
data['a_color'] = np.repeat(color, p, axis=0)
program = Program(vert, frag)
program.bind(VertexBuffer(data))
program['u_model'] = np.eye(4, dtype=np.float32)
# different codepath if no call to activate()
program.draw(gl.GL_POINTS)
subset = IndexBuffer(np.arange(10, dtype=np.uint32))
program.draw(gl.GL_POINTS, subset)
# bad programs
frag_bad = ("varying vec4 v_colors") # no semicolon
program = Program(vert, frag_bad)
assert_raises(RuntimeError, program.glir.flush, context.shared.parser)
frag_bad = None # no fragment code. no main is not always enough
assert_raises(ValueError, Program, vert, frag_bad)
# Timer
timer = Timer(interval=0.001, connect=on_mouse_move, iterations=2,
start=True, app=app)
timer.start()
timer.interval = 0.002
assert_equal(timer.interval, 0.002)
assert_true(timer.running)
sleep(.003)
assert_true(timer.elapsed >= 0.002)
timer.stop()
assert_true(not timer.running)
assert_true(timer.native)
timer.disconnect()
# test that callbacks take reasonable inputs
_test_callbacks(canvas)
# cleanup
canvas.swap_buffers()
canvas.update()
app.process_events()
# put this in even though __exit__ will call it to make sure we don't
# have problems calling it multiple times
canvas.close()
| 5,337,640
|
def horizontal_plate_natual_convection_2(Gr, Pr):
"""hot side downward, or cold side upward """
""" 1e5 < Ra < 1e10 """
Ra = Gr * Pr
return 0.27 * Ra**0.25
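# Worked example of the correlation Nu = 0.27 * Ra**0.25 (Gr and Pr below are arbitrary
# values chosen so that Ra falls inside the stated 1e5 < Ra < 1e10 range).
Gr, Pr = 1.0e8, 0.7
Ra = Gr * Pr                              # 7.0e7
Nu = 0.27 * Ra ** 0.25                    # ~24.7
print("Ra = %.3g, Nu = %.1f" % (Ra, Nu))  # Ra = 7e+07, Nu = 24.7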
| 5,337,641
|
def run_test_case(test_case: TestCase) -> Tuple[Simulation, Scenario, ExtThread, SimulationID]:
"""
This method starts the actual simulation in a separate thread.
Additionally, it calculates and attaches all information that is needed by this node and the separate
thread before calling _start_simulation(...).
"""
import dill as pickle
from drivebuildclient import create_client, send_request
from config import SIM_NODE_PORT, FIRST_SIM_PORT
sid = SimulationID()
response = send_request(create_client("localhost", SIM_NODE_PORT), b"generateSid", [])
sid.ParseFromString(response)
sim = Simulation(sid, pickle.dumps(test_case), FIRST_SIM_PORT + run_test_case.counter)
run_test_case.counter += 1 # FIXME Add a lock?
# Make sure there is no folder from previous tests with the same sid that was not properly removed
bng_scenario, thread = sim._start_simulation(test_case)
return sim, bng_scenario, thread, sid
| 5,337,642
|
def test_pre_commit_hook_sync_nbstripout(
tmpdir,
cwd_tmpdir,
tmp_repo,
jupytext_repo_root,
jupytext_repo_rev,
notebook_with_outputs,
):
"""Here we sync the ipynb notebook with a Markdown file and also apply nbstripout."""
pre_commit_config_yaml = f"""
repos:
- repo: {jupytext_repo_root}
rev: {jupytext_repo_rev}
hooks:
- id: jupytext
args: [--sync]
- repo: https://github.com/kynan/nbstripout
rev: 0.3.9
hooks:
- id: nbstripout
"""
tmpdir.join(".pre-commit-config.yaml").write(pre_commit_config_yaml)
tmp_repo.git.add(".pre-commit-config.yaml")
pre_commit(["install", "--install-hooks", "-f"])
# write a test notebook
write(notebook_with_outputs, "test.ipynb")
# We pair the notebook to a md file
jupytext(["--set-formats", "ipynb,md", "test.ipynb"])
# try to commit it, should fail because
# 1. the md version hasn't been added
# 2. the notebook has outputs
tmp_repo.git.add("test.ipynb")
with pytest.raises(
HookExecutionError,
match="files were modified by this hook",
):
tmp_repo.index.commit("failing")
# Add the two files
tmp_repo.git.add("test.ipynb")
tmp_repo.git.add("test.md")
# now the commit will succeed
tmp_repo.index.commit("passing")
assert "test.ipynb" in tmp_repo.tree()
assert "test.md" in tmp_repo.tree()
# the ipynb file has no outputs on disk
nb = read("test.ipynb")
assert not nb.cells[0].outputs
| 5,337,643
|
def translate_value_data(
new_values: list,
options: dict,
parent_value: str,
translate_dict: typing.Optional[dict],
values: list,
):
"""Translates value data if necessary and checks if it falls within the Castor optiongroup"""
for value in values:
if pd.isnull(parent_value):
if translate_dict:
value = translate_dict.get(str(value), "Error: no translation provided")
new_values.append(options.get(str(value), "Error: non-existent option"))
else:
if translate_dict:
value = translate_dict.get(str(value), parent_value)
new_values.append(options.get(str(value), parent_value))
return new_values
| 5,337,644
|
def makeVocabFromText(
filelist=None,
max_size=10*10000,
least_freq=2,
trunc_len=100,
filter_len=0,
print_log=None,
vocab_file=None,
encoding_format='utf-8',
lowercase = True):
""" the core of this function is getting a word2count dict and writing it to a .txt file,then use Vocab to read it """
if print_log:
print_log("%s: the max vocab size = %d, least_freq is %d truncate length = %d" \
% ( filelist[0], max_size, least_freq , trunc_len ))
else:
print("%s: the max vocab size = %d, least_freq is %d truncate length = %d" \
% ( filelist[0], max_size, least_freq , trunc_len ))
"""tokenizing sentence and add word to word2count dict"""
word2count={}
for filename in filelist:
with open(filename, 'r', encoding = encoding_format) as f:
for sent in f:
tokens = sent.strip().split()
if 0 < filter_len < len(sent.strip().split()):
continue
if trunc_len > 0:
tokens = tokens[:trunc_len]
for word in tokens:
word = word if not lowercase else word.lower()
if word not in word2count:
word2count[word] = 1
else:
word2count[word] += 1
return makeVocabFormDict(word2count=word2count,max_size=max_size,least_freq=least_freq,\
vocab_file=vocab_file,encoding_format=encoding_format,filename=filelist[0])
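# The counting loop above is equivalent to the standard-library Counter pattern;
# a minimal sketch with toy sentences and no file I/O.
from collections import Counter
sentences = ["the cat sat", "the cat ran", "a dog ran"]
trunc_len, least_freq = 100, 2
word2count = Counter()
for sent in sentences:
    tokens = sent.strip().split()[:trunc_len]      # truncate overly long sentences
    word2count.update(t.lower() for t in tokens)   # lowercase before counting
vocab = [w for w, c in word2count.most_common() if c >= least_freq]
print(vocab)  # ['the', 'cat', 'ran']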
| 5,337,645
|
def elimV(N, a, b, c, p, uu, alpha, beta):
"""
elimV. Called by LPSteps
Double sweep elimination along the vertical direction, i.e. column-wise
Inputs NxN arrays or scalars
"""
"""
/* initial condition, everything is going to be zero at the edge */
"""
alpha[0,:] = 0.0
beta[0,:] = 0.0
alpha[N-2,:] = 0.0
beta[N-2,:] = 0.0
"""
//* forward elimination */
"""
for i in range(1, N-2):
cc = c[i,:] - a * alpha[i-1]
alpha[i,:] = b / cc
beta[i,:] = (p[i,:] + a * beta[i-1,:] ) / cc
#basically same as one more loop ??:
cc = c[N-1,:] - a * alpha[N-2,:]
beta[N-1,:] = (p[N-1,:] + a * beta[N-2,:] ) / cc
"""
//* edge amplitude =0 */
"""
uu[N-1,:] = beta[N-1,:]
"""
//* backward elimination */
"""
for i in range(N-2, -1, -1):
uu[i,:] = alpha[i,:] * uu[i+1,:] + beta[i,:]
pass
| 5,337,646
|
def _run_agent(
agent_dir: Union[PathLike, str], stop_event: Event, log_level: Optional[str] = None
) -> None:
"""
Load and run agent in a dedicated process.
:param agent_dir: agent configuration directory
:param stop_event: multithreading Event to stop agent run.
:param log_level: debug level applied for AEA in subprocess
:return: None
"""
import asyncio # pylint: disable=import-outside-toplevel
import select # pylint: disable=import-outside-toplevel
import selectors # pylint: disable=import-outside-toplevel
if hasattr(select, "kqueue"): # pragma: nocover # cause platform specific
selector = selectors.SelectSelector()
loop = asyncio.SelectorEventLoop(selector) # type: ignore
asyncio.set_event_loop(loop)
_set_logger(log_level=log_level)
agent = load_agent(agent_dir)
def stop_event_thread():
try:
stop_event.wait()
except (KeyboardInterrupt, EOFError, BrokenPipeError) as e: # pragma: nocover
_default_logger.error(
f"Exception raised in stop_event_thread {e} {type(e)}. Skip it, looks process is closed."
)
finally:
_default_logger.debug("_run_agent: stop event raised. call agent.stop")
agent.runtime.stop()
Thread(target=stop_event_thread, daemon=True).start()
try:
agent.start()
except KeyboardInterrupt: # pragma: nocover
_default_logger.debug("_run_agent: keyboard interrupt")
except BaseException as e: # pragma: nocover
_default_logger.exception("exception in _run_agent")
exc = AEAException(f"Raised {type(e)}({e})")
exc.__traceback__ = e.__traceback__
raise exc
finally:
_default_logger.debug("_run_agent: call agent.stop")
agent.stop()
| 5,337,647
|
def import_supplemental(file_path):
"""Get data from a supplemental file"""
data = sio.loadmat(file_path)
data['move'] = np.squeeze(data['move'])
data['rep'] = np.squeeze(data['rep'])
data['emg_time'] = np.squeeze(data['emg_time'])
return data
| 5,337,648
|
def _unpickle_injected_object(base_class, mixin_class, class_name=None):
"""
Callable for the pickler to unpickle objects of a dynamically created class
based on the InjectableMixin. It creates the base object from the original
base class and re-injects the mixin class when unpickling an object.
:param type base_class: The base class of the pickled object before adding
the mixin via injection.
:param type mixin_class: The :class:`InjectableMixin` subclass that was
injected into the pickled object.
:param str class_name: The class name of the pickled object's dynamically
created class.
:return: The initial unpickled object (before the pickler restores the
object's state).
"""
obj = base_class.__new__(base_class, ())
return mixin_class.inject_into_object(obj, class_name)
| 5,337,649
|
def parse_endfblib(libdir):
"""Parse ENDF/B library
Parameters:
-----------
libdir : str
directory with ENDFB file structure"""
filepaths = []
nuclidnames = []
endf_dir = Path(libdir)
neutron_files = tuple((endf_dir / "neutrons").glob("*endf"))
for n in neutron_files:
filepaths.append(n.absolute())
nuclidnames.append(n.name.split('_')[1] +
re.split("^0*", n.name.split('_')[2][:-5])[-1])
return nuclidnames, filepaths
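# Sketch of the filename parsing done in the loop above, assuming ENDF/B neutron files are
# named like "n-092_U_235.endf" (element symbol in field 1, zero-padded mass number in field 2).
import re
name = "n-092_U_235.endf"
element = name.split("_")[1]                          # 'U'
mass = re.split("^0*", name.split("_")[2][:-5])[-1]   # strip '.endf', then leading zeros
print(element + mass)                                 # 'U235'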
| 5,337,650
|
def get_parser():
"""Get parser object."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(
description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-f",
"--file",
dest="filename",
help="Read this PDF file",
metavar="FILE",
required=True,
)
return parser
| 5,337,651
|
def build_val_col_list(tableName):
"""Build and return a schema to use for the sample data."""
statement = "( SELECT column_name, data_type, case when data_type='NUMBER' THEN NVL(DATA_PRECISION,38) + DATA_SCALE ELSE DATA_LENGTH END AS ORACLE_LENGTH FROM dba_tab_columns WHERE table_name = '" + tableName + "' order by column_id asc )"
buildColTypeList = spark.read.format("jdbc") \
.option("url","jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
.option("dbtable", statement) \
.option("user","system") \
.option("password","oracle") \
.option("driver","oracle.jdbc.driver.OracleDriver") \
.load()
xList = buildColTypeList.collect()
return xList
| 5,337,652
|
def save_image(user, filename, image_tif, process, latency, size, hist):
"""
Function that saves image to Mongo database
Args:
user: username
filename: desired file name in database
image_tif: tiff image in byte format
process: processing algorithm applied to image
latency: time to process image
size: image size
hist: histogram values of image
Returns:
outstr: Confirmation that image has been saved
"""
time = datetime.datetime.now()
Image_Dict = {
"File": filename,
"Image": image_tif,
"Process": process,
"Timestamp": time,
"Latency": latency,
"Size": size,
"Histogram": hist,
}
Image_List = user.ImageFile
Image_List.append(Image_Dict)
user.filenames.append(filename)
user.save()
outstr = "Image saved successfully"
return outstr
| 5,337,653
|
def run_simulation_with_params(
sim_params, replicate, repeats=10, should_perform_gwas=True):
"""Runs simulation with given params and returns result object.
"""
try:
simulation_result = run_simulation(
simulation_params=sim_params)
except Exception as e:
print(sim_params)
raise e
result = {
'num_snps_considered': sim_params.num_snps_considered,
'num_samples': sim_params.num_samples,
'num_snps_with_effect': sim_params.num_snps_with_effect,
'replicate': replicate,
'total_fitness_effect': np.prod(simulation_result['snp_effect']),
'mage_cycles': sim_params.mage_cycles,
'population_size': sim_params.population_size
}
# Apply linear modeling.
lm_result = run_linear_modeling(
simulation_result['wgs_samples'],
simulation_result['wgs_samples_doubling_times'],
repeats=repeats)
lm_eval_results = evaluate_modeling_result(
simulation_result, lm_result)
lm_eval_results_df = lm_eval_results['results_df']
result.update({
'lm_pearson_r': lm_eval_results['pearson_r'],
'lm_pearson_p': lm_eval_results['p_value'],
})
result.update(
calculate_modeling_metrics(
lm_eval_results_df, 'linear_model_coef',
results_prefix='lm_'))
# Maybe perform GWAS.
if should_perform_gwas:
gwas_results_df = run_gwas(
simulation_result['wgs_samples'],
simulation_result['wgs_samples_doubling_times'])
gwas_eval_results = evaluate_gwas_result(
gwas_results_df, lm_eval_results_df)
gwas_eval_results_df = gwas_eval_results['results_df']
result.update({
'gwas_pearson_r': gwas_eval_results['pearson_r'],
'gwas_pearson_p': gwas_eval_results['p_value'],
})
result.update(
calculate_modeling_metrics(
gwas_eval_results_df, 'gwas_coef', results_prefix='gwas_'))
# Perform enrichment analysis on final timepoint.
enrichment_result_df = run_enrichment_analysis(simulation_result)
result.update(
calculate_enrichment_metrics(
enrichment_result_df))
return result
| 5,337,654
|
def hsl_to_rgb(hsl):
"""Convert hsl colorspace values to RGB."""
# Convert hsl to 0-1 ranges.
h = hsl[0] / 359.
s = hsl[1] / 100.
l = hsl[2] / 100.
hsl = (h, s, l)
# returns numbers between 0 and 1
tmp = colorsys.hls_to_rgb(h, l, s)  # note: colorsys uses HLS argument order
# convert to 0 to 255
r = int(round(tmp[0] * 255))
g = int(round(tmp[1] * 255))
b = int(round(tmp[2] * 255))
return (r, g, b)
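# Quick sanity checks (pure red and white convert exactly; assumes colorsys is imported
# alongside the function above).
assert hsl_to_rgb((0, 100, 50)) == (255, 0, 0)
assert hsl_to_rgb((0, 0, 100)) == (255, 255, 255)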
| 5,337,655
|
def init_celery(app=None):
"""
Initialize celery.
"""
app = app or create_app(os.environ.get('APP_MODE'))
celery.conf.update(app.config.get("CELERY", {}))
class ContextTask(celery.Task):
"""
Make celery tasks work with Flask app context
"""
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
celery.Task = ContextTask
return celery
| 5,337,656
|
async def test_filterfalse_matches_itertools_filterfalse(
arange: ty.Type[ty.AsyncIterator[int]], stop: int
):
"""Ensure that our async filterfalse implementation follows the standard
implementation.
"""
async def _pair(x):
return (x % 2) == 0
target = list(itertools.filterfalse(lambda x: (x % 2) == 0, range(stop)))
result = [x async for x in none.collection.a.filterfalse(_pair, arange(stop))]
assert result == target
| 5,337,657
|
def extract_buffer_info(mod, param_dict):
"""
This function is to read the tvm.IRModule that
contains Relay to TIR compiled IRModule. Thereafter,
this will extract the buffer information as the shape
and constant data (if any).
Parameters
----------
mod : tvm.IRModule
The NPU TIR IRModule.
param_dict : dict
A dictionary containing param idx --> const numpy.NDArray
Returns
-------
dict
a dictionary of buffer names --> BufferInfo
"""
buffer_info = dict()
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
for idx, const_data in param_dict.items():
param = primfunc.params[idx]
buffer_info[primfunc.buffer_map[param].data] = BufferInfo(
const_data, const_data.shape, const_data.dtype, BufferType.constant
)
for param in primfunc.params:
if primfunc.buffer_map[param].data not in buffer_info.keys():
buffer_info[primfunc.buffer_map[param].data] = BufferInfo(
None,
primfunc.buffer_map[param].shape,
primfunc.buffer_map[param].dtype,
BufferType.input_or_output,
)
def populate_allocate_buffer_info(stmt):
if isinstance(stmt, tvm.tir.stmt.Allocate):
allocate = stmt
buffer_info[allocate.buffer_var] = BufferInfo(
None,
allocate.extents,
allocate.dtype,
BufferType.scratch,
)
tvm.tir.stmt_functor.post_order_visit(primfunc.body, populate_allocate_buffer_info)
return buffer_info
| 5,337,658
|
def reproject_as_needed(src, tilespec, resampling='nearest'):
""" Return a ``rasterio`` dataset, reprojected if needed
Returns src dataset if reprojection unncessary. Otherwise returns an in
memory ``rasterio`` dataset. Reprojection will snap the bounding
coordinates of the source dataset to align with the tile specification.
Args:
src (rasterio._io.RasterReader): rasterio raster dataset
tilespec (TileSpec): tile specification
resampling (str): reprojection resampling method (default: nearest)
Returns:
rasterio._io.RasterReader: original or reprojected dataset
"""
if src.crs == tilespec.crs:
yield src
else:
# Calculate new transform & size
transform, width, height = warp.calculate_default_transform(
src.crs, tilespec.crs,
src.width, src.height,
*src.bounds, resolution=tilespec.res)
# Snap bounds
transform = snap_transform(transform, tilespec.ul)
dst_meta = src.meta.copy()
dst_meta['driver'] = 'MEM'
dst_meta['crs'] = tilespec.crs
dst_meta['width'] = width
dst_meta['height'] = height
dst_meta['transform'] = transform
with rasterio.open(os.path.basename(src.name), 'w', **dst_meta) as dst:
warp.reproject(
rasterio.band(src, 1),
rasterio.band(dst, 1),
resampling=getattr(warp.Resampling, resampling)
)
yield dst
| 5,337,659
|
def find_external_nodes(digraph):
"""Return a set of external nodes in a directed graph.
External nodes are nodes that are referenced as a dependency but not defined
as a key in the graph dictionary.
"""
external_nodes = set()
for ni in digraph:
for nj in digraph[ni]:
if nj not in digraph:
external_nodes.add(nj)
return external_nodes
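# Usage sketch with a toy dependency graph: 'requests' and 'os' are referenced
# as dependencies but never defined as keys, so they are reported as external.
digraph = {"app": ["lib", "requests"], "lib": ["os"]}
print(sorted(find_external_nodes(digraph)))  # ['os', 'requests']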
| 5,337,660
|
def strip(val):
"""
Strip val, which may be str or iterable of str.
For str input, returns stripped string, and for iterable input,
returns list of str values without empty str (after strip) values.
"""
if isinstance(val, six.string_types):
return val.strip()
try:
return list(filter(None, map(strip, val)))
except TypeError:
return val
| 5,337,661
|
def warn_deprecated(obj, msg):
"""
Issue the warning message `msg`.
"""
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
| 5,337,662
|
def test_single_shape_properties(attribute):
"""Test creating single shape with properties"""
shape = (4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer_kwargs = {f'{attribute}_color': 'red'}
layer = Shapes(data, **layer_kwargs)
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == 1
np.testing.assert_allclose([1, 0, 0, 1], layer_color[0])
| 5,337,663
|
def export_gene_exons(adapter, hgnc_id, build="37"):
"""Export all exons from one gene
Args:
adapter(scout.adapter.MongoAdapter)
hgnc_id(int): hgnc ID of a gene
build(str): "37" or "38"
Yields:
printlines(str): formatted like this: Chrom\tStart\tEnd\tExonId\tTranscripts\tHgncIDs\tHgncSymbols
"""
gene_obj = adapter.hgnc_gene(hgnc_id, build)
if gene_obj is None:
LOG.warning(f"Could't find a gene with HGNC id '{hgnc_id}' in Scout database.")
return
query = {"hgnc_id": hgnc_id, "build": build}
result = adapter.exon_collection.find(query)
print_line = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}"
for exon in result:
yield print_line.format(
exon["chrom"],
exon["start"],
exon["end"],
exon["exon_id"],
exon["transcript"],
hgnc_id,
gene_obj["hgnc_symbol"],
)
| 5,337,664
|
def set_manager_hook(file_manager):
"""Saves a reference to the file_manager.py."""
global FMANAGER
FMANAGER = file_manager
| 5,337,665
|
def durations_histo(filename: str, v1_2_1, v1_5_2):
"""Generate all the figures for the histograms.
Returns a nested dictionary (dataset split -> seizure type -> full figure filename).
"""
from matplotlib.backends.backend_pdf import PdfPages
ensure_path(os.path.dirname(filename))
filenames = dict()
filenames['dev'] = dict()
filenames['train'] = dict()
labels_in_the_datasets = set(list(
v1_2_1['events_counts_dev'].keys()) + list(
v1_2_1['events_counts_train'].keys()) + list(
v1_5_2['events_counts_dev'].keys()) + list(
v1_5_2['events_counts_train'].keys()))
labels_in_the_datasets = sorted(
list(set([label.lower() for label in labels_in_the_datasets])))
dev_seizures_durations_by_type_v1_2_1 = {
seizure_type: np.array(
[event['duration'] for event in v1_2_1['events_dev']
if event['event'] == seizure_type])
for seizure_type in list(v1_2_1['events_counts_dev'].keys())}
train_seizures_durations_by_type_v1_2_1 = {
seizure_type: np.array(
[event['duration'] for event in v1_2_1['events_train']
if event['event'] == seizure_type])
for seizure_type in list(v1_2_1['events_counts_train'].keys())}
dev_seizures_durations_by_type_v1_5_2 = v1_5_2[
'dev_seizures_durations_by_type']
train_seizures_durations_by_type_v1_5_2 = v1_5_2[
'train_seizures_durations_by_type']
with PdfPages(filename + ".pdf") as pdf:
d = pdf.infodict()
d['Title'] = 'Seizures duration histograms'
d['Author'] = 'Vincent Stragier'
d['Subject'] = 'Compilation of all the duration histograms'
d['Keywords'] = 'seizures epilepsy histogram TUSZ EEG'
d['CreationDate'] = datetime.datetime(2020, 10, 21)
d['ModDate'] = datetime.datetime.today()
for seizure_type in labels_in_the_datasets:
plt.figure(
"Histograms for {0} seizures ('{1}') - dev set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()], seizure_type),
figsize=(16/2.54*2, 9/2.54*2))
pdf.attach_note(
"Histograms for {0} seizures ('{1}') - dev set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()],
seizure_type))
plt.rc('text', usetex=True)
plt.rcParams["font.family"] = 'sans-serif'
plt.rcParams['font.sans-serif'] = "Calibri"
plt.suptitle(
"\\Huge Histograms for {0} seizures ('{1}') - dev set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()],
seizure_type),
fontsize=15)
if seizure_type.upper()\
in dev_seizures_durations_by_type_v1_2_1.keys():
data = dev_seizures_durations_by_type_v1_2_1[
seizure_type.upper()]
mu = data.mean()
median = np.median(data)
sigma = data.std()
minimum = data.min()
maximum = data.max()
plt.subplot(2, 2, 1)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#6eb055ff')
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum, "\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Dev set v1.2.1')
plt.subplot(2, 2, 3)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#6eb055ff')
plt.xlim(0, mu)
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Dev set v1.2.1 [0, %.2f]' % mu)
if seizure_type in dev_seizures_durations_by_type_v1_5_2.keys():
data = dev_seizures_durations_by_type_v1_5_2[seizure_type]
mu = data.mean()
median = np.median(data)
sigma = data.std()
minimum = data.min()
maximum = data.max()
plt.subplot(2, 2, 2)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#575656ff')
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Dev set v1.5.2')
plt.subplot(2, 2, 4)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#575656ff')
plt.xlim(0, mu)
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Dev set v1.5.2 [0, %.2f]' % mu)
# tight_layout docs: [left, bottom, right, top]
# in normalized (0, 1) figure
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(
"_".join([filename, seizure_type, 'dev.pdf']),
format="PDF",
transparent=True)
filenames['dev'][seizure_type] = "_".join(
[filename, seizure_type, 'dev.pdf']).replace('\\', '/')
pdf.savefig(transparent=True)
plt.close()
plt.figure(
"Histograms for {0} seizures ('{1}') - train set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()],
seizure_type),
figsize=(16/2.54*2, 9/2.54*2))
plt.rc('text', usetex=True)
plt.rcParams["font.family"] = 'sans-serif'
plt.rcParams['font.sans-serif'] = "Calibri"
plt.suptitle(
"\\Huge Histograms for {0}"
" seizures ('{1}') - train set".format(
EPILEPTIC_SEIZURE_LABELS_DICT[
seizure_type.lower()],
seizure_type),
fontsize=15)
if seizure_type.upper()\
in train_seizures_durations_by_type_v1_2_1.keys():
data = train_seizures_durations_by_type_v1_2_1[
seizure_type.upper()]
mu = data.mean()
median = np.median(data)
sigma = data.std()
minimum = data.min()
maximum = data.max()
plt.subplot(2, 2, 1)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#6eb055ff')
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Train set v1.2.1')
plt.subplot(2, 2, 3)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#6eb055ff')
plt.xlim(0, mu)
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Train set v1.2.1 [0, %.2f]' % mu)
if seizure_type in train_seizures_durations_by_type_v1_5_2.keys():
data = train_seizures_durations_by_type_v1_5_2[seizure_type]
mu = data.mean()
median = np.median(data)
sigma = data.std()
minimum = data.min()
maximum = data.max()
plt.subplot(2, 2, 2)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#575656ff')
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Train set v1.5.2')
plt.subplot(2, 2, 4)
counts, bins, _ = plt.hist(
data, bins=data.size, rwidth=0.8, color='#575656ff')
plt.xlim(0, mu)
i = np.argmax(counts)
hist_mode = (bins[i] + bins[i + 1])/2
plt.ylabel(r'Count per bin')
plt.legend(
[r'$\mu={0:.4f}$, $\sigma={1:.4f}$,{7}min$\,={2:.4f}$,'
r' max$\,={6:.4f}$,{7}median$\,={4:.4f}$,'
r' mode$\,={5:.4f}$,{7}Number of seizures: {3}'.format(
mu,
sigma,
minimum,
len(data),
median,
hist_mode,
maximum,
"\n")])
plt.xlabel(r'Time in seconds')
plt.title(r'Train set v1.5.2 [0, %.2f]' % mu)
# tight_layout docs: [left, bottom, right, top]
# in normalized (0, 1) figure
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig("_".join(
[filename, seizure_type, 'train.pdf']),
format="PDF",
transparent=True)
filenames['train'][seizure_type] = "_".join(
[filename, seizure_type, 'train.pdf']).replace('\\', '/')
pdf.savefig(transparent=True)
plt.close()
return filenames
| 5,337,666
|
def construct_area_cube(var_name, area_data, global_atts, dim_coords):
"""Construct the new area cube """
dim_coords_list = []
for i, coord in enumerate(dim_coords):
dim_coords_list.append((coord, i))
if var_name == 'areacello':
long_name = 'Grid-Cell Area for Ocean Variables'
else:
long_name = 'Grid-Cell Area for Atmospheric Grid Variables'
area_cube = iris.cube.Cube(area_data,
standard_name='cell_area',
long_name=long_name,
var_name=var_name,
units='m2',
attributes=global_atts,
dim_coords_and_dims=dim_coords_list)
return area_cube
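# Usage sketch (not part of the original source): builds a small dummy
# 'areacella' cube with illustrative latitude/longitude coordinates and
# attributes, assuming iris and numpy are available as in the function above.
def _example_construct_area_cube():
    import numpy as np
    import iris
    lats = iris.coords.DimCoord(np.array([-45.0, 0.0, 45.0]),
                                standard_name='latitude', units='degrees')
    lons = iris.coords.DimCoord(np.array([0.0, 90.0, 180.0, 270.0]),
                                standard_name='longitude', units='degrees')
    area_data = np.full((3, 4), 1.0e10)  # dummy cell areas in m2
    return construct_area_cube('areacella', area_data,
                               {'source': 'example'}, [lats, lons])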
| 5,337,667
|
def _switch_obs_2_time_dim(ds):
"""Function to create a single time variable that is the midpoint of the
ObsPack averaging interval, and make it the xarray coordinate. """
# Get the midpoint of the average pulled from the model:
midpoint = pd.to_datetime(ds.averaging_interval_start.data) + \
np.asarray(ds.averaging_interval.data) / 2
# Make it the time midpoint a new variable in the dataset.
t = midpoint.to_series().reset_index(drop=True)
ds['time'] = ("obs", t)
# Tell xarray that we want time to be a coordinate.
ds = ds.set_coords('time')
# And tell it to replace Obs # with time as the preferred dimension.
ds = ds.swap_dims({"obs": "time"})
return ds
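# Usage sketch (not part of the original source): builds a tiny ObsPack-like
# dataset with an "obs" dimension and the two averaging-interval variables
# the function expects, then swaps "obs" for the interval midpoint time.
def _example_switch_obs_2_time_dim():
    import numpy as np
    import pandas as pd
    import xarray as xr
    starts = pd.to_datetime(["2020-01-01T00:00", "2020-01-01T01:00",
                             "2020-01-01T02:00"])
    ds = xr.Dataset({
        "co2": ("obs", np.array([410.1, 410.3, 410.2])),
        "averaging_interval_start": ("obs", starts),
        "averaging_interval": ("obs",
                               np.array([3600] * 3, dtype="timedelta64[s]")),
    })
    return _switch_obs_2_time_dim(ds)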
| 5,337,668
|
def mqtt_cb_on_message(client, userdata, msg):
"""Do the callback for when a message is received without it own callback."""
print("misc_message_cb: unexpected message:",
msg.topic, str(msg.payload), file=sys.stderr)
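# Usage sketch (not part of the original source): install the function above
# as the fallback handler on a paho-mqtt client; topic-specific handlers added
# with message_callback_add() would take precedence over it.
def _example_mqtt_client():
    import paho.mqtt.client as mqtt
    client = mqtt.Client()
    client.on_message = mqtt_cb_on_message
    return client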
| 5,337,669
|
def bitfield_v(val, fields, col=15):
"""
return a string of bit field components formatted vertically
val: the value to be split into bit fields
fields: a tuple of (name, output_function, (bit_hi, bit_lo)) tuples
"""
fmt = '%%-%ds: %%s' % col
s = []
for (name, func, field) in fields:
s.append(fmt % (name, func(bits(val, field))))
return '\n'.join(s)
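# Usage sketch (not part of the original source): `bits` is assumed to be the
# project's bit-extraction helper; the stand-in below extracts the field
# val[bit_hi:bit_lo] so the example is self-contained (drop it if the project
# already defines bits).
def bits(val, field):
    hi, lo = field
    return (val >> lo) & ((1 << (hi - lo + 1)) - 1)

_example_fields = (
    ('enabled', lambda v: 'yes' if v else 'no', (0, 0)),
    ('mode', hex, (3, 1)),
    ('count', str, (15, 8)),
)
# print(bitfield_v(0x1234, _example_fields, col=10))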
| 5,337,670
|
def split_housenumber_line(line: str) -> Tuple[str, bool, bool, str, Tuple[int, str], str,
Tuple[int, str], Iterable[str], Tuple[int, str]]:
"""
Augment TSV Overpass house numbers result lines to aid sorting.
    After the postcode it adds two bools indicating whether the entry has any house ID
    (house number, house name or conscription number) and whether it has a house number,
    so entries lacking a house number, or lacking all of these IDs, sort first.
The following fields are interpreted numerically: oid, house number, conscription number.
"""
field = line.split('\t')
oid = get_array_nth(field, 0)
street = get_array_nth(field, 1)
housenumber = get_array_nth(field, 2)
postcode = get_array_nth(field, 3)
housename = get_array_nth(field, 4)
cons = get_array_nth(field, 5)
tail = field[6:] if len(field) > 6 else []
have_housenumber = housenumber != ''
have_houseid = have_housenumber or housename != '' or cons != ''
return (postcode, have_houseid, have_housenumber, street,
split_house_number(housenumber),
housename, split_house_number(cons), tail, split_house_number(oid))
| 5,337,671
|
def readAsync(tagPaths, callback):
"""Asynchronously reads the value of the Tags at the given paths.
You must provide a python callback function that can process the
read results.
Args:
tagPaths (list[str]): A List of Tag paths to read from. If no
property is specified in the path, the Value property is
assumed.
callback (object): A Python callback function to process the
read results. The function definition must provide a single
argument, which will hold a List of qualified values when
the callback function is invoked. The qualified values will
have three sub members: value, quality, and timestamp.
"""
print(tagPaths, callback)
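# Usage sketch (not part of the original source): a callback shaped as the
# docstring above describes. The readAsync stub here only prints its
# arguments, so the callback is never actually invoked in this module.
def _example_read_callback(results):
    for qualified_value in results:
        print(qualified_value.value,
              qualified_value.quality,
              qualified_value.timestamp)

# readAsync(["[default]Folder/Tag1", "[default]Folder/Tag2"],
#           _example_read_callback)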
| 5,337,672
|
def parse_lambda_config(params, name):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 iterations, then will linearly increase to 1 until iteration 2000
"""
x = getattr(params, name)
split = x.split(',')
if len(split) == 1:
setattr(params, name, float(x))
setattr(params, name + '_config', None)
else:
split = [s.split(':') for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1))
setattr(params, name, float(split[0][1]))
setattr(params, name + '_config', [(int(k), float(v)) for k, v in split])
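# Usage sketch (not part of the original source): exercises both accepted
# forms on a throw-away namespace. A constant stays a float with a None
# schedule; a "step:value" list becomes (iteration, value) pairs.
def _example_parse_lambda_config():
    from argparse import Namespace
    params = Namespace(lambda_ae="3", lambda_mt="0:1,1000:0")
    parse_lambda_config(params, "lambda_ae")
    parse_lambda_config(params, "lambda_mt")
    # params.lambda_ae == 3.0 and params.lambda_ae_config is None
    # params.lambda_mt == 1.0 and params.lambda_mt_config == [(0, 1.0), (1000, 0.0)]
    return params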
| 5,337,673
|
def tau_polinomyal_coefficients(z):
"""
Coefficients (z-dependent) for the log(tau) formula from
Raiteri C.M., Villata M. & Navarro J.F., 1996, A&A 315, 105-115
"""
log_z = math.log10(z)
log_z_2 = log_z ** 2
a0 = 10.13 + 0.07547 * log_z - 0.008084 * log_z_2
a1 = -4.424 - 0.7939 * log_z - 0.1187 * log_z_2
a2 = 1.262 + 0.3385 * log_z + 0.05417 * log_z_2
return [a0, a1, a2]
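# Usage sketch (not part of the original source): the coefficients are
# presumably meant to be evaluated as log10(tau) = a0 + a1*log10(M) +
# a2*log10(M)**2 (Raiteri et al. 1996), with M in solar masses and tau in
# years; treat this evaluation as an assumption, not part of the original.
def _example_log_tau(mass, z):
    import math
    a0, a1, a2 = tau_polinomyal_coefficients(z)
    log_m = math.log10(mass)
    return a0 + a1 * log_m + a2 * log_m ** 2

# _example_log_tau(1.0, 0.02) is close to 10, i.e. tau ~ 1e10 yr for the Sun.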
| 5,337,674
|
def test_stop_after_max_trial_reached():
"""Check that all results are registered before exception are raised"""
count = 10
max_trials = 1
workers = 2
runner = new_runner(0.1, n_workers=workers)
runner.max_broken = 2
runner.max_trials_per_worker = max_trials
client = runner.client
client.trials.extend([new_trial(i) for i in range(count)])
runner.run()
status = ["completed" for i in range(max_trials)]
assert client.status == status
| 5,337,675
|
def register( # lgtm[py/similar-function]
fn: callbacks.ResourceHandlerFn,
*,
id: Optional[str] = None,
errors: Optional[errors_.ErrorsMode] = None,
timeout: Optional[float] = None,
retries: Optional[int] = None,
backoff: Optional[float] = None,
cooldown: Optional[float] = None, # deprecated, use `backoff`
registry: Optional[registries.ResourceChangingRegistry] = None,
labels: Optional[bodies.Labels] = None,
annotations: Optional[bodies.Annotations] = None,
when: Optional[callbacks.WhenHandlerFn] = None,
) -> callbacks.ResourceHandlerFn:
"""
Register a function as a sub-handler of the currently executed handler.
Example::
@kopf.on.create('zalando.org', 'v1', 'kopfexamples')
def create_it(spec, **kwargs):
for task in spec.get('tasks', []):
def create_single_task(task=task, **_):
pass
kopf.register(id=task, fn=create_single_task)
This is efficiently an equivalent for::
@kopf.on.create('zalando.org', 'v1', 'kopfexamples')
def create_it(spec, **kwargs):
for task in spec.get('tasks', []):
@kopf.on.this(id=task)
def create_single_task(task=task, **_):
pass
"""
decorator = this(
id=id, registry=registry,
errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,
labels=labels, annotations=annotations, when=when,
)
return decorator(fn)
| 5,337,676
|
def release_notes(c, version=None, username=None, password=None, write=False):
"""Generates release notes based on issues in the issue tracker.
Args:
version: Generate release notes for this version. If not given,
            generate them for the current version.
username: GitHub username.
password: GitHub password.
write: When set to True, write release notes to a file overwriting
possible existing file. Otherwise just print them to the
terminal.
Username and password can also be specified using ``GITHUB_USERNAME`` and
``GITHUB_PASSWORD`` environment variable, respectively. If they aren't
specified at all, communication with GitHub is anonymous and typically
pretty slow.
"""
pattern = '__version__ = "(.*)"'
if write and not RELEASE_NOTES_PATH.parent.is_dir():
RELEASE_NOTES_PATH.parent.mkdir(parents=True)
version = Version(version, VERSION_PATH, pattern)
file = RELEASE_NOTES_PATH if write else sys.stdout
generator = ReleaseNotesGenerator(
REPOSITORY,
RELEASE_NOTES_TITLE,
RELEASE_NOTES_INTRO.replace("REPLACE_PW_VERSION", _get_pw_version()),
)
generator.generate(version, username, password, file)
| 5,337,677
|
def pd_read_csv_using_metadata(filepath_or_buffer, table_metadata, ignore_partitions=False, *args, **kwargs):
"""
Use pandas to read a csv imposing the datatypes specified in the table_metadata
Passes through kwargs to pandas.read_csv
If ignore_partitions=True, assume that partitions are not columns in the dataset
"""
if ignore_partitions:
table_metadata = _remove_paritions_from_table_metadata(table_metadata)
dtype = _pd_dtype_dict_from_metadata(table_metadata, ignore_partitions)
parse_dates = _pd_date_parse_list_from_metadatadata(table_metadata)
return pd.read_csv(filepath_or_buffer, dtype = dtype, parse_dates = parse_dates, *args, **kwargs)
| 5,337,678
|
def read_parfile_dirs_props(filename):
"""Reads BRUKER parfile-dirs.prop file to in order to get correct mapping
of the topspin parameters.
Args:
filename: input Bruker parfile-dirs.prop file
Returns:
A dict mapping parameter classes to the their respective directory.
E.g. {'PY_DIRS': ['py/user', 'py']}
"""
fh = open(filename)
dirs = fh.read()
fh.close()
par_dc = {}
dirs = dirs.replace('\\\n', '').replace(';', ' ')
for line in dirs.split('\n'):
if len(line) > 0 and line[0] != '#':
key, values = line.split('=')
par_dc[key] = values.split()
    if verbose_level > 1:
        print('Dictionary for BRUKER search paths:')
        for key in par_dc.keys():
            print(key, par_dc[key])
return par_dc
| 5,337,679
|
def get_all_playlist_items(playlist_id, yt_client):
"""
Get a list of video ids of videos currently in playlist
"""
return yt_client.get_playlist_items(playlist_id)
| 5,337,680
|
def test_check_file_many_conflicts(
capsys, tmp_path, shared_datadir, source_pipfile_dirname
): # type: (Any, Path, Path, str) -> None
"""
many conflicts, return code should be one
"""
pipfile_dir = shared_datadir / source_pipfile_dirname
for filename in ("Pipfile", "Pipfile.lock", "setup.py"):
copy_file(pipfile_dir / filename, tmp_path)
with cwd(tmp_path):
with pytest.raises(SystemExit) as e:
cmd(argv=["", "check"])
assert e.value.code == 1
| 5,337,681
|
def generator(samples, is_validation=False, correction=0.15, batch_size=32):
"""Generates batches of training features and labels.
Args
samples: driving log file records
"""
# Fetch corresponding images, and build a generator
num_samples = len(samples)
while 1:
samples = sklearn.utils.shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
images = []
angles = []
for batch_sample in batch_samples:
f = batch_sample[-1]
original_name = batch_sample[0]
name = f.replace("driving_log.csv", "") + "IMG/" + original_name.split("IMG/")[-1]
center_image = mpimg.imread(name)
center_angle = float(batch_sample[3])
images.append(center_image)
angles.append(center_angle)
# Flip the image to augment training data
images.append(np.fliplr(center_image))
angles.append(-center_angle)
# Add left & right camera images if it's not validation run
if not is_validation:
left_angle = center_angle + correction
right_angle = center_angle - correction
left_image = mpimg.imread(f.replace("driving_log.csv", "") + "IMG/" + batch_sample[1].split("IMG/")[-1])
right_image = mpimg.imread(f.replace("driving_log.csv", "") + "IMG/" + batch_sample[2].split("IMG/")[-1])
images.append(left_image)
angles.append(left_angle)
images.append(right_image)
angles.append(right_angle)
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
| 5,337,682
|
def plot_best_laps(timings):
"""Plot each driver's best lap in as a bar chart and output a file.
`timings` : dict
Return value of `api.get_best_laps`
"""
race, sn = timings['race'], timings['season']
drivers = [x['Driver'] for x in timings['data']]
times = [lap_time_to_seconds(x['Time']) for x in timings['data']]
fig = plt.figure(figsize=FIGSIZE)
y_pos = np.arange(len(drivers))
plt.barh(y_pos, times, figure=fig, height=0.5, align='center')
plt.title(f"Best Laps Per Driver {race} ({sn})")
plt.ylabel('Drivers')
plt.yticks(y_pos, labels=drivers)
plt.gca().invert_yaxis()
plt.xlabel('Time (s)')
plt.xlim(left=min(times) - 1, right=max(times) + 1)
plt.gca().get_xaxis().set_minor_locator(AutoMinorLocator())
plt.grid(True, axis='x')
save_figure(fig, name='plot_fastest.png')
| 5,337,683
|
def patch_callables(callables: List[str], patch_func: PatchFunction) -> None:
"""Patch the given list of callables.
Parameters
----------
callables : List[str]
Patch all of these callables (functions or methods).
patch_func : PatchFunction
Called on every callable to patch it.
Notes
-----
The callables list should look like:
[
"module.module.ClassName.method_name",
"module.function_name"
...
]
Nested classes and methods not allowed, but support could be added.
An example patch_func is::
import wrapt
def _my_patcher(parent: CallableParent, callable: str, label: str):
@wrapt.patch_function_wrapper(parent, callable)
def my_announcer(wrapped, instance, args, kwargs):
print(f"Announce {label}")
return wrapped(*args, **kwargs)
"""
patched: Set[str] = set()
for target_str in callables:
if target_str in patched:
# Ignore duplicated targets in the config file.
print(f"Patcher: [WARN] skipping duplicate {target_str}")
continue
# Patch the target and note that we did.
try:
module, attribute_str = _import_module(target_str)
_patch_attribute(module, attribute_str, patch_func)
except PatchError as exc:
# We don't stop on error because if you switch around branches
# but keep the same config file, it's easy for your config
# file to contain targets that aren't in the code.
print(f"Patcher: [ERROR] {exc}")
patched.add(target_str)
| 5,337,684
|
def test_particles_ja011_particles_ja011_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's minOccurs=1, R's minOccurs=absent
"""
assert_bindings(
schema="msData/particles/particlesJa011.xsd",
instance="msData/particles/particlesJa011.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,337,685
|
def _get_last_measurement(object_id: int):
"""
Get the last measurement of object with given ID.
Args:
object_id (int): Object ID whose last measurement to look for.
Returns:
(GamMeasurement): The last measurement of the object, or None if it doesn't exist.
"""
    # .first() returns None when no measurement exists, matching the docstring;
    # .get() would raise DoesNotExist instead.
    last_mea = (GamMeasurement.select()
                .where(GamMeasurement.mea_object == object_id)
                .order_by(GamMeasurement.mea_id.desc())
                .first())
    return last_mea
| 5,337,686
|
def get_library_isotopes(acelib_path):
"""
Returns the isotopes in the cross section library
Parameters
----------
acelib_path : str
Path to the cross section library
(i.e. '/home/luke/xsdata/endfb7/sss_endfb7u.xsdata')
Returns
-------
    lib_isos_list : list
        list of isotope identifiers in the cross section library
"""
lib_isos_list = []
with open(acelib_path, 'r') as f:
lines = f.readlines()
for line in lines:
iso = line.split()[0]
lib_isos_list.append(iso)
return lib_isos_list
| 5,337,687
|
def read_wires(data: str) -> Mapping[int, Wire]:
"""Read the wiring information from data."""
wires = {}
for line in data.splitlines():
wire_name, wire = get_wire(line)
wires[wire_name] = wire
return wires
| 5,337,688
|
def test_disk_dataset_get_shape_single_shard():
"""Test that get_shape works for disk dataset."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
| 5,337,689
|
def back_ease_out(p):
"""Modeled after overshooting cubic y = 1-((1-x)^3-(1-x)*sin((1-x)*pi))"""
f = 1 - p
return 1 - (f * f * f - f * sin(f * pi))
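# Usage sketch (not part of the original source): sample the easing curve on
# [0, 1]; it starts at 0.0, overshoots slightly above 1 near the end of the
# range, and returns exactly 1.0 at p = 1.
def _example_back_ease_out_samples(n=10):
    return [round(back_ease_out(i / n), 4) for i in range(n + 1)]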
| 5,337,690
|
def configure_estimator_params(init_args, train_args):
"""Validates the initialization and training arguments and constructs a
`params` dictionary for creating a TensorFlow Estimator object."""
params = {}
init_val = ArgumentsValidator(init_args, "Initialization arguments")
with init_val:
params["rm_dir_on_init"] = init_val.get("rm_dir", ATYPE_BOOL, True)
params["use_ortho_weights"] = init_val.get("use_ortho_weights", ATYPE_BOOL, True)
params["max_lsuv_iters"] = init_val.get("max_lsuv_iters", [ATYPE_NONE, ATYPE_INT], True)
params["lsuv_tolerance"] = init_val.get("lsuv_tolerance", ATYPE_FLOAT, True)
params["init_alpha"] = init_val.get("init_alpha", ATYPE_FLOAT, True)
train_val = ArgumentsValidator(train_args, "Training arguments")
with train_val:
params["save_time"] = train_val.get("save_time", ATYPE_FLOAT, True)
params["val_throttle_time"] = train_val.get("val_throttle_time", ATYPE_FLOAT, True)
params["learning_rate"] = train_val.get("learning_rate", ATYPE_FLOAT, True)
params["sgd_momentum"] = train_val.get("sgd_momentum", [ATYPE_NONE, ATYPE_FLOAT], True)
params["sgd_use_nesterov"] = train_val.get("sgd_use_nesterov", ATYPE_BOOL, True)
params["use_rmsprop"] = train_val.get("use_rmsprop", ATYPE_BOOL, True)
params["rmsprop_decay"] = train_val.get("rmsprop_decay", ATYPE_FLOAT, True)
params["rmsprop_momentum"] = train_val.get("rmsprop_momentum", ATYPE_FLOAT, True)
params["rmsprop_epsilon"] = train_val.get("rmsprop_epsilon", ATYPE_FLOAT, True)
params["reg_weight_decay"] = train_val.get("reg_weight_decay", [ATYPE_NONE, ATYPE_FLOAT], True)
params["cost_type"] = train_val.get("cost_type", ATYPE_STRING, True).lower()
params["max_grad_norm"] = train_val.get("max_grad_norm", [ATYPE_NONE, ATYPE_FLOAT], True)
params["parallel_grad_gate"] = train_val.get("parallel_grad_gate", ATYPE_BOOL, True)
return params
| 5,337,691
|
def bit_lshift(bin_name, bit_offset, bit_size, shift, policy=None):
"""Creates a bit_lshift_operation to be used with operate or operate_ordered.
Server left shifts bitmap starting at bit_offset for bit_size by shift bits.
No value is returned.
Args:
        bin_name (str): The name of the bin containing the bitmap.
bit_offset (int): The offset where the bits will start being shifted.
bit_size (int): The number of bits that will be shifted by shift places.
shift (int): How many bits to shift by.
        policy (dict, optional): The bit_policy policy dictionary. See :ref:`aerospike_bit_policies`. default: None
Returns:
A dictionary usable in operate or operate_ordered. The format of the dictionary
should be considered an internal detail, and subject to change.
"""
return {
OP_KEY: aerospike.OP_BIT_LSHIFT,
BIN_KEY: bin_name,
BIT_OFFSET_KEY: bit_offset,
BIT_SIZE_KEY: bit_size,
VALUE_KEY: shift,
POLICY_KEY: policy
}
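# Usage sketch (not part of the original source): the returned dictionary is
# meant to be passed to the aerospike client's operate()/operate_ordered()
# calls. The client and key below are placeholders for a connected client and
# an existing record key.
def _example_bit_lshift(client, key):
    ops = [bit_lshift("bits_bin", bit_offset=0, bit_size=8, shift=3)]
    return client.operate(key, ops)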
| 5,337,692
|
def compute_ccas(sigma_xx, sigma_xy, sigma_yx, sigma_yy, epsilon,
verbose=True):
"""Main cca computation function, takes in variances and crossvariances.
This function takes in the covariances and cross covariances of X, Y,
preprocesses them (removing small magnitudes) and outputs the raw results of
the cca computation, including cca directions in a rotated space, and the
cca correlation coefficient values.
Args:
sigma_xx: 2d numpy array, (num_neurons_x, num_neurons_x)
variance matrix for x
sigma_xy: 2d numpy array, (num_neurons_x, num_neurons_y)
crossvariance matrix for x,y
sigma_yx: 2d numpy array, (num_neurons_y, num_neurons_x)
crossvariance matrix for x,y (conj) transpose of sigma_xy
sigma_yy: 2d numpy array, (num_neurons_y, num_neurons_y)
variance matrix for y
epsilon: small float to help with stabilizing computations
verbose: boolean on whether to print intermediate outputs
Returns:
    [u, s, v]: [numpy 2d array, numpy 1d array, numpy 2d array]
               SVD of the whitened cross covariance
               invsqrt_xx . sigma_xy . invsqrt_yy; s (returned as np.abs(s))
               holds the canonical correlation coefficients, i.e. how well
               corresponding canonical directions correlate, while u and v
               give those directions in the whitened X and Y spaces.
invsqrt_xx: Inverse square root of sigma_xx to transform canonical
directions back to original space
invsqrt_yy: Same as above but for sigma_yy
x_idxs: The indexes of the input sigma_xx that were pruned
by remove_small
y_idxs: Same as above but for sigma_yy
"""
(sigma_xx, sigma_xy, sigma_yx, sigma_yy,
x_idxs, y_idxs) = remove_small(sigma_xx, sigma_xy, sigma_yx, sigma_yy, epsilon)
numx = sigma_xx.shape[0]
numy = sigma_yy.shape[0]
if numx == 0 or numy == 0:
return ([0, 0, 0], [0, 0, 0], np.zeros_like(sigma_xx),
np.zeros_like(sigma_yy), x_idxs, y_idxs)
if verbose:
print("adding eps to diagonal and taking inverse")
sigma_xx += epsilon * np.eye(numx)
sigma_yy += epsilon * np.eye(numy)
inv_xx = np.linalg.pinv(sigma_xx)
inv_yy = np.linalg.pinv(sigma_yy)
if verbose:
print("taking square root")
invsqrt_xx = positivedef_matrix_sqrt(inv_xx)
invsqrt_yy = positivedef_matrix_sqrt(inv_yy)
if verbose:
print("dot products...")
arr = np.dot(invsqrt_xx, np.dot(sigma_xy, invsqrt_yy))
if verbose:
print("trying to take final svd")
u, s, v = np.linalg.svd(arr)
if verbose:
print("computed everything!")
return [u, np.abs(s), v], invsqrt_xx, invsqrt_yy, x_idxs, y_idxs
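# Usage sketch (not part of the original source): builds the four covariance
# blocks from two random activation matrices of shape (neurons, datapoints)
# and feeds them in; remove_small and positivedef_matrix_sqrt are assumed to
# be defined elsewhere in this module, as the function above requires.
def _example_compute_ccas(num_x=20, num_y=30, num_points=500, epsilon=1e-6):
    import numpy as np
    x = np.random.randn(num_x, num_points)
    y = np.random.randn(num_y, num_points)
    covariance = np.cov(x, y)
    sigma_xx = covariance[:num_x, :num_x]
    sigma_xy = covariance[:num_x, num_x:]
    sigma_yx = covariance[num_x:, :num_x]
    sigma_yy = covariance[num_x:, num_x:]
    return compute_ccas(sigma_xx, sigma_xy, sigma_yx, sigma_yy,
                        epsilon, verbose=False)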
| 5,337,693
|
def vcf_entry_match(vcf_id, start, stop, target_vcf):
"""Returns whether the requested update entry matches the target VCF entry"""
pass
| 5,337,694
|
def generate_samples(
segment_mask: np.ndarray, num_of_samples: int = 64, p: float = 0.5
) -> np.ndarray:
"""Generate samples by randomly selecting a subset of the segments.
Parameters
----------
segment_mask : np.ndarray
The mask generated by `create_segments()`: An array of shape (image_width, image_height).
num_of_samples : int
The number of samples to generate.
p : float
The probability for each segment to be removed from a sample.
Returns
-------
samples : np.ndarray
A two-dimensional array of size (num_of_samples, num_of_segments).
"""
num_of_segments = int(np.max(segment_mask) + 1)
return np.random.binomial(n=1, p=p, size=(num_of_samples, num_of_segments))
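# Usage sketch (not part of the original source): a 4x4 image split into four
# 2x2 segments (ids 0..3); each row of the result is a 0/1 vector over those
# four segments, drawn independently with probability p.
def _example_generate_samples():
    import numpy as np
    segment_mask = np.array([[0, 0, 1, 1],
                             [0, 0, 1, 1],
                             [2, 2, 3, 3],
                             [2, 2, 3, 3]])
    samples = generate_samples(segment_mask, num_of_samples=8, p=0.5)
    assert samples.shape == (8, 4)
    return samples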
| 5,337,695
|
def get_mix_bandpassed(bp_list, comp, param_dict_file=None,bandpass_shifts=None,
ccor_cen_nus=None, ccor_beams=None, ccor_exps = None,
normalize_cib=True,param_dict_override=None,bandpass_exps=None,nus_ghz=None,btrans=None,
dust_beta_param_name='beta_CIB',
radio_beta_param_name='beta_radio',
override_lbeam_bnus=None):
"""
Get mixing factors for a given component that have "color corrections" that account for
a non-delta-function bandpass and for possible variation of the beam within the bandpass.
If the latter is provided, the resulting output is of shape [Nfreqs,nells], otherwise
the output is of shape [Nfreqs,].
Parameters
----------
bp_list : list of strings
a list of strings of length Nfreqs where each string is the filename for a file
containing a specification of the bandpass for that frequency channel. For each
file, the first column is frequency in GHz and the second column is the transmission
whose overall normalization does not matter.
comp : string
a string specifying the component whose mixing is requested. Currently, the following are
supported (1) CMB or kSZ (considered identical, and always returns ones)
(2) tSZ (3) mu (4) rSZ (5) CIB (6) radio
param_dict_file : string, optional
filename of a YAML file used to create a dictionary of SED parameters and values
(only needed for some SEDs). If None, defaults to parameters specified in
input/fg_SEDs_default_params.yml.
bandpass_shifts : list of floats, optional
A list of floats of length [Nfreqs,] specifying how much in GHz to shift the
entire bandpass. Each value can be positive (shift right) or negative (shift left).
If None, no shift is applied and the bandpass specified in the files is used as is.
ccor_cen_nus : list of floats, optional
If not None, this indicates that the dependence of the beam on frequency with the
bandpass should be taken into account. ccor_cen_nus will then be interpreted as a
[Nfreqs,] length list of the "central frequencies" of each bandpass in GHz.
The provided beams in ccor_beams for each channel are then scaled by
(nu/nu_central)**ccor_exp where ccor_exp defaults to -1.
ccor_beams : list of array_like, optional
Only used if ccor_cen_nus is not None. In that mode, ccor_beams is interpreted as
an [Nfreqs,] length list where each element is a 1d numpy array specifying the
beam transmission starting from ell=0 and normalized to one at ell=0.
The provided beams for each channel are then scaled by
(nu/nu_central)**ccor_exp where ccor_exp defaults to -1 and nu_central is specified
through ccor_cen_nus. If any list element is None, no scale dependent color correction
is applied for that frequency channel. See get_scaled_beams for more information.
ccor_exps : list of floats, optional
        Only used if ccor_cen_nus is not None. Defaults to -1 for each frequency channel.
This controls how the beam specified in ccor_beams for the central frequencies
specified in ccor_cen_nus is scaled to other frequencies.
"""
if bandpass_shifts is not None and np.any(np.array(bandpass_shifts)!=0):
print("WARNING: shifted bandpasses provided.")
assert (comp is not None)
assert (bp_list is not None)
N_freqs = len(bp_list)
if ccor_cen_nus is not None:
assert len(ccor_cen_nus)==N_freqs
assert len(ccor_beams)==N_freqs
lmaxs = []
for i in range(N_freqs):
if ccor_beams[i] is not None:
assert ccor_beams[i].ndim==1
lmaxs.append( ccor_beams[i].size )
if len(lmaxs)==0:
ccor_cen_nus = None
shape = N_freqs
else:
lmax = max(lmaxs)
shape = (N_freqs,lmax)
if ccor_exps is None: ccor_exps = [-1]*N_freqs
elif override_lbeam_bnus is not None:
lbeam,bnus = override_lbeam_bnus
lmax = lbeam.size
shape = (N_freqs,lmax)
else:
shape = N_freqs
if (comp == 'CIB' or comp == 'rSZ' or comp == 'radio'):
if param_dict_file is None:
p = default_dict
else:
p = read_param_dict_from_yaml(param_dict_file)
if (comp == 'CMB' or comp == 'kSZ'): #CMB (or kSZ)
output = np.ones(shape) #this is unity by definition, since we're working in Delta T units [uK_CMB]; output ILC map will thus also be in uK_CMB
for i in range(N_freqs):
if(bp_list[i] == None): #this case is appropriate for HI or other maps that contain no CMB-relevant signals (and also no CIB); they're assumed to be denoted by None in bp_list
output[i] = 0.
return output
else:
output = np.zeros(shape)
for i,bp in enumerate(bp_list):
if (bp_list[i] is not None):
if nus_ghz is None:
nu_ghz, trans = np.loadtxt(bp, usecols=(0,1), unpack=True)
else:
nu_ghz = nus_ghz
trans = btrans
if bandpass_shifts is not None: nu_ghz = nu_ghz + bandpass_shifts[i]
if bandpass_exps is not None: trans = trans * nu_ghz**bandpass_exps[i]
lbeam = 1
bnus = 1
# It turns out scaling the beam is actually the slowest part of the calculation
# so we allow pre-calculated ones to be provided
if override_lbeam_bnus is not None:
lbeam,bnus = override_lbeam_bnus
else:
if ccor_cen_nus is not None:
if ccor_beams[i] is not None:
lbeam = ccor_beams[i]
ells = np.arange(lbeam.size)
cen_nu_ghz = ccor_cen_nus[i]
bnus = get_scaled_beams(ells,lbeam,cen_nu_ghz,nu_ghz,ccor_exp=ccor_exps[i]).swapaxes(0,1)
assert np.all(np.isfinite(bnus))
if (comp == 'tSZ' or comp == 'mu' or comp == 'rSZ'):
# Thermal SZ (y-type distortion) or mu-type distortion or relativistic tSZ
# following Sec. 3.2 of https://arxiv.org/pdf/1303.5070.pdf
# -- N.B. IMPORTANT TYPO IN THEIR EQ. 35 -- see https://www.aanda.org/articles/aa/pdf/2014/11/aa21531-13.pdf
mixs = get_mix(nu_ghz, comp,
param_dict_file=param_dict_file, param_dict_override=param_dict_override,
dust_beta_param_name=dust_beta_param_name,radio_beta_param_name=radio_beta_param_name)
val = np.trapz(trans * dBnudT(nu_ghz) * bnus * mixs, nu_ghz) / np.trapz(trans * dBnudT(nu_ghz), nu_ghz) / lbeam
# this is the response at each frequency channel in uK_CMB for a signal with y=1 (or mu=1)
elif (comp == 'CIB'):
# following Sec. 3.2 of https://arxiv.org/pdf/1303.5070.pdf
# -- N.B. IMPORTANT TYPO IN THEIR EQ. 35 -- see https://www.aanda.org/articles/aa/pdf/2014/11/aa21531-13.pdf
# CIB SED parameter choices in dict file: Tdust_CIB [K], beta_CIB, nu0_CIB [GHz]
# N.B. overall amplitude is not meaningful here; output ILC map (if you tried to preserve this component) would not be in sensible units
mixs = get_mix(nu_ghz, 'CIB_Jysr',
param_dict_file=param_dict_file, param_dict_override=param_dict_override,
dust_beta_param_name=dust_beta_param_name,radio_beta_param_name=radio_beta_param_name)
vnorm = np.trapz(trans * dBnudT(nu_ghz), nu_ghz)
val = (np.trapz(trans * mixs * bnus , nu_ghz) / vnorm) / lbeam
# N.B. this expression follows from Eqs. 32 and 35 of
# https://www.aanda.org/articles/aa/pdf/2014/11/aa21531-13.pdf ,
# and then noting that one also needs to first rescale the CIB emission
# in Jy/sr from nu0_CIB to the "nominal frequency" nu_c that appears in
# those equations (i.e., multiply by get_mix(nu_c, 'CIB_Jysr')).
# The resulting cancellation leaves this simple expression which has no dependence on nu_c.
elif (comp == 'radio'):
# same logic/formalism as used for CIB component immediately above this
# radio SED parameter choices in dict file: beta_radio, nu0_radio [GHz]
mixs = get_mix(nu_ghz, 'radio_Jysr',
param_dict_file=param_dict_file, param_dict_override=param_dict_override,
dust_beta_param_name=dust_beta_param_name,radio_beta_param_name=radio_beta_param_name)
val = (np.trapz(trans * mixs * bnus , nu_ghz) / np.trapz(trans * dBnudT(nu_ghz), nu_ghz)) / lbeam
else:
print("unknown component specified")
raise NotImplementedError
if (ccor_cen_nus is not None) and (ccor_beams[i] is not None): val[lbeam==0] = 0
output[i] = val
assert np.all(np.isfinite(val))
elif (bp_list[i] is None):
#this case is appropriate for HI or other maps that contain no CMB-relevant signals (and also no CIB); they're assumed to be denoted by None in bp_list
output[i] = 0.
if (comp == 'CIB' or comp == 'radio') and normalize_cib:
#overall amplitude not meaningful, so divide by max to get numbers of order unity;
# output gives the relative conversion between CIB (or radio) at different frequencies, for maps in uK_CMB
omax = output.max(axis=0)
ret = output / omax
if (ccor_cen_nus is not None): ret[:,omax==0] = 0
else:
ret = output
assert np.all(np.isfinite(ret))
return ret
| 5,337,696
|
def try_again() -> None:
""" Prompts the user to retry some process or exit program """
while True:
value = input("Enter 't' to try again or 'e' to exit. ").lower()
if value == 't': return
if value == 'e': sys.exit(0)
| 5,337,697
|
def partition(data, label_name, ratio):
""" Partitions data set according to a provided ratio.
params:
data - The data set in a pandas data frame
        label_name - the name of the column in the data set that contains the labels
ratio - the training/total data ratio
returns:
training_data - The data set to train on
training_labels - Indexed labels for training set
testing_data - The data set to test on
        testing_labels - Indexed labels for testing set """
data = data.loc[np.random.permutation(data.index)]
partition_idx = int(data.shape[0] * ratio)
train, test = np.split(data, [partition_idx])
def splitDataLabels(data):
"""Separates labels from data."""
labels = data[label_name].to_frame()
data = data.drop(columns = [label_name])
return data , labels
train_data, train_label = splitDataLabels(train)
test_data, test_label = splitDataLabels(test)
return train_data, train_label, test_data, test_label
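# Usage sketch (not part of the original source): a tiny frame with a "label"
# column, split 80/20 into training and testing parts.
def _example_partition():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({
        "x1": np.arange(10),
        "x2": np.arange(10) * 2.0,
        "label": [0, 1] * 5,
    })
    train_x, train_y, test_x, test_y = partition(df, "label", 0.8)
    # train_x has 8 rows and no "label" column; train_y holds those labels.
    return train_x, train_y, test_x, test_y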
| 5,337,698
|
def find_background2(data, mask, channels, apertureset_lst,
method='poly', scale='linear', scan_step=200,
xorder=2, yorder=2, maxiter=5, upper_clip=3, lower_clip=3,
extend=True, display=True, fig_file=None, reg_file=None):
"""Subtract the background for an input FITS image.
Args:
data (:class:`numpy.ndarray`): Input data image.
mask (:class:`numpy.ndarray`): Mask of input data image.
channels (list): List of channels as strings.
apertureset_lst (dict): Dict of :class:`~edrs.echelle.trace.ApertureSet`
at different channels.
method (str): Method of finding background light.
scale (str): Scale of the 2-D polynomial fitting. If 'log', fit the
            polynomial in the logarithmic scale.
scan_step (int): Steps of scan in pixels.
xorder (int): Order of 2D polynomial along the main dispersion
direction (only applicable if **method** = "poly").
yorder (int): Order of 2D polynomial along the cross-dispersion
direction (only applicable if **method** = "poly").
maxiter (int): Maximum number of iteration of 2D polynomial fitting
(only applicable if **method** = "poly").
upper_clip (float): Upper sigma clipping threshold (only applicable if
**method** = "poly").
lower_clip (float): Lower sigma clipping threshold (only applicable if
**method** = "poly").
extend (bool): Extend the grid to the whole CCD image if *True*.
display (bool): Display figures on the screen if *True*.
fig_file (str): Name of the output figure. No image file generated if
*None*.
reg_file (string): Name of the output DS9 region file. No file generated
if *None*.
Returns:
:class:`numpy.ndarray`: Image of background light.
"""
plot = (display or fig_file is not None)
plot_paper_fig = False
h, w = data.shape
meddata = median_filter(data, size=(3,3), mode='reflect')
xnodes, ynodes, znodes = [], [], []
# find the minimum and maximum aperture number
min_aper = min([min(apertureset_lst[ch].keys()) for ch in channels])
max_aper = max([max(apertureset_lst[ch].keys()) for ch in channels])
# generate the horizontal scan list
x_lst = np.arange(0, w-1, scan_step)
# add the last column to the list
if x_lst[-1] != w-1:
x_lst = np.append(x_lst, w-1)
# find intra-order pixels
_message_lst = ['Column, N (between), N (extend), N (removed), N (total)']
for x in x_lst:
xsection = meddata[:,x]
inter_aper = []
prev_newy = None
# loop for every aperture
for aper in range(min_aper, max_aper+1):
# for a new aperture, initialize the count of channel
count_channel = 0
for ich, channel in enumerate(channels):
# check every channel in this frame
if aper in apertureset_lst[channel]:
count_channel += 1
this_newy = apertureset_lst[channel][aper].position(x)
if count_channel == 1 and prev_newy is not None:
# this channel is the first channel in this aperture and
# there is a previous y
mid_newy = (prev_newy + this_newy)//2
i1 = min(h-1, max(0, int(prev_newy)))
i2 = min(h-1, max(0, int(this_newy)))
#if len(inter_aper)==0 or \
# abs(mid_newy - inter_aper[-1])>scan_step*0.7:
# if i2-i1>0:
if i2-i1>0:
mid_newy = i1 + xsection[i1:i2].argmin()
inter_aper.append(mid_newy)
prev_newy = this_newy
inter_aper = np.array(inter_aper)
# count how many nodes found between detected orders
n_nodes_inter = inter_aper.size
# if extend = True, expand the grid with polynomial fitting to
# cover the whole CCD area
n_nodes_extend = 0
if extend:
if x==2304:
_fig = plt.figure(dpi=150)
_ax = _fig.gca()
for _x in inter_aper:
_ax.axvline(x=_x,color='g', ls='--',lw=0.5, alpha=0.6)
_ax.plot(data[:, x],'b-',lw=0.5)
_fig2 = plt.figure(dpi=150)
_ax2 = _fig2.gca()
print(inter_aper)
coeff = np.polyfit(np.arange(inter_aper.size), inter_aper, deg=3)
if x== 2304:
_ax2.plot(np.arange(inter_aper.size), inter_aper,'go', alpha=0.6)
_newx = np.arange(0, inter_aper.size, 0.1)
_ax2.plot(_newx, np.polyval(coeff, _newx),'g-')
# find the points after the end of inter_aper
ii = inter_aper.size-1
new_y = inter_aper[-1]
while(new_y<h-1):
ii += 1
new_y = int(np.polyval(coeff,ii))
inter_aper = np.append(inter_aper,new_y)
n_nodes_extend += 1
# find the points before the beginning of order_mid
ii = 0
new_y = inter_aper[0]
while(new_y>0):
ii -= 1
new_y = int(np.polyval(coeff,ii))
inter_aper = np.insert(inter_aper,0,new_y)
n_nodes_extend += 1
if x==2304:
#for _x in np.polyval(coeff, np.arange(0,25)):
# _ax.axvline(x=_x, color='r',ls='--',lw=0.5)
#_newx = np.arange(0, 25)
#_ax2.plot(_newx, np.polyval(coeff, _newx), 'ro', alpha=0.6)
plt.show()
# remove those points with y<0 or y>h-1
m1 = inter_aper > 0
m2 = inter_aper < h-1
inter_aper = inter_aper[np.nonzero(m1*m2)[0]]
# filter those masked pixels
m = mask[inter_aper, x]==0
inter_aper = inter_aper[m]
# remove backward points
tmp = np.insert(inter_aper,0,0.)
m = np.diff(tmp)>0
inter_aper = inter_aper[np.nonzero(m)[0]]
# count how many nodes removed
n_nodes_removed = (n_nodes_inter + n_nodes_extend) - inter_aper.size
# pack infos into message list
_message_lst.append('| %6d | %6d | %6d | %6d | %6d |'%(
x, n_nodes_inter, n_nodes_extend, n_nodes_removed, inter_aper.size))
# pack all nodes
for y in inter_aper:
xnodes.append(x)
ynodes.append(y)
znodes.append(meddata[y,x])
# extrapolate
#if extrapolate:
if False:
_y0, _y1 = inter_aper[0], inter_aper[1]
newy = _y0 - (_y1 - _y0)
newz = meddata[_y0, x] - (meddata[_y1, x] - meddata[_y0, x])
xnodes.append(x)
ynodes.append(newy)
znodes.append(newz)
_y1, _y2 = inter_aper[-2], inter_aper[-1]
newy = _y2 + (_y2 - _y1)
newz = meddata[_y2, x] + (meddata[_y2, x] - meddata[_y1, x])
xnodes.append(x)
ynodes.append(newy)
znodes.append(newz)
# convert to numpy array
xnodes = np.array(xnodes)
ynodes = np.array(ynodes)
znodes = np.array(znodes)
# write to running log
_message_lst.append('Total: %4d'%xnodes.size)
logger.info((os.linesep+' ').join(_message_lst))
# if scale='log', filter the negative values
if scale=='log':
pmask = znodes > 0
znodes[~pmask] = znodes[pmask].min()
znodes = np.log10(znodes)
if plot:
# initialize figures
fig = plt.figure(figsize=(10,10), dpi=150)
ax11 = fig.add_axes([0.07, 0.54, 0.39, 0.39])
ax12 = fig.add_axes([0.52, 0.54, 0.39, 0.39])
ax13 = fig.add_axes([0.94, 0.54, 0.015, 0.39])
ax21 = fig.add_axes([0.07, 0.07, 0.39, 0.39], projection='3d')
ax22 = fig.add_axes([0.52, 0.07, 0.39, 0.39], projection='3d')
fig.suptitle('Background')
ax11.imshow(data, cmap='gray')
# plot nodes
for ax in [ax11, ax12]:
ax.set_xlim(0,w-1)
ax.set_ylim(h-1,0)
ax.set_xlabel('X (pixel)', fontsize=10)
ax.set_ylabel('Y (pixel)', fontsize=10)
for ax in [ax21, ax22]:
ax.set_xlim(0,w-1)
ax.set_ylim(0,h-1)
ax.set_xlabel('X (pixel)', fontsize=10)
ax.set_ylabel('Y (pixel)', fontsize=10)
for ax in [ax11, ax12]:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for ax in [ax21, ax22]:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(9)
for tick in ax.zaxis.get_major_ticks():
tick.label1.set_fontsize(9)
if display:
plt.show(block=False)
# plot the figure used in paper
if plot_paper_fig:
figp1 = plt.figure(figsize=(6,6), dpi=150)
axp1 = figp1.add_axes([0.00, 0.05, 1.00, 0.95], projection='3d')
figp2 = plt.figure(figsize=(6.5,6), dpi=150)
axp2 = figp2.add_axes([0.12, 0.1, 0.84, 0.86])
if method=='poly':
background_data, fitmask = fit_background(data.shape,
xnodes, ynodes, znodes, xorder=xorder, yorder=yorder,
maxiter=maxiter, upper_clip=upper_clip, lower_clip=lower_clip)
elif method=='interp':
background_data, fitmask = interpolate_background(data.shape,
xnodes, ynodes, znodes)
    else:
        raise ValueError('Unknown method: %s'%method)
m = (ynodes >= 0)*(ynodes <= h-1)
xnodes = xnodes[m]
ynodes = ynodes[m]
znodes = znodes[m]
fitmask = fitmask[m]
if scale=='log':
background_data = np.power(10, background_data)
# save nodes to DS9 region file
if reg_file is not None:
outfile = open(reg_file, 'w')
outfile.write('# Region file format: DS9 version 4.1'+os.linesep)
outfile.write('global color=green dashlist=8 3 width=1 ')
outfile.write('font="helvetica 10 normal roman" select=1 highlite=1 ')
outfile.write('dash=0 fixed=0 edit=1 move=1 delete=1 include=1 ')
outfile.write('source=1'+os.linesep)
outfile.write('physical'+os.linesep)
for x, y in zip(xnodes, ynodes):
text = ('point(%4d %4d) # point=circle'%(x+1, y+1))
outfile.write(text+os.linesep)
outfile.close()
# write nodes to running log
message = ['Background Nodes:', ' x, y, value, mask']
for x,y,z,m in zip(xnodes, ynodes, znodes, fitmask):
message.append('| %4d | %4d | %+10.8e | %1d |'%(x,y,z,m))
logger.info((os.linesep+' '*4).join(message))
residual = znodes - background_data[ynodes, xnodes]
if plot:
# prepare for plotting the fitted surface with a loose grid
yy, xx = np.meshgrid(np.linspace(0,h-1,32), np.linspace(0,w-1,32))
yy = np.int16(np.round(yy))
xx = np.int16(np.round(xx))
zz = background_data[yy, xx]
# plot 2d fitting in a 3-D axis in fig2
# plot the linear fitting
ax21.set_title('Background fitting (%s Z)'%scale, fontsize=10)
ax22.set_title('residuals (%s Z)'%scale, fontsize=10)
ax21.plot_surface(xx, yy, zz, rstride=1, cstride=1, cmap='jet',
linewidth=0, antialiased=True, alpha=0.5)
ax21.scatter(xnodes[fitmask], ynodes[fitmask], znodes[fitmask],
color='C0', linewidth=0)
ax22.scatter(xnodes[fitmask], ynodes[fitmask], residual[fitmask],
color='C0', linewidth=0)
if (~fitmask).sum()>0:
ax21.scatter(xnodes[~fitmask], ynodes[~fitmask], znodes[~fitmask],
color='none', edgecolor='C0', linewidth=1)
ax22.scatter(xnodes[~fitmask], ynodes[~fitmask], residual[~fitmask],
color='none', edgecolor='C0', linewidth=1)
        # plot the logarithmic fitting in another figure
#if scale=='log':
# ax23.plot_surface(xx, yy, log_zz, rstride=1, cstride=1, cmap='jet',
# linewidth=0, antialiased=True, alpha=0.5)
# ax23.scatter(xnodes[fitmask], ynodes[fitmask], zfit[fitmask], linewidth=0)
# ax24.scatter(xnodes[fitmask], ynodes[fitmask], log_residual[fitmask], linewidth=0)
for ax in [ax21, ax22]:
ax.xaxis.set_major_locator(tck.MultipleLocator(500))
ax.xaxis.set_minor_locator(tck.MultipleLocator(100))
ax.yaxis.set_major_locator(tck.MultipleLocator(500))
ax.yaxis.set_minor_locator(tck.MultipleLocator(100))
if display: fig.canvas.draw()
# plot figure for paper
if plot_paper_fig:
axp1.plot_surface(xx, yy, zz, rstride=1, cstride=1, cmap='jet',
linewidth=0, antialiased=True, alpha=0.5)
axp1.scatter(xnodes[fitmask], ynodes[fitmask], znodes[fitmask], linewidth=0)
axp1.xaxis.set_major_locator(tck.MultipleLocator(500))
axp1.xaxis.set_minor_locator(tck.MultipleLocator(100))
axp1.yaxis.set_major_locator(tck.MultipleLocator(500))
axp1.yaxis.set_minor_locator(tck.MultipleLocator(100))
axp1.set_xlim(0, w-1)
axp1.set_ylim(0, h-1)
axp1.set_xlabel('X')
axp1.set_ylabel('Y')
axp1.set_zlabel('Count')
if plot:
# plot the accepted nodes in subfig 1
ax11.scatter(xnodes[fitmask], ynodes[fitmask],
c='r', s=6, linewidth=0, alpha=0.8)
# plot the rejected nodes
if (~fitmask).sum()>0:
ax11.scatter(xnodes[~fitmask], ynodes[~fitmask],
c='none', s=6, edgecolor='r', linewidth=0.5)
# plot subfig 2
cnorm = colors.Normalize(vmin = background_data.min(),
vmax = background_data.max())
scalarmap = cmap.ScalarMappable(norm=cnorm, cmap=cmap.jet)
# plot the background light
image = ax12.imshow(background_data, cmap=scalarmap.get_cmap())
# plot the accepted nodes
ax12.scatter(xnodes[fitmask], ynodes[fitmask],
c='k', s=6, linewidth=0.5)
# plot the rejected nodes
if (~fitmask).sum()>0:
ax12.scatter(xnodes[~fitmask], ynodes[~fitmask],
c='none', s=6, edgecolor='k', linewidth=0.5)
# set colorbar
plt.colorbar(image, cax=ax13)
# set font size of colorbar
for tick in ax13.get_yaxis().get_major_ticks():
tick.label2.set_fontsize(9)
if display: fig.canvas.draw()
# plot for figure in paper
if plot_paper_fig:
pmask = data>0
logdata = np.zeros_like(data)-1
logdata[pmask] = np.log(data[pmask])
axp2.imshow(logdata, cmap='gray')
axp2.scatter(xnodes, ynodes, c='b', s=8, linewidth=0, alpha=0.8)
cs = axp2.contour(background_data, linewidth=1, cmap='jet')
axp2.clabel(cs, inline=1, fontsize=11, fmt='%d', use_clabeltext=True)
axp2.set_xlim(0, w-1)
axp2.set_ylim(h-1, 0)
axp2.set_xlabel('X')
axp2.set_ylabel('Y')
figp1.savefig('fig_background1.png')
figp2.savefig('fig_background2.png')
figp1.savefig('fig_background1.pdf')
figp2.savefig('fig_background2.pdf')
plt.close(figp1)
plt.close(figp2)
if fig_file is not None:
fig.savefig(fig_file)
plt.close(fig)
return background_data
| 5,337,699
|