| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
def overlap(a, b):
"""check if two intervals overlap.
Positional arguments:
a -- First interval.
b -- Second interval.
"""
return a[1] > b[0] and a[0] < b[1]
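# A minimal usage sketch (intervals as (start, end) pairs):
# overlap((0, 5), (4, 10))   # -> True, the intervals share the region 4..5
# overlap((0, 5), (5, 10))   # -> False, touching endpoints do not count as overlap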
|
88860f46a94eb53f1d6f636211916dd828c83550
| 3,645,100
|
import warnings
import re
from os import getcwd, listdir, mkdir
import os.path as op
import numpy as np
from mne import write_trans
from mne.io import read_raw_fif
from mne.transforms import quat_to_rot, rotation_angles, rotation3d
# find_condition_files, plot_movement and total_dist_moved are assumed to be
# helper functions defined elsewhere in this project.
def contAvg_headpos(condition, method='median', folder=[], summary=False):
"""
    Calculate the average transformation from dewar to head coordinates, based
    on the continuous head position estimated by MaxFilter.
Parameters
----------
    condition : str
        String contained in the common filename, e.g. "task" for files
        task-1.fif, task-2.fif, etc. Consistent naming of files is mandatory!
    method : str
        How to average: "mean" or "median" (default: "median").
    folder : str
        Path to the input files (default: current directory).
    summary : bool
        If True, write a movement plot and summary statistics to the folder.
Returns
-------
MNE-Python transform object
4x4 transformation matrix
"""
# Check that the method works
method = method.lower()
    if method not in ['median', 'mean']:
        raise RuntimeError('Wrong method. Must be either "mean" or "median"!')
    if not condition:
        raise RuntimeError('You must provide a condition!')
# Get and set folders
if not folder:
rawdir = getcwd() # [!] Match up with bash script !
else:
rawdir = folder
print(rawdir)
quatdir = op.join(rawdir,'quat_files')
mean_trans_folder = op.join(rawdir, 'trans_files')
if not op.exists(mean_trans_folder): # Make sure output folder exists
mkdir(mean_trans_folder)
mean_trans_file = op.join(mean_trans_folder, condition+'-trans.fif')
if op.isfile(mean_trans_file):
        warnings.warn('"%s" already exists in %s. Delete it if you want to rerun.' % (mean_trans_file, mean_trans_folder), RuntimeWarning)
return
# Change to subject dir
files2combine = find_condition_files(quatdir, condition)
files2combine.sort()
if not files2combine:
raise RuntimeError('No files called \"%s\" found in %s' % (condition, quatdir))
allfiles = []
for ff in files2combine:
fl = ff.split('_')[0]
tmplist = [f for f in listdir(quatdir) if fl in f and '_quat' in f]
#Fix order
if len(tmplist) > 1:
tmplist.sort()
if any("-" in f for f in tmplist):
firstfile = tmplist[-1] # The file without a number will always be last!
                tmpfs = sorted(tmplist[:-1], key=lambda a: int(re.split(r'-|\.fif', a)[-2]))  # Assumes consistent naming!
tmplist[0] = firstfile
tmplist[1:] = tmpfs
allfiles = allfiles + tmplist
if len(allfiles) > 1:
print('Files used for average head pos:')
for ib in range(len(allfiles)):
print('{:d}: {:s}'.format(ib + 1, allfiles[ib]))
else:
print('Will find average head pos in %s' % files2combine)
# LOAD DATA
# raw = read_raw_fif(op.join(quatdir,firstfile), preload=True, allow_maxshield=True, verbose=False).pick_types(meg=False, chpi=True)
# Use files2combine instead of allfiles as MNE will find split files automatically.
for idx, ffs in enumerate(files2combine):
if idx == 0:
raw = read_raw_fif(op.join(quatdir,ffs), preload=True, allow_maxshield=True).pick_types(meg=False, chpi=True)
else:
raw.append(read_raw_fif(op.join(quatdir,ffs), preload=True, allow_maxshield=True).pick_types(meg=False, chpi=True))
quat, times = raw.get_data(return_times=True)
    gof = quat[6,]  # Goodness-of-fit channel
# fs = raw.info['sfreq']
# In case "record raw" started before "cHPI"
if np.any(gof < 0.98):
begsam = np.argmax(gof>0.98)
raw.crop(tmin=raw.times[begsam])
quat = quat[:,begsam:].copy()
times = times[begsam:].copy()
# Make summaries
if summary:
plot_movement(quat, times, dirname=rawdir, identifier=condition)
total_dist_moved(quat, times, write=True, dirname=rawdir, identifier=condition)
# Get continous transformation
print('Reading transformation. This will take a while...')
H = np.empty([4,4,len(times)]) # Initiate transforms
init_rot_angles = np.empty([len(times),3])
for i,t in enumerate(times):
Hi = np.eye(4,4)
Hi[0:3,3] = quat[3:6,i].copy()
Hi[:3,:3] = quat_to_rot(quat[0:3,i])
init_rot_angles[i,:] = rotation_angles(Hi[:3,:3])
assert(np.sum(Hi[-1]) == 1.0) # sanity check result
H[:,:,i] = Hi.copy()
if method in ["mean"]:
H_mean = np.mean(H, axis=2) # stack, then average over new dim
mean_rot_xfm = rotation3d(*tuple(np.mean(init_rot_angles, axis=0))) # stack, then average, then make new xfm
elif method in ["median"]:
        H_mean = np.median(H, axis=2)  # stack, then take the median over the new dim
        mean_rot_xfm = rotation3d(*tuple(np.median(init_rot_angles, axis=0)))  # stack, then median, then make a new xfm
H_mean[:3,:3] = mean_rot_xfm
assert(np.sum(H_mean[-1]) == 1.0) # sanity check result
# Create the mean structure and save as .fif
mean_trans = raw.info['dev_head_t'] # use the last info as a template
mean_trans['trans'] = H_mean.copy()
# Write file
write_trans(mean_trans_file, mean_trans)
print("Wrote "+mean_trans_file)
return mean_trans
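# Usage sketch (an assumption, not part of the original module): MaxFilter
# quaternion files named like "task-1_quat.fif" are expected in <folder>/quat_files.
# trans = contAvg_headpos('task', method='median', folder='/data/sub01', summary=True)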
|
42b65ce84f7b303575113ca2d57f5b61841f8294
| 3,645,101
|
from . import __path__ as path
from numpy import fromfile
from os.path import join
from os import listdir
from ..IO.output import printError
def getconstantfunc(name, **kwargs):
    """Read an array of constants from a file in this package directory, by name."""
    pkg_path = path[0]
    if name not in listdir(pkg_path):
        printError("File {0} does not exist.".format(name))
        raise ValueError("File {0} does not exist.".format(name))
    temp = fromfile(join(pkg_path, name))
    return temp
|
ae5279587c9d292d4f2cb0aa9480169ee386cc99
| 3,645,102
|
import numpy as np
def scalar_prod_logp0pw_beta_basis_npf(pw, p0, DV, alpha):
    r"""
    From normalized p_fact.
    Args:
        pw: a batch of probabilities (row: word, column: chi)
        p0: the central probability whose tangent space we project onto (row vector)
        DV: centered statistics (for p0, to be consistent)
        alpha: the value of alpha
    Returns:
        scalar product between the Logmap of each point in the batch and the basis of the
        tangent space, together with the scale factor l_scale
    .. math:: \left\langle \mathrm{Log}^{(\alpha)}_{p_0} p_w,\ \beta_i^{(\alpha)} \right\rangle_{\mathbb{R}^n_{(\alpha)}}
"""
p_fact_normalized, l_scale = get_norm_p_fact(p0, pw, alpha)
ldv_alpha = np.matmul(p_fact_normalized, DV)
return ldv_alpha, l_scale
|
be63a3bcd791be4ee116a1e9abbd10a455e10bfc
| 3,645,103
|
from typing import List
from typing import Tuple
from datetime import datetime
from bs4 import BeautifulSoup
def get_seminars() -> List[Tuple[str, str, datetime, str]]:
"""
Returns summary information for upcoming ITEE seminars, comprising
seminar date, seminar title, venue, and an information link.
"""
html = BeautifulSoup(get_seminar_summary_page(), 'html.parser')
summary_table = html.find('table', summary='ITEE Seminar List')
if (summary_table is None) or (summary_table.tbody is None):
# When no seminars are scheduled, no table is shown.
return []
seminar_rows = summary_table.tbody.find_all('tr')
seminar_summaries = map(get_seminar_summary, seminar_rows)
return list(seminar_summaries)
|
c1f149fa1625492f60b81b7c987f839c9d14abf3
| 3,645,104
|
import numpy as np
# centroid_linkage is assumed to be the clustering helper defined elsewhere in this module.
def cluster_hierarchically(active_sites, num_clusters=7):
"""
Cluster the given set of ActiveSite instances using a hierarchical algorithm.
Input: a list of ActiveSite instances
(OPTIONAL): number of clusters (default 7)
Output: a list of clusterings
(each clustering is a list of lists of ActiveSite instances)
"""
labels = centroid_linkage(active_sites,num_clusters)
clustering = []
for clust in np.unique(labels):
        clustering.append([active_sites[ind] for ind, val in enumerate(labels.tolist()) if val == clust])
return clustering
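# Usage sketch (assumes `active_sites` is a list of ActiveSite instances, as in the docstring):
# clusters = cluster_hierarchically(active_sites, num_clusters=5)
# len(clusters)   # -> 5
# clusters[0]     # -> list of ActiveSite instances assigned to the first cluster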
|
e7ca3f8a9a098630a51944ae870b1beb730684dd
| 3,645,105
|
def insert(table: _DMLTableArgument) -> Insert:
"""Construct an :class:`_expression.Insert` object.
E.g.::
from sqlalchemy import insert
stmt = (
insert(user_table).
values(name='username', fullname='Full Username')
)
Similar functionality is available via the
:meth:`_expression.TableClause.insert` method on
:class:`_schema.Table`.
.. seealso::
:ref:`tutorial_core_insert` - in the :ref:`unified_tutorial`
:param table: :class:`_expression.TableClause`
which is the subject of the
insert.
:param values: collection of values to be inserted; see
:meth:`_expression.Insert.values`
for a description of allowed formats here.
Can be omitted entirely; a :class:`_expression.Insert` construct
will also dynamically render the VALUES clause at execution time
based on the parameters passed to :meth:`_engine.Connection.execute`.
:param inline: if True, no attempt will be made to retrieve the
SQL-generated default values to be provided within the statement;
in particular,
this allows SQL expressions to be rendered 'inline' within the
statement without the need to pre-execute them beforehand; for
backends that support "returning", this turns off the "implicit
returning" feature for the statement.
If both :paramref:`_expression.Insert.values` and compile-time bind
parameters are present, the compile-time bind parameters override the
information specified within :paramref:`_expression.Insert.values` on a
per-key basis.
The keys within :paramref:`_expression.Insert.values` can be either
:class:`~sqlalchemy.schema.Column` objects or their string
identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``INSERT`` statement's table, the statement will be correlated
against the ``INSERT`` statement.
.. seealso::
:ref:`tutorial_core_insert` - in the :ref:`unified_tutorial`
"""
return Insert(table)
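# Usage sketch (assumes an existing SQLAlchemy Engine bound to `engine` and a
# Table named `user_table`, as in the docstring example):
# with engine.connect() as conn:
#     conn.execute(insert(user_table), {"name": "username", "fullname": "Full Username"})
#     conn.commit()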
|
70bd0accf66e72d6240f7cd586fc47c927e109a6
| 3,645,106
|
import multiprocessing as mp
def parallelize_window_generation_imap(passed_args, procs=None):
"""Produce window files, in a parallel fashion
This method calls the get_win function as many times as sets of arguments
specified in passed_args. starmap is used to pass the list of arguments to
each invocation of get_win. The pool is created with either the number of
provided processors, or half the number of the available processors (be
kind, don't allocate everything).
Parameters
----------
passed_args : list
A list of lists, each one containing all the arguments to pass to an
invocation of the get_win function.
procs : int
The number of processors to use. Defaulted to None, will use half of
the available cores.
Returns
-------
list
A list containing the paths of all the results from the get_win calls.
"""
pool = mp.Pool(procs or int(mp.cpu_count() / 2))
results = pool.starmap(get_win, passed_args)
pool.close()
pool.join()
return results
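# Usage sketch: each inner list holds the positional arguments for one get_win call
# (the argument values below are illustrative assumptions, not the real signature):
# passed_args = [['trace_a.sac', 50], ['trace_b.sac', 50]]
# window_paths = parallelize_window_generation_imap(passed_args, procs=4)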
|
74cf5c8138ad1511c9aeae62a3f14c705a3735b5
| 3,645,107
|
def build_train(q_func, ob_space, ac_space, optimizer, sess, grad_norm_clipping=None,
scope="deepq", reuse=None, full_tensorboard_log=False):
"""
Creates the train function:
:param q_func: (DQNPolicy) the policy
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param optimizer: (tf.train.Optimizer) optimizer to use for the Q-learning objective.
:param sess: (TensorFlow session) The current TensorFlow session
:param grad_norm_clipping: (float) clip gradient norms to this value. If None no clipping is performed.
:param scope: (str or VariableScope) optional scope for variable_scope.
:param reuse: (bool) whether or not the variables should be reused. To be able to reuse the scope must be given.
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
:return: (tuple)
act
train: (function (Any, numpy float, numpy float, Any, numpy bool, numpy float): numpy float)
optimize the error in Bellman's equation. See the top of the file for details.
step_model: (DQNPolicy) Policy for evaluation
"""
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
with tf.variable_scope(scope, reuse=reuse):
policy = q_func(sess, ob_space, ac_space, 1, 1, None)
act = build_act(policy)
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/model")
with tf.variable_scope("loss", reuse=reuse):
# set up placeholders
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
act_mask = tf.one_hot(act_t_ph, n_actions)
labels = tf.nn.relu(tf.math.sign(rew_t_ph))
dist = tf.nn.softmax(policy.q_values)
pred = tf.reduce_sum(dist * act_mask, axis=1)
loss = tf.math.reduce_mean(tf.keras.losses.binary_crossentropy(y_true=labels, y_pred=pred))
tf.summary.scalar("loss", loss)
# compute optimization op (potentially with gradient clipping)
gradients = optimizer.compute_gradients(loss, var_list=q_func_vars)
if grad_norm_clipping is not None:
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('rewards', tf.reduce_mean(rew_t_ph))
if full_tensorboard_log:
tf.summary.histogram('rewards', rew_t_ph)
optimize_expr = optimizer.apply_gradients(gradients)
summary = tf.summary.merge_all()
# Create callable functions
_train = tf_util.function(
inputs=[
policy._obs_ph,
act_t_ph,
rew_t_ph,
policy.targets_ph,
policy.train_ph
],
outputs=summary,
updates=[optimize_expr]
)
def train(obses, actions, rewards, targets, **kwargs):
return _train(obses, actions, rewards, targets, True, **kwargs)
return act, train, policy
|
395899bdb0497db5f56d42bd968ad07117cf0126
| 3,645,108
|
def _make_global_var_name(element):
"""creates a global name for the MAP element"""
if element.tag != XML_map:
raise ValueError('Expected element <%s> for variable name definition, found <%s>' % (XML_map,element.tag))
base_name = _get_attrib_or_None(element,XML_attr_name)
if base_name is None:
# a MAP element needs to have a name
raise ValueError('Element <%s> is missing attribute "%s".' % (XML_map,XML_attr_name))
    # walk up the hierarchy until we find the group element
total_group_name = ''
while True:
element = element.find('..')
if element is None:
# no group element --> we could raise an exception
if total_group_name =='': total_group_name = 'NO_GROUP'
break
if element.tag == XML_group:
group_name = _get_attrib_or_None(element,XML_attr_name)
if group_name is None:
# perfectly legal case
group_name = 'EMPTY_GROUP_NAME'
total_group_name = group_name+'_'+total_group_name
#... and keep looking in case there are nested groups
    h = str(hash(total_group_name + base_name))  # hash the name in case somebody uses names with non-ASCII characters only
    if h[0] == '-':
        h = 'M' + h[1:]
    name = '_VAR_' + total_group_name + base_name + '_' + h
return _sanitized_var_name(name)
|
7a4c26ae3791ea0543a2690a61651604e727abf6
| 3,645,109
|
def list_user_images(user_id):
"""
Given a user_id, returns a list of Image objects scoped to that user.
:param user_id: str user identifier
    :return: List of Image (message) objects
"""
db = get_session()
try:
imgs = [msg_mapper.db_to_msg(i).to_dict() for i in db.query(Image).filter(Image.user_id == user_id).all()]
finally:
db.close()
return imgs
|
5a00ccae4fdea9417f26158d9042cc62a56e1013
| 3,645,110
|
def is_lower_cased_megatron(pretrained_model_name):
"""
    Returns whether the Megatron model is cased or uncased.
Args:
pretrained_model_name (str): pretrained model name
Returns:
do_lower_cased (bool): whether the model uses lower cased data
"""
return MEGATRON_CONFIG_MAP[pretrained_model_name]["do_lower_case"]
|
6ff0954a35e19144e99aa0bdbbbeddc7542aad9d
| 3,645,111
|
import subprocess
from subprocess import CalledProcessError
import json
def bright(args, return_data=True):
"""
    Executes CA Brightside with the arguments of this function. The response is returned as Python data structures.
    The parameter ``return_data`` is ``True`` by default and causes only the data section to be returned, without metadata.
    Metadata are processed automatically; if they indicate that the command was not successful, a ``BrightCallError``
    exception is raised.
Example:
jobs = bright("zos-jobs list jobs")
# jobs is equal to:
[{'class': 'A',
'files-url': 'https://ca32.ca.com:1443/zosmf/restjobs/jobs/J0038667USILCA11D4B949F2.......%3A/files',
'job-correlator': 'J0038667USILCA11D4B949F2.......:',
'jobid': 'JOB38667',
'jobname': 'PLAPALLC',
'owner': 'PLAPE03',
'phase': 20,
'phase-name': 'Job is on the hard copy queue',
'retcode': 'SEC ERROR',
'status': 'OUTPUT',
'subsystem': 'JES2',
'type': 'JOB',
'url': 'https://ca32.ca.com:1443/zosmf/restjobs/jobs/J0038667USILCA11D4B949F2.......%3A'}]
"""
if not isinstance(args, str):
args = subprocess.list2cmdline(args)
command = f"bright --rfj {args}"
try:
j, cp = _call_command_and_parse_json(command)
if j is None:
return None
if not j.get("success"):
be = BrightCallError(cp.returncode, command,
args, output=cp.stdout, stderr=cp.stderr)
be.errors = j.get("errors")
be.message = j.get("message")
else:
if "data" in j and return_data:
return j["data"]
return j
except CalledProcessError as e:
log.debug("error: %s, output=%s" % (repr(e), e.output))
if e.stderr:
raise BrightCallError(e.returncode, e.cmd,
args, output=e.output, stderr=e.stderr)
else:
j = json.loads(e.output)
be = BrightCallError(e.returncode, e.cmd, args, output=j.get(
"stdout"), stderr=j.get("stderr"))
be.errors = j.get("errors")
be.message = j.get("message")
raise be
|
cbc2824a2f78893fc4a5a38db95760a214c058d1
| 3,645,112
|
def decipher(string, key, a2i_dict, i2a_dict):
"""
This function is BASED on https://github.com/jameslyons/pycipher
"""
key = [k.upper() for k in key]
ret = ''
for (i, c) in enumerate(string):
i = i % len(key)
ret += i2a_dict[(a2i_dict[c] - a2i_dict[key[i]]) % len(a2i_dict)]
return ret
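# Usage sketch: a classic Vigenere decipher over the uppercase alphabet.
# a2i_dict maps symbols to indices and i2a_dict is the inverse mapping.
# import string
# a2i = {c: i for i, c in enumerate(string.ascii_uppercase)}
# i2a = {i: c for i, c in enumerate(string.ascii_uppercase)}
# decipher('RIJVS', 'KEY', a2i, i2a)   # -> 'HELLO'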
|
a414892f8ccf18ab5d3189b662b284939c931382
| 3,645,113
|
def keep_samples_from_pcoa_data(headers, coords, sample_ids):
"""Controller function to filter coordinates data according to a list
Parameters
----------
headers : list, str
list of sample identifiers, if used for jackknifed data, this
should be a list of lists containing the sample identifiers
coords : numpy.ndarray
2-D numpy array with the float data in the coordinates, if used for
jackknifed data, coords should be a list of 2-D numpy arrays
sample_ids : list, str
list of sample ids that should be kept
Returns
-------
out_headers : list, str
list of headers
out_coords : list, np.array
list of coordinates
"""
    # if coords is a list, the input is jackknifed
    if isinstance(coords, list):
out_coords, out_headers = [], []
for single_headers, single_coords in zip(headers, coords):
a, b = filter_samples_from_coords(single_headers, single_coords,
sample_ids)
out_headers.append(a)
out_coords.append(b)
return out_headers, out_coords
else:
out_headers, out_coords = filter_samples_from_coords(headers,
coords,
sample_ids)
return out_headers, out_coords
|
c33eaf6c593fcebc09e98ec479ca0505c52424c8
| 3,645,114
|
import platform
def get_default_command() -> str:
"""get_default_command returns a command to execute the default output of g++ or clang++. The value is basically `./a.out`, but `.\a.exe` on Windows.
The type of return values must be `str` and must not be `pathlib.Path`, because the strings `./a.out` and `a.out` are different as commands but same as a path.
"""
if platform.system() == 'Windows':
return r'.\a.exe'
return './a.out'
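# Usage sketch: run the compiled default binary in the current directory.
# import subprocess
# subprocess.run(get_default_command(), shell=True)   # executes ./a.out (or .\a.exe on Windows)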
|
d06abdefab189f9c69cba70d9dab25ce83bebc75
| 3,645,115
|
from skymaps import Band
def roi_circle(roi_index, galactic=True, radius=5.0):
""" return (lon,lat,radius) tuple for given nside=12 position
"""
sdir = Band(12).dir(roi_index)
return (sdir.l(),sdir.b(), radius) if galactic else (sdir.ra(),sdir.dec(), radius)
|
303c288749e3bd9ee63c95e79ef5460468d2cea0
| 3,645,116
|
from typing import List
def find_by_user_defined_key(user_defined_key: str) -> List[models.BBoundingBoxDTO]:
"""Get a list of bounding boxes by a user-defined key."""
res_json = BoundingBoxes.get('query/userdefinedkey/{}'.format(user_defined_key))
return list(map(models.BBoundingBoxDTO.from_dict, res_json))
|
a60137fdc43b04c1404eaeae021f61381c39c761
| 3,645,117
|
def object_type(r_name):
"""
Derives an object type (i.e. ``user``) from a resource name (i.e. ``users``)
:param r_name:
Resource name, i.e. would be ``users`` for the resource index URL
``https://api.pagerduty.com/users``
:returns: The object type name; usually the ``type`` property of an instance
of the given resource.
:rtype: str
"""
if r_name.endswith('ies'):
# Because English
return r_name[:-3]+'y'
else:
return r_name.rstrip('s')
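# Examples:
# object_type('users')     # -> 'user'
# object_type('policies')  # -> 'policy'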
|
b74e373691edf8a8b78c2a3ff5d7b9666504330a
| 3,645,118
|
import traceback
def create_comment(args, global_var):
"""
    Create a new comment (at most 1000 comments may be created per day).
    -------------
    About the uuid:
    1. Use data["content"] as raw_str.
    2. The generated comment_id contains dashes, which looks odd, so they are stripped:
       comment_id = "".join(comment_id.split("-"))
"""
can_create = hit_daily_comment_creation_threshold(global_var)
if not can_create:
res = {
"code": "FAILURE",
"message": "Hit max daily comment creation threshold, please try to comment tomorrow."
}
return res
data = args["data"]
db = global_var["db"]
comment_id = create_uuid(raw_str=data["content"])
comment_id = "".join(comment_id.split("-"))
try:
current_user_name = get_jwt_identity()
# 根据 current_user_name 找到 user_id
user_query = db.session.query(User.user_id).filter(User.name == current_user_name).first()
current_user_id = user_query.user_id
record = Comment(
comment_id=comment_id,
content=data["content"],
creator_user_id=getValueWithDefault(data, "creator_user_id", current_user_id)
)
db.session.add(record)
db.session.commit()
res = {
"code": "SUCCESS",
"data": {"id": record.id,
"creator": current_user_name}
}
global_var["today_already_created_comment_count"][1] += 1
except Exception as e:
res = {
"code": "FAILURE",
"message": traceback.format_exc()
}
return res
|
d8596febf2a020357654291952e447f175f2fe20
| 3,645,119
|
from datetime import datetime, timezone
import pprint
def get_aggregated_metrics(expr: ExperimentResource):
"""
Get aggregated metrics using experiment resource and metric resources.
"""
versions = [expr.spec.versionInfo.baseline]
if expr.spec.versionInfo.candidates is not None:
versions += expr.spec.versionInfo.candidates
# messages not working as intended...
messages = []
# initialize aggregated metrics object
iam = get_builtin_metrics(expr)
# check if start time is greater than now
# this is problematic.... start time is set by etc3 but checked by analytics.
# clocks are not synced, so this is not right...
if expr.status.startTime > (datetime.now(timezone.utc)):
messages.append(Message(MessageLevel.ERROR, "Invalid startTime: greater than current time"))
iam.message = Message.join_messages(messages)
return iam
# there are no metrics to be fetched
if expr.status.metrics is None:
iam.message = Message.join_messages(messages)
return iam
for metric_info in expr.status.metrics:
        # only custom metrics are handled below, not built-in metrics
if metric_info.metricObj.spec.provider is None or \
metric_info.metricObj.spec.provider != "iter8":
iam.data[metric_info.name] = AggregatedMetric(data = {})
# fetch the metric value for each version...
for version in versions:
# initialize metric object for this version...
iam.data[metric_info.name].data[version.name] = VersionMetric()
val, err = get_metric_value(metric_info.metricObj, version, \
expr.status.startTime)
if err is None and val is not None:
iam.data[metric_info.name].data[version.name].value = val
else:
try:
val = float(expr.status.analysis.aggregated_metrics.data\
[metric_info.name].data[version.name].value)
except AttributeError:
val = None
iam.data[metric_info.name].data[version.name].value = val
if err is not None:
messages.append(Message(MessageLevel.ERROR, \
f"Error from metrics backend for metric: {metric_info.name} \
and version: {version.name}"))
iam.message = Message.join_messages(messages)
logger.debug("Analysis object after metrics collection")
logger.debug(pprint.PrettyPrinter().pformat(iam))
return iam
|
047f6aac3f2662f73133fb3f4be6716d2e070035
| 3,645,120
|
from fairseq.models.bart import BARTModel
import torch
def get_bart(folder_path, checkpoint_file):
"""
Returns a pretrained BART model.
Args:
folder_path: str, path to BART's model, containing the checkpoint.
checkpoint_file: str, name of BART's checkpoint file (starting from BART's folder).
"""
bart = BARTModel.from_pretrained(model_name_or_path=folder_path + '/',
checkpoint_file=checkpoint_file)
if torch.cuda.is_available():
bart.cuda()
print("Using BART on GPU...")
bart.eval()
print("BART loaded (in evaluation mode).\n")
return bart
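# Usage sketch (assumes a bart.large checkpoint has been downloaded to disk):
# bart = get_bart('checkpoints/bart.large', 'model.pt')
# hypotheses = bart.sample(['BART is a denoising autoencoder for sequence-to-sequence models.'], beam=4)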
|
504a242d24946761f2a880db66e1955752b89677
| 3,645,121
|
import os
def construct_s3_raw_data_path(study_id, filename):
""" S3 file paths for chunks are of this form:
RAW_DATA/study_id/user_id/data_type/time_bin.csv """
return os.path.join(RAW_DATA_FOLDER, study_id, filename)
|
d8ed6753947b055e88fc762135f434b5cb7eb7c7
| 3,645,122
|
def _mixed_s2(x, filters, name=None):
"""Utility function to implement the 'stride-2' mixed block.
# Arguments
x: input tensor.
filters: a list of filter sizes.
name: name of the ops
# Returns
Output tensor after applying the 'stride-2' mixed block.
"""
if len(filters) != 2:
raise ValueError('filters should have 2 components')
name1 = name + '_3x3' if name else None
branch3x3 = _depthwise_conv2d_bn(x, filters[0],
kernel_size=(3, 3),
strides=(2, 2),
name=name1)
name1 = name + '_5x5' if name else None
branch5x5 = _depthwise_conv2d_bn(x, filters[1],
kernel_size=(5, 5),
strides=(2, 2),
name=name1)
name1 = name + '_pool' if name else None
branchpool = layers.MaxPooling2D(pool_size=(3, 3), padding='same',
strides=(2, 2), name=name1)(x)
concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3
x = layers.concatenate([branch3x3, branch5x5, branchpool],
axis=concat_axis,
name=name)
return x
|
b4260c1b2a9e5c38a6c48baab611ac2e0d73c888
| 3,645,123
|
from typing import Union
from typing import Type
def get_convertor(cls: Union[Type, str]) -> Convertor:
"""Returns Convertor for data type.
Arguments:
cls: Type or type name. The name could be simple class name, or full name that includes
the module name.
Note:
When `cls` is a type name:
        1. If the class name is NOT registered via `register_class()`, it's not possible to
           perform a lookup for base classes.
2. If simple class name is provided and multiple classes of the same name but from
different modules have registered convertors, the first one found is used. If you
want to avoid this situation, use full names.
Raises:
        TypeError: If there is no convertor for `cls` or any of its base classes.
"""
if (conv := _get_convertor(cls)) is None:
raise TypeError(f"Type '{cls.__name__ if isinstance(cls, type) else cls}' has no Convertor")
return conv
|
c70ce0a032efc031de67fc965a6984779dd635e7
| 3,645,124
|
def _is_mchedr(filename):
"""
Checks whether a file format is mchedr
(machine-readable Earthquake Data Report).
:type filename: str
:param filename: Name of the mchedr file to be checked.
:rtype: bool
:return: ``True`` if mchedr file.
.. rubric:: Example
>>> _is_mchedr('/path/to/mchedr.dat') # doctest: +SKIP
True
"""
if not isinstance(filename, (str, native_str)):
return False
with open(filename, 'rb') as fh:
for line in fh.readlines():
# skip blank lines at beginning, if any
if line.strip() == b'':
continue
# first record has to be 'HY':
if line[0:2] == b'HY':
return True
else:
return False
|
f8d5521cfd6ebcdf490af41877e865fb185f0f7c
| 3,645,125
|
import math
def ddr3_8x8_profiling(
trace_file=None,
word_sz_bytes=1,
page_bits=8192, # number of bits for a dram page/row
min_addr_word=0,
max_addr_word=100000
):
"""
    This code takes a non-stalling DRAM trace and reorganizes it to meet the bandwidth requirement.
    Currently it does not generate a new trace; this can be improved later.
    All default values are for DDR3, and the output values are used by CACTI's "main memory" type.
    In this model the burst behavior of the DRAM is not modeled, so the reported cycle count is an
    overestimate, i.e. a pessimistic estimation.
"""
# output list
tot_word = 0
max_word = 0
tot_access = 0
tot_row_access = 0
act_cycles = 0
shift_cycles = 0
ideal_start_cycle = 0
ideal_end_cycle = 0
bank=8 # number of banks in a chip. banks can be interleaved to reduce access latency. not modelled for simplicity.
burst=8 # number of bytes for a single bank row and col address, and burst is sequential. not modelled for simplicity.
prefetch=8 # number of prefetches/chips, with each chip referring to 1 prefetch. prefetch is parallel
io_bits=8 # number of bits provided by all chips, with each chip providing io_bits/prefectch bits, each 8 bit provided by a single bank in the chip
    # number of bytes per page
    page_byte = page_bits / 8
    # per-cycle DDR bandwidth in bytes
    io_byte = io_bits / 8
requests = open(trace_file, 'r')
# applied address mapping: row + bank + col + chip
# this mapping is just for modeling, and actual implementation can be different
# for default ddr3 setting, 14-b row + 3-b bank + 10-b col + 3-b chip
# more info about ddr3 can be found here: http://mermaja.act.uji.es/docencia/is37/data/DDR3.pdf page 15
# parallel prefetch via chip has higher priority than sequential burst in a bank
# prefetch_buf format (row idx, col idx, chip idx)
    # consecutive addresses are transmitted using prefetch instead of burst, as they are from the same bank but different chips
# bank interleaving is not simulated here, as they will not incur high access overhead
prefetch_buf_new = []
prefetch_buf_old = []
current_prefetch = []
first = True
for entry in requests:
elems = entry.strip().split(',')
elems = prune(elems)
elems = [float(x) for x in elems]
valid_word = 0
if first == True:
first = False
ideal_start_cycle = elems[0]
ideal_end_cycle = elems[0]
prefetch_buf_new = []
# memory row index and col index generation inside a chip
for e in range(1, len(elems)): # each element here is a word
# only count legal address
if (elems[e] >= min_addr_word) and (elems[e] < max_addr_word):
# get the byte addr of the element, as dram is byte addressable
elem_addr_byte = math.floor((elems[e] - min_addr_word) * word_sz_bytes)
# this row index contain both row and bank in the address
row_idx = math.floor(elem_addr_byte / page_byte)
# col idx inside a chip
col_idx = math.floor((elem_addr_byte % page_byte) / prefetch)
# chip index
chip_idx = math.floor(elem_addr_byte % prefetch)
prefetch_buf_new.append((row_idx, col_idx, chip_idx))
valid_word += 1
act_cycles += (len(prefetch_buf_new) > 0)
# add addresses for multi-byte word
tmp_prefetch_buf = list(prefetch_buf_new)
for w in range(math.ceil(word_sz_bytes) - 1):
for (x, y, z) in tmp_prefetch_buf:
# get the byte addr of the element, as dram is byte addressable
elem_addr_byte = x * page_byte + y * prefetch + z + (w + 1)
# this row index contain both row and bank in the address
row_idx = math.floor(elem_addr_byte / page_byte)
# col idx inside a chip
col_idx = math.floor((elem_addr_byte % page_byte) / prefetch)
# chip index
chip_idx = math.floor(elem_addr_byte % prefetch)
prefetch_buf_new.append((row_idx, col_idx, chip_idx))
tot_word += valid_word
if max_word < valid_word:
max_word = valid_word
# merge the repeated accesses in byte granularity
prefetch_buf_new = list(set(prefetch_buf_new))
new_access = 0
# update the prefetch start addr
prefetch_row_col_new = list(set([(x, y) for (x, y, z) in prefetch_buf_new]))
prefetch_row_col_old = list(set([(x, y) for (x, y, z) in prefetch_buf_old]))
for (x, y) in prefetch_row_col_new:
# a new start address for prefetch
if (x, y) not in prefetch_row_col_old:
start_chip = 1000000
for (i, j, k) in prefetch_buf_new:
if x == i and j == y and k < start_chip:
# add a new prefetch
start_chip = k
current_prefetch.append((x, y))
# each prefetch means an access
new_access += 1
tot_access += new_access
for (x, y) in prefetch_row_col_old:
if (x, y) not in prefetch_row_col_new:
# remove a prefetch if it's not used anymore
current_prefetch.remove((x, y))
# print(current_prefetch)
# only new row accesses from the last load are counted, as old are already buffered
new_row_access = 0
# only different blocks are accessed, old accesses are buffered already and required no access
prefetch_row_new = list(set([x for (x, y, z) in prefetch_buf_new]))
prefetch_row_old = list(set([x for (x, y, z) in prefetch_buf_old]))
for a in range(len(prefetch_row_new)):
if prefetch_row_new[a] not in prefetch_row_old:
new_row_access += 1
tot_row_access += new_row_access
prefetch_buf_old = prefetch_buf_new
# divided by two because of ddr
shift_cycles = max((math.ceil(tot_access / 2) - act_cycles), 0 )
requests.close()
return tot_word, max_word, tot_access, tot_row_access, act_cycles, shift_cycles, ideal_start_cycle, ideal_end_cycle
|
6a3369a79ae77e6f984b5250e17a7e79393e18d2
| 3,645,126
|
def get_sites_by_latlon(latlon, filter_date='', **kwargs):
"""Gets list of sites from BETYdb, filtered by a contained point.
latlon (tuple) -- only sites that contain this point will be returned
filter_date -- YYYY-MM-DD to filter sites to specific experiment by date
"""
latlon_api_arg = "%s,%s" % (latlon[0], latlon[1])
return get_sites(filter_date=filter_date, containing=latlon_api_arg, **kwargs)
|
40c9bcbd8884d287a8bbdf233376f27c6c2d041e
| 3,645,127
|
import logging
def get_context(book, chapter, pharse):
"""
    Given a book, chapter, and pharse (verse) number, return the corresponding Bible text.
"""
try:
context = repository['{} {}:{}'.format(book, chapter, pharse)]
return context
except KeyError:
bookname = bookid2chinese[book]
pharse_name = '{}{}:{}'.format(bookname, chapter, pharse)
logging.warning('Cannot find this pharse:' + pharse_name)
raise KeyError('Cannot find this pharse')
|
a9d261a780b57db06af921c11001786107886b68
| 3,645,128
|
import os
def find_cocotb_base(path="", debug=False):
"""
Find Cocotb base directory in the normal installation path. If the user
specifies a location it attempts to find cocotb in that directory. This
    function fails quietly because most people will probably not use cocotb
    on the full design, so it's not a big deal if it fails.
Args:
path (string): Path to cocotb base if cocotb is not installed in the
default location(can be left blank)
Returns:
(String): Path to cocotb on the local machine, returns an empty string
if none is found
Raises: Nothing
"""
#Normally cocotb is installed (on Linux) at
if os.name == "posix":
if len(path) == 0:
path = DEFAULT_POSIX_COCOTB
else:
raise CocotbError("Error, Windows and Mac are not supported for " +
"cocotb utils")
dirs = os.listdir(path)
if debug: print "Look for directory"
if debug: print "path: %s" % path
for s in dirs:
if "cocotb" in s:
path = os.path.join(path, s)
if debug: print "Found: %s" % path
return path
raise CocotbWarning("Did not find Cocotb in %s" % path)
|
41bdc488a82017730785b1cad8b1f1925a82bbb7
| 3,645,129
|
import os
import numpy as np
import pydicom as dicom  # assumed: older pydicom releases expose read_file as used below
from natsort import natsorted, ns
def read_dicom_volume(dcm_path):
"""
This function reads all dicom volumes in a folder as a volume.
"""
dcm_files = [ f for f in os.listdir(dcm_path) if f.endswith('.dcm')]
    dcm_files = natsorted(dcm_files, alg=ns.IGNORECASE)
Z = len(dcm_files)
reference = dicom.read_file(os.path.join(dcm_path,dcm_files[0]))
H,W = reference.pixel_array.shape
    pixel_dtype = reference.pixel_array.dtype
    volume = np.zeros((H, W, Z), dtype=pixel_dtype)
    for (ii, dcm_slice) in enumerate(dcm_files):
        volume[:, :, ii] = dicom.read_file(os.path.join(dcm_path, dcm_slice)).pixel_array
return volume
|
f93526bf0228c6f75638915bb1933b484bc23e13
| 3,645,130
|
from collections import OrderedDict
def _compare_namelists(gold_namelists, comp_namelists, case):
###############################################################################
"""
Compare two namelists. Print diff information if any.
Returns comments
Note there will only be comments if the namelists were not an exact match
Expect args in form: {namelist -> {key -> value} }.
value can be an int, string, list, or dict
>>> teststr = '''&nml
... val = 'foo'
... aval = 'one','two', 'three'
... maval = 'one', 'two', 'three', 'four'
... dval = 'one -> two', 'three -> four'
... mdval = 'one -> two', 'three -> four', 'five -> six'
... nval = 1850
... /
... &nml2
... val2 = .false.
... /
... '''
>>> _compare_namelists(_parse_namelists(teststr.splitlines(), 'foo'), _parse_namelists(teststr.splitlines(), 'bar'), None)
''
>>> teststr1 = '''&nml1
... val11 = 'foo'
... /
... &nml2
... val21 = 'foo'
... val22 = 'foo', 'bar', 'baz'
... val23 = 'baz'
... val24 = '1 -> 2', '2 -> 3', '3 -> 4'
... /
... &nml3
... val3 = .false.
... /'''
>>> teststr2 = '''&nml01
... val11 = 'foo'
... /
... &nml2
... val21 = 'foo0'
... val22 = 'foo', 'bar0', 'baz'
... val230 = 'baz'
... val24 = '1 -> 20', '2 -> 3', '30 -> 4'
... /
... &nml3
... val3 = .false.
... /'''
>>> comments = _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), None)
>>> print(comments)
Missing namelist: nml1
Differences in namelist 'nml2':
BASE: val21 = 'foo'
COMP: val21 = 'foo0'
BASE: val22 list item 1 = 'bar'
COMP: val22 list item 1 = 'bar0'
missing variable: 'val23'
BASE: val24 dict item 1 = 2
COMP: val24 dict item 1 = 20
dict variable 'val24' missing key 3 with value 4
dict variable 'val24' has extra key 30 with value 4
found extra variable: 'val230'
Found extra namelist: nml01
<BLANKLINE>
>>> teststr1 = '''&rad_cnst_nl
... icecldoptics = 'mitchell'
... logfile = 'cpl.log.150514-001533'
... case_name = 'ERB.f19_g16.B1850C5.sandiatoss3_intel.C.150513-230221'
... runid = 'FOO'
... model_version = 'cam5_3_36'
... username = 'jgfouca'
... iceopticsfile = '/projects/ccsm/inputdata/atm/cam/physprops/iceoptics_c080917.nc'
... liqcldoptics = 'gammadist'
... liqopticsfile = '/projects/ccsm/inputdata/atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc'
... mode_defs = 'mam3_mode1:accum:=', 'A:num_a1:N:num_c1:num_mr:+',
... 'A:so4_a1:N:so4_c1:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:pom_a1:N:pom_c1:p-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocpho_rrtmg_c101112.nc:+',
... 'A:soa_a1:N:soa_c1:s-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', 'A:bc_a1:N:bc_c1:black-c:/projects/ccsm/inputdata/atm/cam/physprops/bcpho_rrtmg_c100508.nc:+',
... 'A:dst_a1:N:dst_c1:dust:/projects/ccsm/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', 'A:ncl_a1:N:ncl_c1:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc',
... 'mam3_mode2:aitken:=', 'A:num_a2:N:num_c2:num_mr:+',
... 'A:so4_a2:N:so4_c2:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:soa_a2:N:soa_c2:s-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+',
... 'A:ncl_a2:N:ncl_c2:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', 'mam3_mode3:coarse:=',
... 'A:num_a3:N:num_c3:num_mr:+', 'A:dst_a3:N:dst_c3:dust:/projects/ccsm/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+',
... 'A:ncl_a3:N:ncl_c3:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc:+', 'A:so4_a3:N:so4_c3:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc'
... rad_climate = 'A:Q:H2O', 'N:O2:O2', 'N:CO2:CO2',
... 'N:ozone:O3', 'N:N2O:N2O', 'N:CH4:CH4',
... 'N:CFC11:CFC11', 'N:CFC12:CFC12', 'M:mam3_mode1:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode1_rrtmg_c110318.nc',
... 'M:mam3_mode2:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode2_rrtmg_c110318.nc', 'M:mam3_mode3:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode3_rrtmg_c110318.nc'
... /'''
>>> teststr2 = '''&rad_cnst_nl
... icecldoptics = 'mitchell'
... logfile = 'cpl.log.150514-2398745'
... case_name = 'ERB.f19_g16.B1850C5.sandiatoss3_intel.C.150513-1274213'
... runid = 'BAR'
... model_version = 'cam5_3_36'
... username = 'hudson'
... iceopticsfile = '/something/else/inputdata/atm/cam/physprops/iceoptics_c080917.nc'
... liqcldoptics = 'gammadist'
... liqopticsfile = '/something/else/inputdata/atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc'
... mode_defs = 'mam3_mode1:accum:=', 'A:num_a1:N:num_c1:num_mr:+',
... 'A:so4_a1:N:so4_c1:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:pom_a1:N:pom_c1:p-organic:/something/else/inputdata/atm/cam/physprops/ocpho_rrtmg_c101112.nc:+',
... 'A:soa_a1:N:soa_c1:s-organic:/something/else/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', 'A:bc_a1:N:bc_c1:black-c:/something/else/inputdata/atm/cam/physprops/bcpho_rrtmg_c100508.nc:+',
... 'A:dst_a1:N:dst_c1:dust:/something/else/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', 'A:ncl_a1:N:ncl_c1:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc',
... 'mam3_mode2:aitken:=', 'A:num_a2:N:num_c2:num_mr:+',
... 'A:so4_a2:N:so4_c2:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:soa_a2:N:soa_c2:s-organic:/something/else/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+',
... 'A:ncl_a2:N:ncl_c2:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', 'mam3_mode3:coarse:=',
... 'A:num_a3:N:num_c3:num_mr:+', 'A:dst_a3:N:dst_c3:dust:/something/else/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+',
... 'A:ncl_a3:N:ncl_c3:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc:+', 'A:so4_a3:N:so4_c3:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc'
... rad_climate = 'A:Q:H2O', 'N:O2:O2', 'N:CO2:CO2',
... 'N:ozone:O3', 'N:N2O:N2O', 'N:CH4:CH4',
... 'N:CFC11:CFC11', 'N:CFC12:CFC12', 'M:mam3_mode1:/something/else/inputdata/atm/cam/physprops/mam3_mode1_rrtmg_c110318.nc',
... 'M:mam3_mode2:/something/else/inputdata/atm/cam/physprops/mam3_mode2_rrtmg_c110318.nc', 'M:mam3_mode3:/something/else/inputdata/atm/cam/physprops/mam3_mode3_rrtmg_c110318.nc'
... /'''
>>> _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), 'ERB.f19_g16.B1850C5.sandiatoss3_intel')
''
"""
different_namelists = OrderedDict()
for namelist, gold_names in gold_namelists.items():
if (namelist not in comp_namelists):
different_namelists[namelist] = ["Missing namelist: {}\n".format(namelist)]
else:
comp_names = comp_namelists[namelist]
for name, gold_value in gold_names.items():
if (name not in comp_names):
different_namelists.setdefault(namelist, []).append(" missing variable: '{}'\n".format(name))
else:
comp_value = comp_names[name]
comments = _compare_values(name, gold_value, comp_value, case)
if comments != "":
different_namelists.setdefault(namelist, []).append(comments)
for name in comp_names:
if (name not in gold_names):
different_namelists.setdefault(namelist, []).append(" found extra variable: '{}'\n".format(name))
for namelist in comp_namelists:
if (namelist not in gold_namelists):
different_namelists[namelist] = ["Found extra namelist: {}\n".format(namelist)]
comments = ""
for namelist, nlcomment in different_namelists.items():
if len(nlcomment) == 1:
comments += nlcomment[0]
else:
comments += "Differences in namelist '{}':\n".format(namelist)
comments += "".join(nlcomment)
return comments
|
7a4b8fc1fa23f54bf1015c2cfd7b4b05da33b5b4
| 3,645,131
|
def directMultiCreate( data, cfg_params='', *, dtype='',
doInfo = True, doScatter = True, doAbsorption = True ):
"""Convenience function which creates Info, Scatter, and Absorption objects
directly from a data string rather than an on-disk or in-memory
file. Such usage obviously precludes proper caching behind the scenes,
and is intended for scenarios where the same data should not be used
repeatedly.
"""
if not dtype and not data.startswith('NCMAT') and 'NCMAT' in data:
if data.strip().startswith('NCMAT'):
raise NCBadInput('NCMAT data must have "NCMAT" as the first 5 characters (must not be preceded by whitespace)')
rawi,raws,rawa = _rawfct['multicreate_direct'](data,dtype,cfg_params,doInfo,doScatter,doAbsorption)
info = Info( ('_rawobj_',rawi) ) if rawi else None
scatter = Scatter( ('_rawobj_',raws) ) if raws else None
absorption = Absorption( ('_rawobj_',rawa) ) if rawa else None
class MultiCreated:
def __init__(self,i,s,a):
self.__i,self.__s,self.__a = i,s,a
@property
def info(self):
"""Info object (None if not present)."""
return self.__i
@property
def scatter(self):
"""Scatter object (None if not present)."""
return self.__s
@property
def absorption(self):
"""Absorption object (None if not present)."""
return self.__a
def __str__(self):
fmt = lambda x : str(x) if x else 'n/a'
return 'MultiCreated(Info=%s, Scatter=%s, Absorption=%s)'%(fmt(self.__i),
fmt(self.__s),
fmt(self.__a))
return MultiCreated(info,scatter,absorption)
|
1fe6262c0f59c836d1edcc7555b30f3182ceb2e0
| 3,645,132
|
from pandas import DataFrame
def write_date_json(date: str, df: DataFrame) -> str:
""" Just here so we can log in the list comprehension """
file_name = f"pmg_reporting_data_{date}.json"
print(f"Writing file {file_name}")
df.to_json(file_name, orient="records", date_format="iso")
print(f"{file_name} written")
return file_name
|
7af4d44559b28c92f2df409cb59abd441f29dde5
| 3,645,133
|
import numpy as np
from itertools import combinations_with_replacement
def fit_composite_peak(bands, intensities, locs, num_peaks=2, max_iter=10,
fit_kinds=('lorentzian', 'gaussian'), log_fn=print,
band_resolution=1):
"""Fit several peaks to a single spectral feature.
locs : sequence of float
Contains num_peaks peak-location guesses,
or a single feature-location guess.
fit_kinds : sequence of str
Specifies all the peak types that the composite may be made of.
Not all fit_kinds are guaranteed to appear in the final composite fit.
See fit_single_peak for details about the other arguments.
"""
# deal with bad scaling
intensities, scale = _scale_spectrum(intensities)
# get the appropriate function(s) to fit
fit_funcs = {k: _get_peak_function(k, None, False) for k in fit_kinds}
# find reasonable approximations for initial parameters: (loc, area, fwhm)
if len(locs) == num_peaks:
loc_guesses = locs
elif len(locs) == 1:
loc_guesses = np.linspace(locs[0]-band_resolution, locs[0]+band_resolution,
num_peaks)
else:
raise ValueError('Number of locs (%d) != number of peaks (%d)' % (
len(locs), num_peaks))
mean_loc = np.mean(locs)
area_guess = _guess_area(bands, intensities, mean_loc) / num_peaks
fwhm_guess = 2 * band_resolution / num_peaks
init_params = (tuple(loc_guesses) +
(area_guess,) * num_peaks +
(fwhm_guess,) * num_peaks)
loc_idx = slice(0, num_peaks)
# try all combinations of peaks, use the one that matches best
combs = []
for fit_keys in combinations_with_replacement(fit_funcs, num_peaks):
label = '+'.join(fit_keys)
fit_func = _combine_peak_functions([fit_funcs[k] for k in fit_keys])
params, pstd = _weighted_curve_fit(
bands, intensities, mean_loc, fit_func, init_params,
max_iter=max_iter, log_fn=log_fn, log_label=label,
band_resolution=band_resolution, loc_idx=loc_idx)
mask, peak_x, peak_y = _select_top99(bands, fit_func, params)
residual = np.linalg.norm(peak_y - intensities[mask])
log_fn('composite %s residual: %g' % (label, residual))
combs.append((residual, fit_keys, fit_func, params, pstd,
mask, peak_x, peak_y))
residual, fit_keys, fit_func, params, pstd, mask, peak_x, peak_y = min(combs)
# Calculate peak info, with original scaling
peak_data = dict(xmin=float(peak_x[0]), xmax=float(peak_x[-1]),
fit_kinds=fit_keys, height=[], center=[], area=[], fwhm=[],
center_std=[], area_std=[], fwhm_std=[])
peak_ys = [peak_y * scale]
for i, k in enumerate(fit_keys):
fn = fit_funcs[k]
loc, area, fwhm = map(float, params[i::num_peaks])
loc_std, area_std, fwhm_std = map(float, pstd[i::num_peaks])
peak_ys.append(fn(peak_x, loc, area, fwhm) * scale)
height = float(fn(loc, loc, area, fwhm))
peak_data['height'].append(height * scale)
peak_data['center'].append(loc)
peak_data['center_std'].append(loc_std)
peak_data['area'].append(area * scale)
peak_data['area_std'].append(area_std * scale)
peak_data['fwhm'].append(fwhm)
peak_data['fwhm_std'].append(fwhm_std)
peak_y *= scale
return mask, peak_ys, peak_data
|
b6cffef481fb158cf019831db716fd14ca8d6a86
| 3,645,134
|
def param():
"""
Create a generic Parameter object with generic name, description and no value defined
"""
return parameter.Parameter("generic_param",template_units.kg_s,"A generic param")
|
1062351f477405a8be66ea1b3e10ae1f8a6403c7
| 3,645,135
|
def extract_filter(filter_path):
"""Given a path to the weka's filter file,
return a list of selected features."""
    with open(filter_path) as f:
lnum = 0
for line in f:
lnum += 1 #pointer to the next line to read
            if line.strip().startswith('Selected attributes:'):
                print("next line to read:", lnum)
                break
features = []
for line in f: #keep reading from where we stopped (from 'break' point)
# if len(line.strip()) != 0:
features.append(line.strip())
return features
|
cc8e91ac268cdeb4d9a0e4c72eeb50659342cda6
| 3,645,136
|
def convert_to_roman_numeral(number_to_convert):
"""
Converts Hindi/Arabic (decimal) integers to Roman Numerals.
Args:
param1: Hindi/Arabic (decimal) integer.
Returns:
Roman Numeral, or an empty string for zero.
"""
arabic_numbers = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
roman_numerals = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
result = ""
for index, arabic_number in enumerate(arabic_numbers):
count = int(number_to_convert / arabic_number)
result += roman_numerals[index] * count
number_to_convert -= arabic_numbers[index] * count
return result
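# Examples:
# convert_to_roman_numeral(1994)  # -> 'MCMXCIV'
# convert_to_roman_numeral(0)     # -> ''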
|
f970517a7c2d1ceb13ec025d6d446499ce5c21ff
| 3,645,137
|
def rotate_xyz(vector: Vector, angle_x: float = 0., angle_y: float = 0., angle_z: float = 0.):
"""
    Rotate a 3d-vector around the x, y and z axes, in that order
:param vector: Vector to rotate
:param angle_x: Rotation angle around x-axis (in degrees)
:param angle_y: Rotation angle around y-axis (in degrees)
:param angle_z: Rotation angle around z-axis (in degrees)
:return: Rotated 3d vector
Dimension of vector is not checked for faster execution
"""
vec = rotate_x(vector, angle_x)
vec = rotate_y(vec, angle_y)
vec = rotate_z(vec, angle_z)
return vec
|
efe98080d3354713dfc4b7b26a19b21a1551ab69
| 3,645,138
|
def get_project_from_data(data):
"""
Get a project from json data posted to the API
"""
if 'project_id' in data:
return get_project_by_id(data['project_id'])
if 'project_slug' in data:
return get_project_by_slug(data['project_slug'])
if 'project_name' in data:
return get_project_by_name(data['project_name'])
return None
|
6ab01c4273248310ecd780fa24d6541fe1e9b0ad
| 3,645,139
|
def _from_atoms_and_bonds(atm_dct, bnd_dct):
""" Construct a molecular graph from atom and bond dictionaries.
format:
gra = (atm_dct, bnd_dct)
:param atm_dct: atom dictionary
:type atm_dct: dict
:param bnd_dct: bond dictionary
:type bnd_dct: dict
:rtype: (dict, dict)
"""
atm_dct = dict(atm_dct)
bnd_dct = dict(bnd_dct)
atm_keys = set(atm_dct.keys())
bnd_keys = set(bnd_dct.keys())
assert all(bnd_key <= atm_keys for bnd_key in bnd_keys)
return (atm_dct, bnd_dct)
|
0bd2d37442e2a141d9a0f81f77b6b45c5b82c06a
| 3,645,140
|
def __getattr__(item):
"""Ping the func map, if an attrib is not registered, fallback to the dll"""
try:
res = func_map[item]
except KeyError:
return dll.__getattr__(item)
else:
if callable(res):
return res # Return methods from interface.
else:
return dll_func(*res)
|
7b93dd68ce4b658226c0f6c072bb3cb51be82206
| 3,645,141
|
import numpy as np
def seed_random_state(seed):
"""
Turn seed into np.random.RandomState instance
"""
if (seed is None) or (isinstance(seed, int)):
return np.random.RandomState(seed)
elif isinstance(seed, np.random.RandomState):
return seed
raise ValueError("%r cannot be used to generate numpy.random.RandomState"
"instance" % seed)
|
61639222ab74ffbb3599c5d75ee0c669db463965
| 3,645,142
|
def _normalize_dashboard_link(link, request):
"""
Given a dashboard link, make sure it conforms to what we expect.
"""
if not link.startswith("http"):
# If a local url is given, assume it is using the same host
# as the application, and prepend that.
link = url_path_join(f"{request.protocol}://{request.host}", link)
if link.endswith("/status"):
# If the default "status" dashboard is given, strip it.
link = link[: -len("/status")]
return link
|
57ff43c2b364fbce94ef06447a79eeec29c06b90
| 3,645,143
|
async def delete(category_id: int):
"""Delete category with set id."""
apm.capture_message(param_message={'message': 'Category with %s id deleted.', 'params': category_id})
return await db.delete(category_id)
|
2c1aa6e9f40006d5c07fe94a9838b5f2709bc781
| 3,645,144
|
import glob
import os
def find_files(fields):
""" Finds all FLT files from given fields and places them with metadata
into OrderedDicts.
Parameters
----------
fields : list of strings
The CLEAR fields; retain ability to specify the individual pointings
so that can easily re-run single ones if find an issue.
Returns
-------
visits : OrderedDict
Keys of 'files' and 'products'; values of list of FLT files and product name.
filters : OrderedDict
Keys of filters; values of OrderedDicts with keys of orient and values of
lists of FLT files.
"""
files = glob.glob(os.path.join(PATH_RAW, '*flt.fits'))
info = grizli.utils.get_flt_info(files)
# 'info' is an astropy table.
# Creating a new table and inserting only the rows I want is quite annoying
# Just convert 'info' into an ordered dictionary
info_dict = bypass_table.decompose_table(info, return_type=dict, include_meta=True)
new_info_list = []
# Convert table to ordered dictionary, put it in a list, convert to numpy array
# to convert it back into a table. awesome
for row in range(len(info_dict['TARGNAME'])):
if info_dict['TARGNAME'][row] in fields:
new_info_list.append([info_dict[key][row] for key in info_dict.keys() if key != 'meta'])
# Now, if one of the fields is GOODS-N, need be sure all the Barro programs
# are also retained.
for field in [field for field in fields if 'N' in field or 'ERSPRIME' in field]:
if info_dict['TARGNAME'][row] in overlapping_fields[field]:
new_info_list.append([info_dict[key][row] for key in info_dict.keys() if key != 'meta'])
# Break so the Barro programs are added only once.
break
# Convert 'info' back into a table
# I couldn't simply do dtype=info.dtype, so just hard code it
new_info_tab = Table(np.array(new_info_list), names=info.colnames, meta=info.meta,
dtype=['S18', 'S5', 'S8', 'S10', 'S8', '<f8', '<f8', '<f8', '<f8',
'<f8', '<f8', '<f8'])
visits, filters = grizli.utils.parse_flt_files(info=new_info_tab, uniquename=True)
return visits, filters
|
2ba34c95e996bf7a4ecb6f886df59502f6797339
| 3,645,145
|
def to_canonical_name(resource_name: str) -> str:
"""Parse a resource name and return the canonical version."""
return str(ResourceName.from_string(resource_name))
|
e9e972a8bc2eb48751da765b65ea8881e033d05c
| 3,645,146
|
import six
from sqlalchemy.sql import sqltypes
def apply_query_filters(query, model, **kwargs):
"""Parses through a list of kwargs to determine which exist on the model,
which should be filtered as ==, and which should be filtered as LIKE
"""
for k, v in six.iteritems(kwargs):
if v and hasattr(model, k):
column = getattr(model, k)
if column.is_attribute:
if isinstance(v, list):
# List() style parameters receive WHERE IN logic.
query = query.filter(column.in_(v))
elif isinstance(column.type, sqltypes.String):
# Filter strings with LIKE
query = query.filter(column.like("%" + v + "%"))
else:
# Everything else is a strict equal
query = query.filter(column == v)
return query
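# Usage sketch (assumes a SQLAlchemy session and a mapped model `Book` with a
# String column `title` and an Integer column `year`; these names are illustrative):
# query = session.query(Book)
# query = apply_query_filters(query, Book, title='python', year=[2019, 2020])
# books = query.all()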
|
1a7885ab55645d6a00f35668718bc119a8b18939
| 3,645,147
|
from base64 import b64encode
def bytes_to_b64_str(bytestring: bytes) -> str:
"""Converts random bytes into a utf-8 encoded string"""
return b64encode(bytestring).decode(config.security.ENCODING)
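# Usage sketch (config.security.ENCODING is assumed to be an encoding name such as "utf-8"):
# import os
# token = bytes_to_b64_str(os.urandom(32))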
|
cff8e979b3d5578c39477e88f2895ebbb07f794e
| 3,645,148
|
def read_event_analysis(s:Session, eventId:int) -> AnalysisData:
"""read the analysis data by its eventId"""
res = s.query(AnalysisData).filter_by(eventId=eventId).first()
return res
|
4dba5e7489e6cee28312710fa233b7066d04b995
| 3,645,149
|
def eval_semeval2012_analogies(
vectors, weight_direct, weight_transpose, subset, subclass
):
"""
For a set of test pairs:
* Compute a Spearman correlation coefficient between the ranks produced by vectors and
gold ranks.
* Compute an accuracy score of answering MaxDiff questions.
"""
train_pairs = read_train_pairs_semeval2012(subset, subclass)
test_questions = read_test_questions_semeval2012(subset, subclass)
pairqnum2least, pairqnum2most = read_turk_answers_semeval2012(
subset, subclass, test_questions
)
turk_rank = read_turk_ranks_semeval2012(subset, subclass)
pairs_to_rank = [pair for pair, score in turk_rank]
# Assign a score to each pair, according to pairwise_analogy_func
our_pair_scores = {}
for pair in pairs_to_rank:
rank_pair_scores = []
for train_pair in train_pairs:
pair_to_rank = pair.strip().replace('"', '').split(':')
score = pairwise_analogy_func(
vectors,
standardized_uri('en', train_pair[0]),
standardized_uri('en', train_pair[1]),
standardized_uri('en', pair_to_rank[0]),
standardized_uri('en', pair_to_rank[1]),
weight_direct,
weight_transpose,
)
rank_pair_scores.append(score)
our_pair_scores[pair] = np.mean(rank_pair_scores)
# Answer MaxDiff questions using the ranks from the previous step
correct_most = 0
correct_least = 0
total = 0
for i, question in enumerate(test_questions):
question_pairs_scores = []
for question_pair in question:
score = our_pair_scores[question_pair]
question_pairs_scores.append(score)
our_answer_most = question[np.argmax(question_pairs_scores)]
our_answer_least = question[np.argmin(question_pairs_scores)]
votes_guess_least = pairqnum2least[(i, our_answer_least)]
votes_guess_most = pairqnum2most[(i, our_answer_most)]
max_votes_least = 0
max_votes_most = 0
for question_pair in question:
num_votes_least = pairqnum2least[(i, question_pair)]
num_votes_most = pairqnum2most[(i, question_pair)]
if num_votes_least > max_votes_least:
max_votes_least = num_votes_least
if num_votes_most > max_votes_most:
max_votes_most = num_votes_most
# a guess is correct if it got the same number of votes as the most frequent turkers' answer
if votes_guess_least == max_votes_least:
correct_least += 1
if votes_guess_most == max_votes_most:
correct_most += 1
total += 1
# Compute Spearman correlation of our ranks and MT ranks
our_semeval_scores = [score for pair, score in sorted(our_pair_scores.items())]
turk_semeval_scores = [score for pair, score in turk_rank]
spearman = spearmanr(our_semeval_scores, turk_semeval_scores)[0]
spearman_results = confidence_interval(spearman, total)
# Compute an accuracy score on MaxDiff questions
maxdiff = (correct_least + correct_most) / (2 * total)
low_maxdiff, high_maxdiff = proportion_confint(
(correct_least + correct_most), (2 * total)
)
maxdiff_results = pd.Series(
[maxdiff, low_maxdiff, high_maxdiff], index=['acc', 'low', 'high']
)
return [maxdiff_results, spearman_results]
|
f8b13ef5520129e753024bc5ec3d25d7ab24ccee
| 3,645,150
|
def form_examples(request, step_variables):
"""
extract the examples from the request data, if possible
@param request: http request object
@type request rest_framework.request.Request
@param step_variables: set of variable names from the bdd test
@type step_variables: set(basestring)
    @return: a (text, error) tuple: (None, None) if no examples were supplied,
        (None, error_msg) if the examples were malformed, or (examples_text, None)
        on success
    @rtype: (basestring, basestring)
"""
if u'examples' not in request.DATA:
return None, None
examples = request.DATA[u'examples']
log.debug(u'request has examples:\n{}'.format(examples))
# examples should be an array of json objects, each object being an
# example row
if not isinstance(examples, list):
return None, u'examples payload was not an array'
if not examples:
return None, u'examples array was empty'
# form the actual gherkin example text (sans "Examples:", engine adds it)
text = [u'|' + u'|'.join(step_variables) + u'|']
for ex in examples:
# verify the example obj has all the expected headers/fields
ex_field_diffs = step_variables.difference(ex.keys())
if ex_field_diffs:
return None, u'an example object was missing some fields: {} given: {}'.format(ex_field_diffs, ex)
vals = [unicode(ex[key]) for key in step_variables]
text.append(u'|' + u'|'.join(vals) + u'|')
text = u'\n'.join(text)
log.debug(u'resulting example text\n{}'.format(text))
return text, None
|
2385974ace0091fb54dbce3b820cc36082a90e78
| 3,645,151
|
def get_typefromSelection(objectType="Edge", info=0):
""" """
m_num_obj, m_selEx, m_objs, m_objNames = get_InfoObjects(info=0, printError=False)
m_found = False
for m_i_o in range(m_num_obj):
if m_found:
break
Sel_i_Object = m_selEx[m_i_o]
Obj_i_Object = m_objs[m_i_o]
Name_i_Object = m_objNames[m_i_o]
if info != 0:
print("Sel_i_Object = " + str(Sel_i_Object))
print("Obj_i_Object = " + str(Obj_i_Object))
print("Name_i_Object = " + str(Name_i_Object))
SubObjects_Inside = Sel_i_Object.SubObjects
for n in range(len(SubObjects_Inside)):
SubObject = SubObjects_Inside[n]
if info != 0:
print("SubObject = " + str(SubObject))
print("SubObject.ShapeType = " + str(SubObject.ShapeType))
if SubObject.ShapeType == objectType:
m_found = True
break
if m_found:
return Sel_i_Object, Obj_i_Object, Name_i_Object
else:
return None, None, None
|
a2e55139219417b05fc9486bb9880bc6d17e1777
| 3,645,152
|
def load_file(file_path, mode='rb', encoder='utf-8'):
"""
Loads the content of a given filename
:param file_path: The file path to load
:param mode: optional mode options
:param encoder: the encoder
:return: The content of the file
"""
with xbmcvfs.File(xbmcvfs.translatePath(file_path), mode) as file_handle:
return file_handle.readBytes().decode(encoder)
|
118f18fd651a332728e71d0b7fac5e57ad536f4b
| 3,645,153
|
def centcalc_by_weight(data):
    """
    Determines the center (of gravity) of a neutron beam on a 2D detector by weighting each pixel with its count
    --------------------------------------------------
    Arguments:
    ----------
    data : ndarray : l x m x n array with 'pixel' - data to weight over m and n
    Return:
    ----------
    centers : ndarray : l x 2 array with all the centers (cx, cy)
    INFO:
    ----------
    1. Method implemented by C. Herb
    2. CHECK the order of cx, cy if it fits to all other interpretations of 2d dimensions
    """
    centerdata = zeros((data.shape[0], 2))
    for l_idx, row in enumerate(centerdata):
        # integrate each l-slice separately; summing over the full 3D stack would
        # give every row the same (and wrongly averaged) center
        x_int = sum(data[l_idx], axis=0)
        y_int = sum(data[l_idx], axis=1)
        row[0] = sum([i * xval for i, xval in enumerate(x_int)]) / sum(x_int)
        row[1] = sum([j * yval for j, yval in enumerate(y_int)]) / sum(y_int)
    return centerdata
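# Sanity check on a synthetic 1 x 5 x 5 stack with a single bright pixel at
# (row 3, col 1). The bare zeros()/sum() calls above imply `from numpy import *`
# in the original module; that import is not shown here, so it is added for the demo.
from numpy import *  # noqa: F401,F403
frame = zeros((1, 5, 5))
frame[0, 3, 1] = 10.0
print(centcalc_by_weight(frame))  # -> [[1. 3.]]  i.e. cx = 1, cy = 3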
|
6d3bb14820bb11ac61c436055d723bbd49f77740
| 3,645,154
|
def sum(a, axis=None, dtype=None, out=None, keepdims=False):
"""Returns the sum of an array along given axes.
Args:
a (cupy.ndarray): Array to take sum.
axis (int or sequence of ints): Axes along which the sum is taken.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
keepdims (bool): If ``True``, the specified axes are remained as axes
of length one.
Returns:
cupy.ndarray: The result array.
.. seealso:: :func:`numpy.sum`
"""
if fusion._is_fusing():
if keepdims:
raise NotImplementedError(
'cupy.sum does not support `keepdims` in fusion yet.')
return fusion._call_reduction(_math.sum_auto_dtype,
a, axis=axis, dtype=dtype, out=out)
# TODO(okuta): check type
return a.sum(axis, dtype, out, keepdims)
|
7382cbd6cbcf555cf63acc7b48a65f59a3d847c3
| 3,645,155
|
import os
from distutils import util
def to_bool(env, default="false"):
    """
    Convert the string value of the environment variable ``env`` to a bool.
    """
return bool(util.strtobool(os.getenv(env, default)))
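# Example with a hypothetical flag (strtobool accepts y/yes/t/true/on/1 and their
# negative counterparts):
os.environ["FEATURE_X"] = "yes"
assert to_bool("FEATURE_X") is True
assert to_bool("SOME_UNSET_FLAG") is False  # falls back to the "false" default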
|
a8471d8bb7600e7f54e8ad1612a5f1c090e864c5
| 3,645,156
|
import textwrap
def public_key():
""" returns public key """
return textwrap.dedent('''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAwBLTc+75h13ZyLWlvup0OmbhZWxohLMMFCUBClSMxZxZdMvyzBnW
+JpOQuvnasAeTLLtEDWSID0AB/EG68Sesr58Js88ORUw3VrjObiG15/iLtAm6hiN
BboTqd8jgWr1yC3LfNSKJk82qQzHJPlCO9Gc5HcqvWrIrqrJL2kwjOU66U/iRxJu
dyOrz0sBkVgfwDBqNS96L0zFQCqk70w9KyOJqe4JNJUtBas6lbwgChDU4/B3BDW5
PYJy2Pp8MSs2n1lhrUkXxRnj+Vl5wLQLdwog1XAGu2J8pIckPg/aB7mB/fSlFihU
bnFlRlgHrlh8gyNYztbGWKMrQ4Bz2831PQIDAQAB
-----END RSA PUBLIC KEY-----
''')
|
4d27c3e72714bccd885178c05598f0f1d8d7914d
| 3,645,157
|
import pickle
import tqdm
def fetch_WIPOgamma(subset, classification_level, data_home, extracted_path, text_fields = ['abstract', 'description'], limit_description=300):
"""
    Fetches the WIPO-gamma dataset
    :param subset: 'train' or 'test' split
    :param classification_level: the classification level, either 'subclass' or 'maingroup'
    :param data_home: directory containing the original 11 English zips
    :param extracted_path: directory used to extract and process the original files
    :param text_fields: the fields to extract, chosen among 'abstract', 'description', 'claims'
    :param limit_description: the maximum number of words to take from the description field (default 300); set to -1 for all
    :return: a list of WipoGammaDocument objects for the requested split
"""
assert subset in {"train", "test"}, 'unknown target request (valid ones are "train" or "test")'
    assert len(text_fields) > 0, 'at least one text field must be indicated'
if not exists(data_home):
        raise ValueError(f'{data_home} does not exist, and the dataset cannot be downloaded automatically, '
                         f'since you need to request permission. Please refer to {WIPO_URL}')
create_if_not_exist(extracted_path)
config = f'{"-".join(text_fields)}'
    if 'description' in text_fields: config += f'-{limit_description}'
pickle_path=join(extracted_path, f'wipo-{subset}-{classification_level}-{config}.pickle')
if exists(pickle_path):
print(f'loading pickled file in {pickle_path}')
return pickle.load(open(pickle_path,'rb'))
print('pickle file not found, processing...(this will take some minutes)')
extracted = sum([exists(f'{extracted_path}/EnglishWipoGamma{(i+1)}-{config}.txt') for i in range(11)])==11
if not extracted:
print(f'extraction files not found, extracting files in {data_home}... (this will take some additional minutes)')
Parallel(n_jobs=-1)(
delayed(extract)(
join(data_home, file), join(extracted_path, file.replace('.zip', f'-{config}.txt')), text_fields, limit_description
)
for file in list_files(data_home)
)
doc_labels, train_ids, test_ids = read_classification_file(data_home, classification_level=classification_level) # or maingroup
print(f'{len(doc_labels)} documents classified split in {len(train_ids)} train and {len(test_ids)} test documents')
train_request = []
test_request = []
pbar = tqdm([filename for filename in list_files(extracted_path) if filename.endswith(f'-{config}.txt')])
labelcut = LabelCut(classification_level)
errors=0
for proc_file in pbar:
pbar.set_description(f'processing {proc_file} [errors={errors}]')
if not proc_file.endswith(f'-{config}.txt'): continue
lines = open(f'{extracted_path}/{proc_file}', 'rt').readlines()
for lineno,line in enumerate(lines):
parts = line.split('\t')
assert len(parts)==4, f'wrong format in {extracted_path}/{proc_file} line {lineno}'
id,mainlabel,alllabels,text=parts
mainlabel = labelcut.trim(mainlabel)
alllabels = labelcut.trim(alllabels.split())
# assert id in train_ids or id in test_ids, f'id {id} out of scope'
if id not in train_ids and id not in test_ids:
errors+=1
else:
# assert mainlabel == doc_labels[id][0], 'main label not consistent'
request = train_request if id in train_ids else test_request
request.append(WipoGammaDocument(id, text, mainlabel, alllabels))
print('pickling requests for faster subsequent runs')
pickle.dump(train_request, open(join(extracted_path,f'wipo-train-{classification_level}-{config}.pickle'), 'wb', pickle.HIGHEST_PROTOCOL))
pickle.dump(test_request, open(join(extracted_path, f'wipo-test-{classification_level}-{config}.pickle'), 'wb', pickle.HIGHEST_PROTOCOL))
    if subset == 'train':
return train_request
else:
return test_request
|
05d3f19950321c5e94abf846bd7fab2d9b905f39
| 3,645,158
|
from typing import Optional
import requests
def latest_maven_version(group_id: str, artifact_id: str) -> Optional[str]:
"""Helper function to find the latest released version of a Maven artifact.
Fetches metadata from Maven Central and parses out the latest released
version.
Args:
group_id (str): The groupId of the Maven artifact
artifact_id (str): The artifactId of the Maven artifact
    Returns:
        The latest version of the artifact as a string, "0.0.0" if the metadata
        request fails, or None if no version could be parsed
    """
group_path = "/".join(group_id.split("."))
url = (
f"https://repo1.maven.org/maven2/{group_path}/{artifact_id}/maven-metadata.xml"
)
response = requests.get(url)
if response.status_code >= 400:
return "0.0.0"
return version_from_maven_metadata(response.text)
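# version_from_maven_metadata() is not part of this snippet; a minimal sketch of
# what such a parser might look like (an assumption -- the project's real helper
# may differ):
import xml.etree.ElementTree as ET

def version_from_maven_metadata(metadata_xml: str) -> Optional[str]:
    """Hypothetical parser: read <versioning><latest> (or <release>) from maven-metadata.xml."""
    root = ET.fromstring(metadata_xml)
    node = root.find("./versioning/latest")
    if node is None:
        node = root.find("./versioning/release")
    return node.text if node is not None else None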
|
1c5f3b3e683e5e40f9779d53d93dad9e1397e25d
| 3,645,159
|
def zeropoint(info_dict):
"""
Computes the zero point of a particular system configuration
    (filter, atmospheric conditions, optics, camera).
The zeropoint is the magnitude which will lead to one count per second.
By definition ZP = -2.5*log10( Flux_1e-_per_s / Flux_zeromag ),
where Flux_1e-_per_s = 1 e-/s and Flux_zeromag =
sum_over_passband ( zero_flux * system_response * A_tel ) in e-/s
Hence:
ZP = 2.5*log10( sum_over_passband ( zero_flux * system_response * A_tel ))
    Parameters
    ----------
    info_dict: dictionary
        must contain 'wavelength_ang' (wavelength in angstrom), 'Flux_zero_Jy',
        'system_response', 'Trans_atmosphere' and 'A_tel'
    Returns
    -------
    info_dict: dictionary
        the input dictionary with the 'zeropoint' key added (in magnitudes)
    """
# Integrate over the wavelengths
# Flux_zero = np.trapz(zeromag_to_flux(info_dict,wavelength,unit='ph')
# * precomp.system_response(info_dict,wavelength),
# wavelength)*precomp.A_tel(info_dict)
# Flux_zero = np.trapz(zeromag_to_flux(info_dict,unit='ph')
# * info_dict['system_response'],
# info_dict['wavelength_ang'])
# * info_dict['A_tel']
Flux_zero = (
np.trapz(
utils.flambda_to_fph(
info_dict["wavelength_ang"],
utils.fJy_to_flambda(
info_dict["wavelength_ang"], info_dict["Flux_zero_Jy"]
),
)
* info_dict["system_response"]
* info_dict["Trans_atmosphere"],
info_dict["wavelength_ang"],
)
* info_dict["A_tel"]
)
ZP = 2.5 * np.log10(Flux_zero)
info_dict["zeropoint"] = ZP
return info_dict
|
0deba397aa14226a45af7a1acca6f51ec846083b
| 3,645,160
|
def model_comp(real_data, deltaT, binSize, maxTimeLag, abc_results1, final_step1, abc_results2, final_step2,\
model1, model2, distFunc, summStat_metric, ifNorm,\
numSamplesModelComp, eval_start = 3, disp1 = None, disp2 = None):
"""Perform Baysian model comparison with ABC fits from model1 and model2.
Parameters
-----------
real_data : nd array
        time-series of continuous data, e.g., OU process, (numTrials * numTimePoints)
or binned spike counts (numTrials * numBin).
deltaT : float
temporal resolution of data (or binSize of spike counts).
binSize : float
bin-size for computing the autocorrelation.
maxTimeLag : float
maximum time-lag for computing the autocorrelation.
abc_results1: object
output of fitting model1 with aABC algorithm.
final_step1 : int
final step of aABC fitting for model1.
abc_results2: object
output of fitting model2 with aABC algorithm.
final_step2 : int
final step of aABC fitting for model2.
model1: string
selected generative model for model1 (from generative models list).
model2: string
selected generative model for model2 (from generative models list).
distFunc: string
'linear_distance' or 'logarithmic_distance'.
summStat_metric : string
        metric for computing summary statistics ('comp_cc', 'comp_ac_fft', 'comp_psd').
ifNorm : string
if normalize the autocorrelation or PSD.
numSamplesModelComp: int
number of samples from posterior distributions to compute the Bayes factor.
eval_start : int, default 3
defines the number of smallest errors we ignore before starting CDF computation.
disp1 : float, default None
The value of dispersion parameter if computed with the grid search method for model1.
disp2 : float, default None
The value of dispersion parameter if computed with the grid search method for model2.
Returns
-------
d1 : 1d array
distribution of errors (distances) for model1.
d2 : 1d array
distribution of errors (distances) for model2.
cdf1 : 1d array
CDF of errors for model1.
cdf2 : 1d array
CDF of errors for model2.
err_threshs : 1d array
error thresholds for which CDFs are computed
bf : 1d array
Bayes factors for each error threshold in "err_threshs" (CDF_M2/CDF_M1).
"""
# extract abc fits
theta_accepted1 = abc_results1[final_step1 - 1]['theta accepted']
theta_accepted2 = abc_results2[final_step2 - 1]['theta accepted']
# extract real data statistics
data_sumStat, data_mean, data_var, T, numTrials = extract_stats(real_data, deltaT, binSize,\
summStat_metric, ifNorm, maxTimeLag)
# compute distances
numSamplesPosterior1 = len(theta_accepted1[0])
numSamplesPosterior2 = len(theta_accepted2[0])
print('Computing distances for model1:')
d1 = gen_model_dist(data_sumStat, theta_accepted1, numSamplesModelComp, numSamplesPosterior1, model1, distFunc,\
summStat_metric, ifNorm, deltaT, binSize, T, numTrials, data_mean, data_var, maxTimeLag, disp1)
print('Computing distances for model2:')
d2 = gen_model_dist(data_sumStat, theta_accepted2, numSamplesModelComp, numSamplesPosterior2, model2, distFunc,\
summStat_metric, ifNorm, deltaT, binSize, T, numTrials, data_mean, data_var, maxTimeLag, disp2)
# compute CDFs and Bayes factors
cdf1, cdf2, eval_points, bf = comp_cdf(d1, d2, numSamplesModelComp, eval_start)
err_threshs = eval_points
return d1, d2, cdf1, cdf2, err_threshs, bf
|
060fa9dcd133363b93dd22353eee5c47e3fd90dd
| 3,645,161
|
def unwrap(*args, **kwargs):
"""
    This is an alias for unwrap_array, which you should use instead.
    """
    if deprecation_warnings:
        print('Use of the "unwrap" function is deprecated, the new name '
              'is "unwrap_array".')
return unwrap_array(*args, **kwargs)
|
156b5607b3b76fff66ede40eed79ca5cf74b1313
| 3,645,162
|
def solve():
"""solve form"""
if request.method == 'GET':
sql="SELECT g.class, p.origin, p.no, p.title, p.address FROM GIVE g, PROBLEM p WHERE g.origin=p.origin AND g.no=p.no AND class = 1"
cursor.execute(sql)
week1=[]
for result in cursor.fetchall():
week1.append({
"class":result['class'],
"origin":result['origin'],
"no":result['no'],
"title":result['title'],
"address":result['address']
})
sql="SELECT g.class, p.origin, p.no, p.title, p.address FROM GIVE g, PROBLEM p WHERE g.origin=p.origin AND g.no=p.no AND class = 2"
cursor.execute(sql)
week2=[]
for result in cursor.fetchall():
week2.append({
"class":result['class'],
"origin":result['origin'],
"no":result['no'],
"title":result['title'],
"address":result['address']
})
return render_template('week.html',week1 = week1, week2 = week2)
|
4d1d117b686ef4bb265b9fcc2185f07351710f49
| 3,645,163
|
import re
def build_path(entities, path_patterns, strict=False):
"""
Constructs a path given a set of entities and a list of potential
filename patterns to use.
Args:
entities (dict): A dictionary mapping entity names to entity values.
path_patterns (str, list): One or more filename patterns to write
the file to. Entities should be represented by the name
surrounded by curly braces. Optional portions of the patterns
should be denoted by square brackets. Entities that require a
specific value for the pattern to match can pass them inside
carets. Default values can be assigned by specifying a string after
            the pipe operator (e.g., {type<image>|bold} would only match
            the pattern if the entity 'type' was passed and its value is
            "image", otherwise the default value "bold" will be used).
                Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'
                Result 1: 'sub-01/var-SES/1045.csv'
strict (bool): If True, all passed entities must be matched inside a
pattern in order to be a valid match. If False, extra entities will
be ignored so long as all mandatory entities are found.
Returns:
A constructed path for this file based on the provided patterns.
"""
if isinstance(path_patterns, string_types):
path_patterns = [path_patterns]
    # Loop over available patterns, return the first one that matches all
for pattern in path_patterns:
# If strict, all entities must be contained in the pattern
if strict:
            defined = re.findall(r'\{(.*?)(?:<[^>]+>)?\}', pattern)
if set(entities.keys()) - set(defined):
continue
# Iterate through the provided path patterns
new_path = pattern
        optional_patterns = re.findall(r'\[(.*?)\]', pattern)
# First build from optional patterns if possible
for optional_pattern in optional_patterns:
optional_chunk = replace_entities(entities, optional_pattern) or ''
new_path = new_path.replace('[%s]' % optional_pattern,
optional_chunk)
# Replace remaining entities
new_path = replace_entities(entities, new_path)
if new_path:
return new_path
return None
|
120811c5560fabe48e10f412c0b3c0ab3592a887
| 3,645,164
|
def schedule_compensate():
"""
swagger-doc: 'schedule'
required: []
req:
course_schedule_id:
        description: 'course schedule id'
        type: 'string'
      start:
        description: 'lesson start time, format YYYY-mm-dd HH:MM:ss.SSS'
        type: 'string'
      end:
        description: 'lesson end time, in sql format YYYY-mm-dd HH:MM:ss.SSS'
        type: 'string'
      schedule_type:
        description: 'lesson type'
type: 'string'
res:
verify_code:
description: 'id'
type: ''
"""
course_schedule_id = request.json['course_schedule_id']
start = request.json['start'].replace('T', ' ').replace('Z', '')
end = request.json['end'].replace('T', ' ').replace('Z', '')
schedule_type = request.json['schedule_type']
with session_scope(db) as session:
courseSchedule = session.query(CourseSchedule).filter_by(id=course_schedule_id).one_or_none()
if courseSchedule is None:
return jsonify({
"error": "not found course_schedule: {0}".format(
course_schedule_id)
}), 500
courseschedule = CourseSchedule(
start = start,
end = end,
name = courseSchedule.name,
state = 98,
override_course_type=courseSchedule.override_course_type,
course_id = courseSchedule.course_id,
schedule_type = schedule_type,
delete_flag = 'IN_FORCE',
updated_by=getattr(g, current_app.config['CUR_USER'])['username']
)
session.add(courseschedule)
session.flush()
course = session.query(Course).filter_by(id=courseSchedule.course_id).one_or_none()
class_type =ClassroomTypeEnum.ONE_VS_ONE.name
if course.class_type != 1:
class_type = ClassroomTypeEnum.ONE_VS_MANY.name
live_service.create_room(getattr(g, current_app.config['CUR_USER'])['username'], courseschedule.id,courseSchedule.name, getTimeDiff(start,end),class_type,request.json['start'],0,'en')
studyschedules = session.query(StudySchedule).filter_by(course_schedule_id=course_schedule_id).all()
for studyschedule in studyschedules:
sudyschedule = StudySchedule(
actual_start = start,
actual_end = end,
name = courseSchedule.name,
study_state = 1,
order_id = studyschedule.order_id,
course_schedule_id = courseschedule.id,
student_id = studyschedule.student_id,
schedule_type = schedule_type,
delete_flag = 'IN_FORCE',
updated_by=getattr(g, current_app.config['CUR_USER'])['username']
)
session.add(sudyschedule)
session.flush()
return jsonify({'id':courseschedule.id })
|
7c3a14af27287e64535d7b9150531d8646cb0cfe
| 3,645,165
|
def parse(xml):
"""
Parse headerdoc XML into a dictionary format.
Extract classes, functions, and global variables from the given XML output
by headerdoc. Some formatting and text manipulation takes place while
parsing. For example, the `@example` is no longer recognized by headerdoc.
`parse()` will extract examples separately from the given description.
[Admonitions](https://python-markdown.github.io/extensions/admonition/)
are also not kept in the correct format by headerdoc. Admonitions text must
be indented to the same level as the admonition title, but headerdoc strips
leading whitespace. The dictionary returned from `parse` will have the
correct indentation restored.
Args:
xml (ElementTree): An `ElementTree` read from a headerdoc XML file. The
root must be the `<header>` element.
Returns:
Dict
"""
return _parse_script(xml)
|
f130a7b457d3b347153a3ea013f69a11dc3859c2
| 3,645,166
|
import os
def ReadKeywordValueInFile(filename,keyword):
""" Get value in the expression of keyword=vlaue in file
:param str filenname: file name
:param str keywors: keyword string
:return: value(str) - value string
"""
value=None; lenkey=len(keyword)
if not os.path.exists(filename): return value
fmomenu=False; found=False
f=open(filename)
dat=''; lenkey=len(keyword); leftpar=0; rightpar=0
for s in f.readlines():
cm=s.find('#')
if cm > 0: s=s[:cm]
s=s.strip()
if len(s) == 0: continue
items=s.split()
for item in items:
if item[:lenkey] == keyword:
found=True; value=item.split('=')[1]; value=value.strip()
break
f.close()
if not found: value=None
return value
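# Quick illustrative check with a throwaway input file:
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".inp", delete=False) as tmp:
    tmp.write("nx=128   # grid size\nmethod=cg\n")
    demo_path = tmp.name
print(ReadKeywordValueInFile(demo_path, "nx"))      # prints: 128
print(ReadKeywordValueInFile(demo_path, "absent"))  # prints: None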
|
cea315129a39384a80de227d17f647d24fcd27c7
| 3,645,167
|
def getSolventList():
"""
Return list of solvent molecules for initializing solvation search form.
If any of the Mintz parameters are None, that solvent is not shown in the list since it will cause error.
"""
database.load('solvation', '')
solvent_list = []
for index, entry in database.solvation.libraries['solvent'].entries.items():
mintz_parameter_list = [entry.data.s_h, entry.data.b_h, entry.data.e_h, entry.data.l_h, entry.data.a_h,
entry.data.c_h]
if not any(h is None for h in mintz_parameter_list):
solvent_list.append((entry.label, index))
return solvent_list
|
ea202459ac69b02d362c422935e396be04e7ec30
| 3,645,168
|
import random
import pandas as pd
def subset_samples(md_fp: str, factor: str, unstacked_md: pd.DataFrame,
                   number_of_samples: int, logs: list) -> pd.DataFrame:
"""
Subset the metadata to a maximum set of 100 samples.
! ATTENTION ! In case there are many columns with np.nan,
these should be selected to select samples
that do have actual numerical values...
Parameters
----------
md_fp : str
Metadata file path.
factor : str
Stratification factor.
unstacked_md : pd.DataFrame
Metadata table subset for the current
stratification and numerical variables.
number_of_samples : int
Number of samples to randomly select to
compute the distributions.
logs : list
List of lists: each nested list is:
[variable, metadata file path, warning message, a number]
Returns
-------
figure_tab : pd.DataFrame
re-stacked metadata table
"""
    # get the unique sample names
    samples = set(unstacked_md.sample_name.tolist())
# take either 100 or if less, all the samples as data for the figure
if len(samples) < number_of_samples:
logs.append([factor, md_fp, 'not enough samples', len(samples)])
figure_tab = unstacked_md.copy()
else:
        random_samples = random.sample(list(samples), number_of_samples)  # sample() needs a sequence, not a set
figure_tab = unstacked_md.loc[unstacked_md.sample_name.isin(random_samples),:].copy()
return figure_tab
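# Tiny illustration with made-up metadata (the column names are hypothetical):
md_demo = pd.DataFrame({
    "sample_name": [f"s{i}" for i in range(10)],
    "variable": "age",
    "value": range(10),
})
logs_demo = []
sub = subset_samples("metadata.tsv", "age", md_demo, number_of_samples=4, logs=logs_demo)
print(len(set(sub.sample_name)))  # -> 4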
|
36b99aaecf7fc67b4a5809bb00758b158d753997
| 3,645,169
|
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
        image: decoded RGB image as a uint8 array of shape (height, width, 3).
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.GFile(filename, "rb") as f:
image_data = f.read()
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image, height, width
|
2b5c78a6b641ce31aed3f80590dcf6dbe3a36baa
| 3,645,170
|
def to_rna_sequences(model):
"""
Convert all the sequences present in the model to RNA.
:args dict model: Description model.
"""
for seq, path in yield_sub_model(model, ["sequence"]):
set_by_path(model, path, str(Seq(seq).transcribe().lower()))
return model
|
8a0929e631e6d162f47ba1773975116b0e2caff4
| 3,645,171
|
def rest_get_repositories(script, project=None, start=0, limit=50):
"""
Gets a list of repositories via REST
:param script: A TestScript instance
:type script: TestScript
:param project: An optional project
:type project: str
:param start: The offset to start from
:type start: int
:param limit: The max number of items to return
:type limit: int
:return: repositories or None
:rtype: list(Repository)
"""
if project:
project_filter = "projects/%s/" % project
else:
project_filter = ""
j = script.rest("GET", "/rest/api/latest/%srepos" % project_filter, {
"start": str(start),
"limit": str(limit)
})
if is_http_ok():
return map(lambda repo: Repository(repo["project"]["key"], repo["slug"]), j["values"])
|
772b1682ba3de11b98d943f4a768386b7907b1d9
| 3,645,172
|
def clean_float(input_float):
"""
Return float in seconds (even if it was a timestamp originally)
"""
return (timestamp_to_seconds(input_float)
if ":" in str(input_float) else std_float(input_float))
|
8ce6ca89b40750bb157d27a68631ddd64a824f3a
| 3,645,173
|
def resize(
image,
output_shape,
order=1,
mode="constant",
cval=0,
clip=True,
preserve_range=False,
anti_aliasing=False,
anti_aliasing_sigma=None,
):
"""A wrapper for Scikit-Image resize().
Scikit-Image generates warnings on every call to resize() if it doesn't
receive the right parameters. The right parameters depend on the version
of skimage. This solves the problem by using different parameters per
version. And it provides a central place to control resizing defaults.
"""
if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
# New in 0.14: anti_aliasing. Default it to False for backward
# compatibility with skimage 0.13.
return skimage.transform.resize(
image,
output_shape,
order=order,
mode=mode,
cval=cval,
clip=clip,
preserve_range=preserve_range,
anti_aliasing=anti_aliasing,
anti_aliasing_sigma=anti_aliasing_sigma,
)
else:
return skimage.transform.resize(
image,
output_shape,
order=order,
mode=mode,
cval=cval,
clip=clip,
preserve_range=preserve_range,
)
|
21273e385e892e21e6fb40241d9c430b6f41ba3d
| 3,645,174
|
def get_subsystem_fidelity(statevector, trace_systems, subsystem_state):
"""
Compute the fidelity of the quantum subsystem.
Args:
statevector (list|array): The state vector of the complete system
trace_systems (list|range): The indices of the qubits to be traced.
to trace qubits 0 and 4 trace_systems = [0,4]
subsystem_state (list|array): The ground-truth state vector of the subsystem
Returns:
The subsystem fidelity
"""
rho = np.outer(np.conj(statevector), statevector)
rho_sub = partial_trace(rho, trace_systems)
rho_sub_in = np.outer(np.conj(subsystem_state), subsystem_state)
fidelity = np.trace(
sqrtm(
np.dot(
np.dot(sqrtm(rho_sub), rho_sub_in),
sqrtm(rho_sub)
)
)
) ** 2
return fidelity
|
13d3ef7fc3e7d414fe6b936f839c42321a5e0982
| 3,645,175
|
import requests
def jyfm_data_coke(indicator="焦炭总库存", headers=""):
"""
    jyfm (交易法门) data - black (ferrous) series - coke
:param indicator: ["焦企产能利用率-100家独立焦企产能利用率", "焦企产能利用率-230家独立焦企产能利用率",
"焦炭日均产量-100家独立焦企焦炭日均产量", "焦炭日均产量-230家独立焦企焦炭日均产量", "焦炭总库存",
"焦炭焦企库存-100家独立焦企焦炭库存", "焦炭焦企库存-230家独立焦企焦炭库存", "焦炭钢厂库存", "焦炭港口库存", "焦企焦化利润"]
:type indicator: str
:param headers: headers with cookies
:type headers: dict
:return: result
:rtype: pandas.DataFrame
"""
res = requests.get(jyfm_data_coke_url_dict[indicator], headers=headers,)
    # the returned series have inconsistent lengths, so pad first and then transpose
return pd.read_json(res.text, orient="index").T
|
23f1c0cad5cb93953f5d5745f0c3e14aedd51fd8
| 3,645,176
|
from typing import Tuple
from typing import Any
def generate_index_distribution(
numTrain: int, numTest: int, numValidation: int, params: UQDistDict
) -> Tuple[Any, ...]:
"""
Generates a vector of indices to partition the data for training. NO
CHECKING IS DONE: it is assumed that the data could be partitioned in the
specified blocks and that the block indices describe a coherent partition.
:param int numTrain: Number of training data points
:param int numTest: Number of testing data points
:param int numValidation: Number of validation data points (may be zero)
:param Dict params: Contains the keywords that control the behavior of the function \
(uq_train_fr, uq_valid_fr, uq_test_fr for fraction specification, \
uq_train_vec, uq_valid_vec, uq_test_vec for block list specification, and \
uq_train_bks, uq_valid_bks, uq_test_bks for block number specification)
:return: Tuple of numpy arrays
- indexTrain (int numpy array): Indices for data in training
- indexValidation (int numpy array): Indices for data in validation (if any)
- indexTest (int numpy array): Indices for data in testing (if merging)
"""
if all(k in params for k in ("uq_train_fr", "uq_valid_fr", "uq_test_fr")):
# specification by fraction
print("Computing UQ cross-validation - Distributing by FRACTION")
return generate_index_distribution_from_fraction(
numTrain, numTest, numValidation, params
)
elif all(k in params for k in ("uq_train_vec", "uq_valid_vec", "uq_test_vec")):
# specification by block list
print("Computing UQ cross-validation - Distributing by BLOCK LIST")
return generate_index_distribution_from_block_list(
numTrain, numTest, numValidation, params
)
elif all(k in params for k in ("uq_train_bks", "uq_valid_bks", "uq_test_bks")):
# specification by block size
print("Computing UQ cross-validation - Distributing by BLOCK NUMBER")
return generate_index_distribution_from_blocks(
numTrain, numTest, numValidation, params
)
else:
print("ERROR !! No consistent UQ parameter specification found !! ... exiting ")
raise KeyError(
"No valid triplet of ('uq_train_*', 'uq_valid_*', 'uq_test_*') found. (* is any of fr, vec or bks)"
)
|
f3c35d5c99a3f79b40f3c7220519152b85fe679d
| 3,645,177
|
from random import randint
def generacionT(v, m):
"""
    Build the transformation matrix T used in an Oil-and-Vinegar style scheme:
    an n x n block matrix [[I_v, R], [0, I_m]], where R is a random binary v x m block.
    Parameters:
    m (int): Number of oil variables
    v (int): Number of vinegar variables
    Returns:
    T (matrix): Matrix with dimension nxn, where n = v + m
"""
    # Distortion matrix
T = []
n = v + m
for i in range(n):
row = []
if i < v:
for k in range(n):
                if k < v: # identity block of dimension v
if i == k:
row += [1]
else:
row += [0]
                else: # random v x m block
if randint(0,2) == 1:
row += [1]
else:
row += [0]
else:
for k in range(n):
                if k < v: # zero block of dimension v
row += [0]
                else: # identity block of dimension m
if i == k:
row += [1]
else:
row += [0]
T += [row]
return T
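# Quick illustration with hypothetical sizes (v=3 vinegar, m=2 oil variables):
# the upper-left 3x3 and lower-right 2x2 blocks are identities, the upper-right
# 3x2 block is random binary, and the lower-left block is zero.
for fila in generacionT(3, 2):
    print(fila)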
|
6fd4d9f8ebeea5f422c937b3b92c5c5134ffd541
| 3,645,178
|
from io import StringIO
import pandas
from datetime import datetime
def isotherm_from_bel(path):
"""
Get the isotherm and sample data from a BEL Japan .dat file.
Parameters
----------
path : str
Path to the file to be read.
    Returns
    -------
    PointIsotherm
        the isotherm, with the parsed metadata attached
"""
with open(path) as file:
line = file.readline().rstrip()
meta = {}
data = StringIO()
while line != '':
values = line.split(sep='\t')
line = file.readline().rstrip()
if len(values) < 2: # If "title" section
# read adsorption section
if values[0].strip().lower().startswith('adsorption data'):
line = file.readline().rstrip() # header
file_headers = line.replace('"', '').split('\t')
new_headers = ['branch']
for h in file_headers:
txt = next((
_FIELDS['isotherm_data'][a]
for a in _FIELDS['isotherm_data']
if h.lower().startswith(a)
), h)
new_headers.append(txt)
if txt == 'loading':
meta['loading_basis'] = 'molar'
for (u, c) in (
('/mmol', 'mmol'),
('/mol', 'mol'),
('/ml(STP)', 'cm3(STP)'),
('/cm3(STP)', 'cm3(STP)'),
):
if u in h:
meta['loading_unit'] = c
meta['material_basis'] = 'mass'
for (u, c) in (
('g-1', 'g'),
('kg-1', 'kg'),
):
if u in h:
meta['material_unit'] = c
if txt == 'pressure':
meta['pressure_mode'] = 'absolute'
for (u, c) in (
('/mmHg', 'torr'),
('/torr', 'torr'),
('/kPa', 'kPa'),
('/bar', 'bar'),
):
if u in h:
meta['pressure_unit'] = c
data.write('\t'.join(new_headers) + '\n')
line = file.readline() # firstline
while not line.startswith('0'):
data.write('False\t' + line)
line = file.readline()
# read desorption section
elif values[0].strip().lower().startswith('desorption data'):
file.readline() # header - discard
line = file.readline() # firstline
while not line.startswith('0'):
data.write('True\t' + line)
line = file.readline()
else:
continue
else:
values = [v.strip('"') for v in values]
key = values[0].lower()
try:
field = next(
v for k, v in _FIELDS.items()
if any([key.startswith(n) for n in v.get('text', [])])
)
except StopIteration:
continue
meta[field['name']] = values[1]
# Read prepared table
data.seek(0) # Reset string buffer to 0
data_df = pandas.read_csv(data, sep='\t')
data_df.dropna(inplace=True, how='all', axis='columns')
# Set extra metadata
meta['date'] = datetime.strptime(meta['date'], r'%y/%m/%d').isoformat()
meta['apparatus'] = 'BEL ' + meta["serialnumber"]
meta['loading_key'] = 'loading'
meta['pressure_key'] = 'pressure'
meta['other_keys'] = sorted([
a for a in data_df.columns
if a not in ['loading', 'pressure', 'measurement', 'branch']
])
return PointIsotherm(isotherm_data=data_df, **meta)
|
7aa144942ef6e0cb3d2926817015692b8fe8b99b
| 3,645,179
|
import os
import random
def image(height, width, image_dir):
"""
        Create a background by cropping a randomly chosen image from image_dir
"""
images = [xx for xx in os.listdir(image_dir) \
if xx.endswith(".jpeg") or xx.endswith(".jpg") or xx.endswith(".png")]
if len(images) > 0:
image_name = images[random.randint(0, len(images) - 1)]
pic = Image.open(os.path.join(image_dir, image_name))
pic_original_width = pic.size[0]
pic_original_height = pic.size[1]
if pic.size[0] < width:
pic = pic.resize([width, int(pic.size[1] * (width / pic.size[0]))], Image.ANTIALIAS)
if pic.size[1] < height:
pic = pic.resize([int(pic.size[0] * (height / pic.size[1])), height], Image.ANTIALIAS)
pic_final_width = pic.size[0]
pic_final_height = pic.size[1]
if pic.size[0] == width:
x = 0
else:
x = random.randint(0, pic.size[0] - width)
if pic.size[1] == height:
y = 0
else:
y = random.randint(0, pic.size[1] - height)
return pic.crop((x, y, x + width, y + height)), (image_name, pic_original_width, pic_original_height,
pic_final_width, pic_final_height, x, y, x + width, y + height)
else:
raise Exception("No images where found in the images folder!")
|
c08c047490d1f14d7c97b8bd7927fca5f04dfeed
| 3,645,180
|
def model(x, a, b, c):
"""
Compute
.. math::
y = A + Be^{Cx}
Parameters
----------
x : array-like
The value of the model will be the same shape as the input.
a : float
The additive bias.
b : float
The multiplicative bias.
c : float
The exponent.
    Returns
    -------
y : array-like
An array of the same shape as ``x``, containing the model
computed for the given parameters.
"""
return a + b * exp(c * x)
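# Illustrative fit of the model above (a sketch: the original snippet does not
# show where `exp` comes from, so numpy is assumed; SciPy is also an assumption).
from numpy import exp  # assumed source of the bare exp() used above
import numpy as np
from scipy.optimize import curve_fit

x_demo = np.linspace(0.0, 1.0, 50)
y_demo = model(x_demo, 1.0, 2.0, -3.0) + 0.01 * np.random.randn(x_demo.size)
(a_fit, b_fit, c_fit), _ = curve_fit(model, x_demo, y_demo, p0=(0.5, 1.0, -1.0))
print(a_fit, b_fit, c_fit)  # should land close to 1.0, 2.0, -3.0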
|
12a8272fb773226ad328daeb460cc2ca84d4c6e0
| 3,645,181
|
from typing import List
import os
def add_abspath(dirs: List):
"""Recursively append the absolute path to the paths in a nested list
If not a list, returns the string with absolute path.
"""
if isinstance(dirs, list):
for i, elem in enumerate(dirs):
if isinstance(elem, str):
dirs[i] = os.path.abspath(elem)
else:
dirs[i] = add_abspath(elem)
return dirs
else:
return os.path.abspath(dirs)
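# Example (the relative paths are hypothetical; the absolute results depend on
# the current working directory):
print(add_abspath(["data", ["nested/dir", "notes.txt"]]))
# e.g. ['/home/user/proj/data', ['/home/user/proj/nested/dir', '/home/user/proj/notes.txt']]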
|
544fb5bb680b6a7874c7364090109ee3cdc75632
| 3,645,182
|
import inspect
def equal_matches(
matches_a: kapture.Matches,
matches_b: kapture.Matches) -> bool:
"""
Compare two instances of kapture.Matches.
:param matches_a: first set of matches
:param matches_b: second set of matches
:return: True if they are identical, False otherwise.
"""
assert isinstance(matches_a, kapture.Matches)
assert isinstance(matches_b, kapture.Matches)
current_function_name = inspect.getframeinfo(inspect.currentframe()).function
return equal_sets(matches_a, matches_b, current_function_name)
|
57efdd63a56f4e94afc9a57e05f4e4f726ce7b44
| 3,645,183
|
def convert_to_example(img_data, target_data, img_shape, target_shape, dltile):
""" Converts image and target data into TFRecords example.
Parameters
----------
img_data: ndarray
Image data
target_data: ndarray
Target data
img_shape: tuple
Shape of the image data (h, w, c)
target_shape: tuple
Shape of the target data (h, w, c)
dltile: str
DLTile key
Returns
-------
Example: TFRecords example
TFRecords example
"""
if len(target_shape) == 2:
target_shape = (*target_shape, 1)
features = {
"image/image_data": _float64_feature(img_data),
"image/height": _int64_feature(img_shape[0]),
"image/width": _int64_feature(img_shape[1]),
"image/channels": _int64_feature(img_shape[2]),
"target/target_data": _float64_feature(target_data),
"target/height": _int64_feature(target_shape[0]),
"target/width": _int64_feature(target_shape[1]),
"target/channels": _int64_feature(target_shape[2]),
"dltile": _bytes_feature(tf.compat.as_bytes(dltile)),
}
return tf.train.Example(features=tf.train.Features(feature=features))
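# The _int64_feature/_float64_feature/_bytes_feature helpers (and the tensorflow
# import) are not included in this snippet; a common formulation, given here as an
# assumption rather than the project's actual code, is:
import tensorflow as tf

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _float64_feature(value):
    # `value` arrives as an ndarray; flatten it into a plain list of floats
    return tf.train.Feature(float_list=tf.train.FloatList(value=value.ravel().tolist()))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))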
|
d8dd2b78a85d2e34d657aa36bfe3515ef1dd5418
| 3,645,184
|
def linear(args, output_size, bias, bias_start=0.0, scope=None, var_on_cpu=True, wd=0.0):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
      var_on_cpu: if True, put the variables on /cpu:0.
      wd: if non-zero, add an L2 weight-decay loss on the matrix with this
        coefficient (it is added to the 'losses' collection).
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
assert args
if not isinstance(args, (list, tuple)):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
# Now the computation.
with tf.variable_scope(scope or "Linear"):
if var_on_cpu:
with tf.device("/cpu:0"):
matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
else:
matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
if wd:
weight_decay = tf.mul(tf.nn.l2_loss(matrix), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
if len(args) == 1:
res = tf.matmul(args[0], matrix)
else:
res = tf.matmul(tf.concat(1, args), matrix)
if not bias:
return res
if var_on_cpu:
with tf.device("/cpu:0"):
bias_term = tf.get_variable(
"Bias", [output_size],
initializer=tf.constant_initializer(bias_start))
else:
bias_term = tf.get_variable(
"Bias", [output_size],
initializer=tf.constant_initializer(bias_start))
return res + bias_term
|
1072115bc42b3d2acb3d41cd0116a636f6bb7804
| 3,645,185
|
def _start_job(rule, settings, urls=None):
"""Start a new job for an InfernoRule
Note that the output of this function is a tuple of (InfernoJob, DiscoJob)
    If the InfernoJob fails to start for some reason (e.g. not enough blobs),
    the DiscoJob will be None.
"""
job = InfernoJob(rule, settings, urls)
return job, job.start()
|
5eab30433004240d79f1ec4eeac868b40632cdde
| 3,645,186
|
def mult(dic,data,r=1.0,i=1.0,c=1.0,inv=False,hdr=False,x1=1.0,xn='default'):
"""
Multiple by a Constant
Parameter c is used even when r and i are defined. NMRPipe ignores c when
r or i are defined.
Parameters:
* dic Dictionary of NMRPipe parameters.
* data array of spectral data.
* r Constant to multply real data by.
* i Constant to multiply imaginary data by.
* c Constant to multiply both real and imaginary data by.
* inv Multiply by inverse of Constant (both real and imaginary)
* hdr Use constant value from header.
* x1 First point of region to multiply constant by.
* xn Last point of region to multiply constant by. 'default' specifies
the end of the vector.
"""
mn = x1 - 1
if xn == 'default':
mx = data.shape[-1]
else:
mx = xn
if hdr: # read in C from header
fn = "FDF"+str(int(dic["FDDIMORDER"][0])) # F1, F2, etc
c = dic[fn+"C1"]
r = 1.0
i = 1.0
rf = (r*c) # real factor
cf = (i*c) # complex factor
if inv:
rf = 1/rf
cf = 1/cf
data[...,mn:mx] = p.mult(data[...,mn:mx],r=rf,i=cf,c=1.0)
dic = update_minmax(dic,data)
return dic,data
|
833ab3784dc9210aa6be0f968f9115982ea93753
| 3,645,187
|
import os
def generate_gps_photon(stream, source, focus, angle_gantry, angle_couch, angle_coll, beamletsize, sad, sfd, energy_mev, desc='Diverging Square field', gps_template=None):
"""Generate the gps input file using a template
Args:
        stream (file-like): output stream the gps macro is written to
        source (x, y, z, u): coordinates
        focus (x, y, z, u): coordinates
        angle_gantry (float): gantry angle
        angle_couch (float): couch angle
        angle_coll (float): collimator angle
        beamletsize (x, z, u)
        sad (float): sad (units must match beamletsize units)
        sfd (float): src-focus-distance (units must match beamletsize units)
        energy_mev (float or None): mono-energetic beam energy in MeV; if None,
            the 6MV spectrum template is used
        desc (str): description string written into the macro
        gps_template (str or None): optional template filename under TEMPLATES
    """
extra_kwargs = {}
# try to match requested template
if gps_template is not None:
fullpath = pjoin(TEMPLATES, gps_template)
if not os.path.isfile(fullpath):
raise FileNotFoundError('GPS template "{}" doesn\'t exist'.format(fullpath))
else:
if energy_mev is not None and is_numeric(energy_mev):
gps_template = 'gps_photon_mono.mac.tpl'
extra_kwargs['energy'] = float(energy_mev)
else:
gps_template = 'gps_photon_6MV.mac.tpl'
xp, yp = calculate_plane_rotation(angle_gantry, angle_couch, angle_coll)
adj_fsize = [0.5*sfd/sad*beamletsize[ii] for ii in range(2)]
with open(pjoin(TEMPLATES, gps_template), 'r') as fd:
stream.write(
fd.read().format(
description=desc,
cx=source[0],
cy=source[1],
cz=source[2],
cu=source[3],
rot1x=xp[0],
rot1y=xp[1],
rot1z=xp[2],
rot2x=yp[0],
rot2y=yp[1],
rot2z=yp[2],
fsx=adj_fsize[0],
fsy=adj_fsize[1],
fsu=beamletsize[2],
fx=focus[0],
fy=focus[1],
fz=focus[2],
fu=focus[3],
**extra_kwargs,
)
)
return stream
|
918c31ee44fe241a07c67587ccd673c4a83cdb2c
| 3,645,188
|
import re
def navigation_target(m) -> re.Pattern:
"""A target to navigate to. Returns a regular expression."""
if hasattr(m, 'any_alphanumeric_key'):
return re.compile(re.escape(m.any_alphanumeric_key), re.IGNORECASE)
if hasattr(m, 'navigation_target_name'):
return re.compile(m.navigation_target_name)
return re.compile(re.escape(m.text), re.IGNORECASE)
|
62cc847f5454e76afb128fd752b7fa83fd2e167e
| 3,645,189
|
def hammingDistance(strA, strB):
""" Determines the bitwise Hamming Distance between two strings. Used to
determine the fitness of a mutating string against the input.
Example:
bin(ord('a')) == '0b1100001'
bin(ord('9')) == '0b0111001'
bin(ord('a') ^ ord('9')) == '0b1011000'
bin(ord('a') ^ ord('9')).count('1') == 3
hammingDistance('a', '9') == 3
hammingDistance('a', '9') * 4 == 12
hammingDistance('aaaa', '9999') == 12
Args:
strA: A string
strB: A string
Returns:
Returns an integer that represents the Hamming Distance from a to b.
Raises:
ValueError: If the two strings are unequal in length or if one input is
not a string.
"""
if (not isinstance(strA, basestring) or not isinstance(strB, basestring)):
raise ValueError('Input is not a string', strA, strB)
if len(strA) != len(strB):
raise ValueError('The two strings are unequal in length', strA, strB)
# base case, hamming distance of nothing and nothing is 0
if (len(strA) == 0) and (len(strB) == 0):
return 0
# XOR both first characters, count the 1s, remaining is recursive case
return (
bin(ord(strA[0]) ^ ord(strB[0])).count('1') +
hammingDistance(strA[1:], strB[1:])
)
|
d417ac22a1abd4c2df0f274d809096f354ac4150
| 3,645,190
|
import regex
def convert(s, syntax=None):
"""Convert a regex regular expression to re syntax.
The first argument is the regular expression, as a string object,
just like it would be passed to regex.compile(). (I.e., pass the
actual string object -- string quotes must already have been
removed and the standard escape processing has already been done,
e.g. by eval().)
The optional second argument is the regex syntax variant to be
used. This is an integer mask as passed to regex.set_syntax();
the flag bits are defined in regex_syntax. When not specified, or
when None is given, the current regex syntax mask (as retrieved by
regex.get_syntax()) is used -- which is 0 by default.
The return value is a regular expression, as a string object that
could be passed to re.compile(). (I.e., no string quotes have
been added -- use quote() below, or repr().)
The conversion is not always guaranteed to be correct. More
syntactical analysis should be performed to detect borderline
cases and decide what to do with them. For example, 'x*?' is not
translated correctly.
"""
table = mastertable.copy()
if syntax is None:
syntax = regex.get_syntax()
if syntax & RE_NO_BK_PARENS:
del table[r'\('], table[r'\)']
del table['('], table[')']
if syntax & RE_NO_BK_VBAR:
del table[r'\|']
del table['|']
if syntax & RE_BK_PLUS_QM:
table['+'] = r'\+'
table['?'] = r'\?'
table[r'\+'] = '+'
table[r'\?'] = '?'
if syntax & RE_NEWLINE_OR:
table['\n'] = '|'
res = ""
i = 0
end = len(s)
while i < end:
c = s[i]
i = i+1
if c == '\\':
c = s[i]
i = i+1
key = '\\' + c
key = table.get(key, key)
res = res + key
else:
c = table.get(c, c)
res = res + c
return res
|
e61a9d555008c9b36b579f6eb1e32e1e9fa0e983
| 3,645,191
|
import select
def current_user(request):
"""Return the list of all the users with their ids.
"""
query = select([
User.id.label('PK_id'),
User.Login.label('fullname')
]).where(User.id == request.authenticated_userid)
return dict(DBSession.execute(query).fetchone())
|
6951a6c638886d773a9e92e161d9aa2b166b17b3
| 3,645,192
|
def get_test_examples_labels(dev_example_list, batch_size):
"""
:param dev_example_list: list of filenames containing dev examples
:param batch_size: int
:return: list of nlplingo dev examples, dev labels
"""
dev_chunk_generator = divide_chunks(dev_example_list, NUM_BIG_CHUNKS)
test_examples = []
# dev_chunk_generator yields lists, each of len == NUM_BIG_CHUNKS
for big_chunk in dev_chunk_generator:
chunk_lst = load_big_chunk(big_chunk) # big_chunk is a filepath to .npz
example_lst = []
for chunk in chunk_lst:
example_lst.extend(chunk)
example_generator = divide_chunks(example_lst, batch_size)
for example_chunk in example_generator:
test_examples.extend(example_chunk)
labels = [example.label for example in test_examples]
test_label = np.asarray(labels)
return test_examples, test_label
|
e46a3c1d9780c8b74fcef3311267ad87f5938a66
| 3,645,193
|
import uuid
import tokenize
from operator import getitem
from typing import Iterator
from typing import OrderedDict
def unpack_collections(*args, **kwargs):
"""Extract collections in preparation for compute/persist/etc...
Intended use is to find all collections in a set of (possibly nested)
python objects, do something to them (compute, etc...), then repackage them
in equivalent python objects.
Parameters
----------
*args
Any number of objects. If it is a dask collection, it's extracted and
added to the list of collections returned. By default, python builtin
collections are also traversed to look for dask collections (for more
information see the ``traverse`` keyword).
traverse : bool, optional
If True (default), builtin python collections are traversed looking for
any dask collections they might contain.
Returns
-------
collections : list
A list of all dask collections contained in ``args``
repack : callable
A function to call on the transformed collections to repackage them as
they were in the original ``args``.
"""
traverse = kwargs.pop("traverse", True)
collections = []
repack_dsk = {}
collections_token = uuid.uuid4().hex
def _unpack(expr):
if is_dask_collection(expr):
tok = tokenize(expr)
if tok not in repack_dsk:
repack_dsk[tok] = (getitem, collections_token, len(collections))
collections.append(expr)
return tok
tok = uuid.uuid4().hex
if not traverse:
tsk = quote(expr)
else:
# Treat iterators like lists
typ = list if isinstance(expr, Iterator) else type(expr)
if typ in (list, tuple, set):
tsk = (typ, [_unpack(i) for i in expr])
elif typ in (dict, OrderedDict):
tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()])
elif is_dataclass(expr) and not isinstance(expr, type):
tsk = (
apply,
typ,
(),
(
dict,
[
[f.name, _unpack(getattr(expr, f.name))]
for f in fields(expr)
],
),
)
else:
return expr
repack_dsk[tok] = tsk
return tok
out = uuid.uuid4().hex
repack_dsk[out] = (tuple, [_unpack(i) for i in args])
def repack(results):
dsk = repack_dsk.copy()
dsk[collections_token] = quote(results)
return simple_get(dsk, out)
return collections, repack
|
d855a4cea5cb16a5863d670edb4bda6d15e2b371
| 3,645,194
|
def get_usernames(joomlasession):
"""Get list of usernames on the homepage."""
users = joomlasession.query(Jos_Users).all()
return [user.username for user in users]
|
b866cdd8fb47d12b7b79291f3335ded217fa8a1d
| 3,645,195
|
def minor_block_encoder(block, include_transactions=False, extra_info=None):
"""Encode a block as JSON object.
:param block: a :class:`ethereum.block.Block`
:param include_transactions: if true transaction details are included, otherwise
only their hashes
:param extra_info: MinorBlockExtraInfo
:returns: a json encodable dictionary
"""
header = block.header
meta = block.meta
header_info = minor_block_header_encoder(header)
d = {
**header_info,
"hashMerkleRoot": data_encoder(meta.hash_merkle_root),
"hashEvmStateRoot": data_encoder(meta.hash_evm_state_root),
"gasUsed": quantity_encoder(meta.evm_gas_used),
"size": quantity_encoder(len(block.serialize())),
}
if include_transactions:
d["transactions"] = []
for i, _ in enumerate(block.tx_list):
d["transactions"].append(tx_encoder(block, i))
else:
d["transactions"] = [
id_encoder(tx.get_hash(), block.header.branch.get_full_shard_id())
for tx in block.tx_list
]
if extra_info:
_add_posw_info_to_resp(d, header.difficulty, extra_info)
return d
|
53b0eb26e7a8ef0c05f4149c71b09ff6505f85d0
| 3,645,196
|
def heaviside(x):
"""Implementation of the Heaviside step function (https://en.wikipedia.org/wiki/Heaviside_step_function)
Args:
x: Numpy-Array or single Scalar
Returns:
x with step values
"""
if x <= 0:
return 0
else:
return 1
|
5ef05263637501f82cea3befe897cd60ec39994d
| 3,645,197
|
def FallbackReader(fname):
"""Guess the encoding of a file by brute force by trying one
encoding after the next until something succeeds.
@param fname: file path to read from
"""
txt = None
for enc in GetEncodings():
try:
handle = open(fname, 'rb')
reader = codecs.getreader(enc)(handle)
txt = reader.read()
reader.close()
except Exception, msg:
handle.close()
continue
else:
return (enc, txt)
return (None, None)
|
d9f4235df9f472c7584192e920980f5f2668202a
| 3,645,198
|
def graph_3D(data, col="category", list_=[None], game=None, extents=None):
"""
3D t-sne graph data output
:param data: a pandas df generated from app_wrangling.call_boardgame_data()
:param col: string indicating which column (default 'category')
:param list_: list of elements in column (default [None])
:param game: string of board game name (default None)
    :param extents: dict with 'min_x'/'max_x', 'min_y'/'max_y', 'min_z'/'max_z' axis ranges
:return fig_out: 3D plotly figure
"""
# layout for the 3D plot:
axis_x = dict(
title="",
showgrid=True,
zeroline=False,
showticklabels=False,
showspikes=False,
range=[extents["min_x"], extents["max_x"]],
)
axis_y = axis_x.copy()
axis_y["range"] = [extents["min_y"], extents["max_y"]]
axis_z = axis_x.copy()
axis_z["range"] = [extents["min_z"], extents["max_z"]]
layout_out = go.Layout(
margin=dict(l=0, r=0, b=0, t=0),
scene=dict(xaxis=axis_x, yaxis=axis_y, zaxis=axis_z),
legend=dict(yanchor="top", y=0.93, xanchor="right", x=0.99),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
)
# plotting data:
if (list_ == [None]) or (not list_):
set_data = data.copy(deep=True)
set_data["group"] = "none"
else:
set_data = app_wr.call_boardgame_radio(data, col, list_).explode("group")
data_out = []
# corresponds with dark2 palette:
# had trouble manually setting color palette for graph_object:
color_list = [
"#1b9e77",
"#d95f02",
"#7570b3",
"#e7298a",
"#66a61e",
"#e6ab02",
"#a6761d",
"#666666",
]
i = 0
for idx, val in set_data.groupby(set_data.group):
if idx == "none":
marker_style = dict(
size=val["average_rating"] * 1.6,
symbol="circle",
opacity=0.1,
color="grey",
)
legend_show = False
else:
marker_style = dict(
size=val["average_rating"] * 1.6,
symbol="circle",
opacity=0.4,
color=color_list[i],
)
legend_show = True
i += 1
scatter = go.Scatter3d(
name=idx,
x=val["x"],
y=val["y"],
z=val["z"],
mode="markers",
marker=marker_style,
text=val["name"],
hoverinfo="text+name",
showlegend=legend_show,
)
data_out.append(scatter)
if game:
game_data = data[data["name"] == game]
marker_style = dict(
size=game_data["average_rating"] * 1.6,
symbol="circle",
opacity=1.0,
color="purple",
)
scatter = go.Scatter3d(
name=game,
x=game_data["x"],
y=game_data["y"],
z=game_data["z"],
mode="markers",
marker=marker_style,
text=game_data["name"],
hoverinfo="text",
)
data_out.append(scatter)
fig_out = {"data": data_out, "layout": layout_out}
return fig_out
|
17ad05f2c7cc3413c145009fb72aed366ec9ab49
| 3,645,199
|