content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def split_person_name(name):
    """
    A helper function. Split a person name into a first name and a last name.

    The last space-separated token is the last name; everything before it is
    the first name (empty string when the name has a single token).

    Example.
    >>> split_person_name("Filip Oliver Klimoszek")
    ('Filip Oliver', 'Klimoszek')
    >>> split_person_name("Klimoszek")
    ('', 'Klimoszek')
    """
    # NOTE: the doctest output above is the actual tuple repr (single quotes);
    # the previous double-quoted form would fail under doctest.
    parts = name.split(" ")
    return " ".join(parts[:-1]), parts[-1]
def readFile(file):
    """Reads file and returns lines from file.

    Args:
        file (str): file name to read.

    Returns:
        list: lines from the file, newlines preserved (as from readlines()).
    """
    # Context manager guarantees the handle is closed even if reading raises;
    # the previous open/close pair leaked the handle on error.
    with open(file) as fin:
        return fin.readlines()
def vec_rotate_left(x):
    """Circularly shift the contents of a vector one place to the left.

    Args:
        x (jax.numpy.ndarray): A line vector.

    Returns:
        jax.numpy.ndarray: ``x`` rotated left by one element.
    """
    return jnp.roll(x, shift=-1)
def play_game(my_play='PUT', game_id='PUT'):
    """Submit a play to a game. Return results of the game.

    NOTE(review): game_id is accepted but currently unused in this body;
    the design notes below describe the intended routing — confirm upstream.
    """
    # If no my_play then something is wrong
    # If no game_id and no match_id then create a new game and play it
    # If game_id then play that game
    # If match_id and no game_id then .. idk .. tea?
    # Load the game as a context manager so its resources are released
    # once the result has been serialised.
    with rpsGame(play1=my_play) as current_game:
        outcome = current_game.toJSON()
    return outcome
def get_inputs(seq_len):
    """Build the three BERT input layers (Token, Segment, Masked).

    See: https://arxiv.org/pdf/1810.04805.pdf

    :param seq_len: Length of the sequence or None.
    """
    layers = []
    for layer_name in ('Token', 'Segment', 'Masked'):
        layers.append(keras.layers.Input(
            batch_shape=(1, seq_len,),
            name='Input-%s' % layer_name,
        ))
    return layers
import base64
def get_credentials(args):
    """Resolve API credentials from command-line args or the config file.

    Command-line credentials take priority; both key and secret must be
    given together. Falls back to the stored config-file credentials,
    whose secret is base64-decoded.
    """
    cmd_key = args.api_key
    cmd_secret = args.api_secret
    if cmd_key or cmd_secret:
        # Both halves must be supplied when overriding from the commandline.
        if cmd_key and cmd_secret:
            return cmd_key, cmd_secret
        raise AuthError(
            (
                "Both --key and --secret must be provided "
                "when passing auth tokens from commandline."
            )
        )
    key, secret = read_from_file()
    if not key:
        raise AuthError(
            (
                "Credentials not configured. "
                "Try `tiktalik init-auth`, or use --key and --secret."
            )
        )
    return key, base64.b64decode(secret)
import logging
def GetRange(spreadsheet_id, sheet_name, range_in_sheet):
    """Gets the given range in the given spreadsheet.

    Args:
        spreadsheet_id: The id from Google Sheets, like
            https://docs.google.com/spreadsheets/d/<THIS PART>/
        sheet_name: The name of the sheet to get, from the bottom tab.
        range_in_sheet: The range, such as "A1:F14"

    Returns:
        A list of rows (each a list of cell values), possibly empty.
    """
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build(
        'sheets',
        'v4',
        credentials=credentials,
        discoveryServiceUrl=DISCOVERY_URL)
    sheet_range = '%s!%s' % (sheet_name, range_in_sheet)
    # BUG FIX: spreadsheets().values() is the Sheets "values" resource whose
    # .get(spreadsheetId=..., range=...) builds the request; the previous
    # list(...) wrapper turned it into a Python list, which has no such .get.
    result = service.spreadsheets().values().get(
        spreadsheetId=spreadsheet_id, range=sheet_range).execute()
    values = result.get('values', [])
    if not values:
        # Error reporting is not spectacular. Looks like values will just be None.
        # But they could be None if there wasn't any data, either. So log it
        # and still return the None value.
        logging.error('Could not get values for %s of %s', sheet_range, sheet_name)
    return values
def format_price(raw_price):
    """Insert the decimal point into Best Buy's raw price string.

    Args:
        raw_price (str): Bestbuy's price format (ex: $5999 is $59.99)

    Returns:
        str: the price with a "." inserted before the last two characters.
    """
    return raw_price[:-2] + "." + raw_price[-2:]
import os
def get_directory(directory=None):
    """Get directory to work with.

    Falls back to the current working directory when *directory* is falsy;
    otherwise expands "~" and resolves symlinks.

    Raises:
        ValueError: if the resulting path is not an existing directory.
    """
    if directory:
        fdir = os.path.realpath(os.path.expanduser(directory))
    else:
        fdir = os.getcwd()
    if not os.path.isdir(fdir):
        raise ValueError("Directory doesn't exist. Check --directory.")
    return fdir
def access_app(app_label, *permissions):
    """
    Build a scope granting the given permissions on the app *app_label*.
    """
    app_labels = (app_label,)
    return _make_grant(app_labels, permissions)
def mock_device_with_capabilities(monkeypatch):
    """A function to create a mock device with non-empty observables.

    Generator in the pytest-fixture style: patches the (externally defined)
    ``Device`` class so it can be instantiated, then yields a factory that
    builds such devices. Patches are undone when the generator is closed.
    """
    with monkeypatch.context() as m:
        # Clearing the abstract-method set allows the abstract Device class
        # to be instantiated directly for testing purposes.
        m.setattr(Device, '__abstractmethods__', frozenset())
        # mock_device_capabilities is presumably a module-level fixture dict —
        # TODO confirm where it is defined.
        m.setattr(Device, '_capabilities', mock_device_capabilities)
        def get_device(wires=1):
            # Factory returning a freshly constructed patched Device.
            return Device(wires=wires)
        yield get_device
def replicated_data(index):
    """Return True when ``data[index]`` is a replicated data item (even index)."""
    return not index % 2
def create_source_list(uris_list):
    """
    Build a ``manifest.Sources`` object from a list of GCS uri lists.

    @arg uris_list List of list of GCS uris
    @returns A source_list object
    @example
    uris_list = [["gs://my-bucket/my-image-1.tif"], ["gs://my-bucket/my-image-2.tif"]]
    print(create_source_list(uris_list).serialize())
    """
    sources = [{"uris": manifest.Uris(uris)} for uris in uris_list]
    return manifest.Sources(sources)
def sigmoid_rampup(current, rampup_length):
    """Exponential rampup from https://arxiv.org/abs/1610.02242 .

    Returns 1.0 when rampup_length is 0; otherwise exp(-5 * phase**2) where
    phase falls linearly from 1 to 0 as current goes from 0 to rampup_length.
    """
    if rampup_length == 0:
        return 1.0
    clipped = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - clipped / rampup_length
    return float(np.exp(-5.0 * phase ** 2))
def photos_restaurants():
    """Render the restaurant photos page."""
    return render_template('photos.html')
def encode(obj, outtype='json', raise_error=False):
    """ encode objects, via encoder plugins, to new types

    Parameters
    ----------
    outtype: str
        use encoder method to_<outtype> to encode
    raise_error : bool
        if True, raise ValueError if no suitable plugin found

    Examples
    --------
    >>> load_builtin_plugins('encoders')
    []
    >>> from decimal import Decimal
    >>> encode(Decimal('1.3425345'))
    {'_python_Decimal_': '1.3425345'}
    >>> encode(Decimal('1.3425345'),outtype='str')
    '1.3425345'
    >>> encode(set([1,2,3,4,4]))
    {'_python_set_': [1, 2, 3, 4]}
    >>> encode(set([1,2,3,4,4]),outtype='str')
    '{1, 2, 3, 4}'
    >>> unload_all_plugins()
    """
    method = 'to_{}'.format(outtype)
    # First plugin whose objclass matches and which implements the requested
    # conversion wins. (The previous version had an unreachable `break` after
    # the `return` — removed.)
    for encoder in get_plugins('encoders').values():
        if isinstance(obj, encoder.objclass) and hasattr(encoder, method):
            return getattr(encoder, method)(obj)
    if raise_error:
        # BUG FIX: added the missing space before "{0}" in the message.
        raise ValueError(
            "No JSON serializer is available for "
            "{0} (of type {1})".format(obj, type(obj)))
    return obj
def minorify_scale(scale):
    """Convert a major scale into its minor scale by rotating five steps."""
    return rotate(scale, 5)
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2 | a5816325737a97054764b0b266353053cf83b025 | 3,631,317 |
def get_user_roles_common(user):
    """Return the user's role exactly as stored in the db record."""
    return user.role
def getlist(self, option: str, fallback: list=None, *, raw: bool=False, vars: dict=None) -> list:
    """
    Convert a SectionProxy csv option to a list.

    :param option: the option to get
    :param fallback: default value, if option does not exist
    :param raw: True to disable interpolation
    :param vars: additional substitutions
    :return: a list corresponding to the option
    """
    # Delegate to the underlying parser for this section.
    # pylint: disable=protected-access
    parser = self._parser
    return parser.getlist(self._name, option, raw=raw, vars=vars, fallback=fallback)
def _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec):
    """schedule the spatial packing for conv2d

    Schedules, in order: the padding stage (inlined), the data-packing stage,
    the kernel-packing stage, the convolution itself and the final output
    stage, driven by the tile sizes and annotation entities in the autotvm
    config ``cfg``. Returns the mutated schedule ``s``.
    """
    data = s[data_vec].op.input_tensors[0]
    # Unroll/vectorize limits applied throughout this schedule.
    max_unroll = 16
    vec_size = [1, 2, 4, 8, 16]
    # get tunable parameters (they are defined in compute)
    BC, TC, VC = cfg["tile_co"].size
    BH, TH, VH = cfg["tile_oh"].size
    BW, TW, VW = cfg["tile_ow"].size
    # schedule padding: fold the pad stage into its consumer
    if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
        data_pad = data
        s[data_pad].compute_inline()
    # schedule data packing; the undilated variant carries two extra axes
    if isinstance(data_vec.op, tvm.te.ComputeOp) and data_vec.op.name == "data_vec_undilated":
        _, h, w, ci, _, _, vh, vw = s[data_vec].op.axis
    else:
        _, h, w, ci, vh, vw = s[data_vec].op.axis
    tile_and_bind3d(s, data_vec, h, w, ci, 1)
    # Unroll the innermost packing axes only when they are small enough.
    if vh.dom.extent.value < max_unroll:
        s[data_vec].unroll(vh)
    if vw.dom.extent.value < max_unroll:
        s[data_vec].unroll(vw)
    # NOTE(review): this checks `kernel_vec.name` (tensor name) while the
    # data_vec branch above checks `op.name` — confirm both are intended.
    if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == "kernel_vec":
        if not autotvm.GLOBAL_SCOPE.in_tuning:
            # Flatten all kernel-packing axes and spread them across GPU
            # threads, vectorizing the innermost VC chunk when supported.
            max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
            co, ci, kh, kw, vc = s[kernel_vec].op.axis
            fused = s[kernel_vec].fuse(co, ci, kh, kw, vc)
            fused, vec = s[kernel_vec].split(fused, VC)
            bb, tt = s[kernel_vec].split(fused, max_threads)
            s[kernel_vec].bind(bb, te.thread_axis("blockIdx.x"))
            s[kernel_vec].bind(tt, te.thread_axis("threadIdx.x"))
            if VC in vec_size:
                s[kernel_vec].vectorize(vec)
    # schedule convolution: apply the tuned loop order, tiling and annotations
    n, c, h, w, vh, vw, vc = s[conv].op.axis
    kc, kh, kw = s[conv].op.reduce_axis
    cfg["reorder_0"].apply(s, conv, [n, c, h, w, kc, kh, kw, vh, vw, vc])
    tile_and_bind3d(s, conv, c, h, w, TC, TH, TW)
    cfg["ann_reduce"].apply(
        s,
        conv,
        [kh, kw],
        axis_lens=[get_const_int(kernel_vec.shape[2]), get_const_int(kernel_vec.shape[3])],
        max_unroll=max_unroll,
    )
    cfg["ann_spatial"].apply(
        s,
        conv,
        [vh, vw, vc],
        axis_lens=[VH, VW, VC],
        max_unroll=max_unroll,
        vec_size=vec_size,
        cfg=cfg,
    )
    # schedule output
    if output.op not in s.outputs: # has bias
        s[output].compute_inline()
        output = s.outputs[0]
    _, co, oh, ow = s[output].op.axis
    tile_and_bind3d(s, output, co, oh, ow, TC, TH, TW)
    return s
from .spectral import TemplateSpectralModel
from .spatial import ConstantSpatialModel
def create_fermi_isotropic_diffuse_model(filename, **kwargs):
    """Read Fermi isotropic diffuse model.

    See `LAT Background models <https://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html>`_

    Parameters
    ----------
    filename : str
        filename
    kwargs : dict
        Keyword arguments forwarded to `TemplateSpectralModel`

    Returns
    -------
    diffuse_model : `SkyModel`
        Fermi isotropic diffuse sky model.
    """
    # Column 0 is energy in MeV, column 1 the differential flux.
    table = np.loadtxt(make_path(filename))
    energy = u.Quantity(table[:, 0], "MeV", copy=False)
    flux_values = u.Quantity(table[:, 1], "MeV-1 s-1 cm-2", copy=False)
    return SkyModel(
        spatial_model=ConstantSpatialModel(),
        spectral_model=TemplateSpectralModel(
            energy=energy, values=flux_values, **kwargs
        ),
        name="fermi-diffuse-iso",
    )
def get_auto_sync(admin_id):
    """Return the status of the auto synchronization statement.

    Args:
        admin_id (str): Root privileges flag (not used by this query itself).
    """
    return r_synchronizer.is_sync_auto()
def create_inchi_groups(ctfile):
    """Organize `InChI` into groups based on their identical `InChI` string
    and similar coupling type.

    :param ctfile: `SDfile` instance.
    :type ctfile: :class:`~ctfile.ctfile.SDfile`
    :return: Dictionary of related `InChI` groups keyed by (inchi, coupling).
    :rtype: :py:class:`dict`
    """
    groups = defaultdict(list)
    for entry_id, entry in ctfile.items():
        key = (
            entry["data"]["InChI"][0],
            coupling_descr(coupling_types=entry["data"]["CouplingType"]),
        )
        groups[key].append(entry_id)
    return groups
def qmax_statistics(sample_q, sample, uncertainty, qmax, qmin, qmaxinst,
                    relative_max_uncertainty,):
    """Determine qmax from the relative uncertainty of the sample signal.

    When ``qmax == 'statistics'``, scan from max(qmin index, first
    uncertainty minimum) and pick the first q whose relative uncertainty
    reaches ``relative_max_uncertainty``.

    NOTE(review): ``qmax_index`` is only bound when the threshold is reached
    inside the 'statistics' branch; other paths raise NameError — confirm the
    intended contract with callers.
    """
    #TODO: Include background statistics in this?
    # BUG FIX: the original used `is` to compare against the literals
    # 'statistics' and 0 — identity comparison against literals is not
    # guaranteed; use equality instead. Python-2 print statements were also
    # converted to print() calls to match the rest of the file.
    if qmax == 'statistics':
        old_settings = np.seterr(divide='ignore')
        uncertainty_percent_array = uncertainty / sample * 100
        np.seterr(**old_settings)
        uncertainty_percent_array[np.isnan(uncertainty_percent_array)] = 0
        uncertainty_percent_array[np.isinf(uncertainty_percent_array)] = 0
        qmin_index = np.where(sample_q == qmin)[0]
        uncertainty_first_peak = signal.argrelmin(uncertainty)[0][0]
        if qmin_index > uncertainty_first_peak:
            starting_q = qmin_index
        else:
            starting_q = uncertainty_first_peak
        if qmaxinst == 0:
            qmaxinst = float(np.amax(sample_q))
        qmaxinst_index = np.where(sample_q == qmaxinst)[0]
        if uncertainty_percent_array[qmaxinst_index] > 100:
            qmaxinst_index = np.where(uncertainty_percent_array[starting_q:]
                                      == 100)
            print(qmaxinst_index)
        # find the first position where the uncertainty is above the threshold
        try:
            uncertainty_max = np.max(uncertainty_percent_array[
                starting_q:qmaxinst_index])
            for arr_index, value in enumerate(uncertainty_percent_array[
                    starting_q:qmaxinst_index]):
                relative_uncertainty = value / uncertainty_max
                if relative_uncertainty >= relative_max_uncertainty:
                    qmax_index = arr_index + starting_q
                    break
        except StopIteration:
            print('qmax_error')
            print(relative_max_uncertainty)
            print(sample_q)
    qmax = float(sample_q[qmax_index])
    print('q=', qmax)
    return qmax
def loocvRF(data, idcolumn, outcomevar, dropcols=None, numestimators=1000, fs=0.02):
    """
    Main loocv RF function that calls other functions to do RF feature selection, training, and testing.

    Args:
        data (pandas DataFrame): This is a dataframe containing each participant's features and outcome variables
        idcolumn (string): This is the column name of your column containing your participant number or ID (case sensitive)
        outcomevar (string): This is the column name of your outcome variable (case sensitive)
        dropcols (list): This is a list containing strings of each column you wish to drop in your dataframe. Default is None (no extra columns dropped).
        numestimators (integer): The number of trees you want built in your RF. Default=1000.
        fs (float): The cutoff importance for feature selection. Anything below this importance will be removed for the RF training.

    Returns:
        errors (list): This is a list with the absolute error between the predicted value and actual value for each fold.
        meanrmse (float): This is the mean root mean squared error (RMSE) over all of the folds
        stdrmse (float): This is the standard deviation of the root mean squared error (RMSE) over all of the folds
        meanmape (float): This is the mean mean average percent error (MAPE) over all of the folds
        stdmape (float): This is the standard deviation of the mean average percent error (MAPE) over all of the folds
        importances (pandas DataFrame): This is a pandas DataFrame with 3 columns: value (feature), importances (importance of the feature), and id (fold over which this feature importance was derived)
    """
    # Make list of all ID's in idcolumn
    IDlist = list(data[idcolumn])
    # BUG FIX: the mutable default argument `dropcols=[]` was replaced by the
    # backward-compatible None sentinel.
    drop = [idcolumn] + (dropcols if dropcols is not None else [])
    # Initialize empty lists
    errors = []
    rmse = []
    mape = []
    importance_frames = []
    # Run LOOCV Random Forest!
    for i in IDlist:
        er, rm, ma, imp = RFLOOCV(data, i, outcomevar, drop, idcolumn, numestimators, fs)
        errors.append(er)
        rmse.append(rm)
        mape.append(ma)
        importance_frames.append(imp)
        print('...' + str(i) + ' processing complete.')
    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # accumulate the per-fold frames and concatenate once instead.
    if importance_frames:
        importances = pd.concat(importance_frames)
    else:
        importances = pd.DataFrame(columns=['value', 'importances', 'id'])
    # Compute mean and std RMSE, MAPE
    meanrmse = np.mean(rmse)
    stdrmse = np.std(rmse)
    meanmape = np.mean(mape)
    stdmape = np.std(mape)
    # Print RMSE, MAPE
    print('Mean RMSE:' + str(meanrmse))
    print('Std RMSE:' + str(stdrmse))
    print('Mean MAPE:' + str(meanmape))
    print('Std MAPE:' + str(stdmape))
    return errors, meanrmse, stdrmse, meanmape, stdmape, importances
from typing import Union
def sqla_session(x: Union['db_url', 'engine']):
    """
    Build a sane-default sqla scoped session from either a db url string or
    an existing engine. Example usage:
        db = sqla_session(...)
        df = pd.read_sql(sql=..., con=db.bind)
    """
    # Resolve the argument into an engine.
    if isinstance(x, sqla.engine.base.Engine):
        eng = x
    elif isinstance(x, str):
        # A bare name is treated as a local postgres database.
        db_url = x if '/' in x else f'postgres://localhost/{x}'
        eng = sqla.create_engine(
            db_url,
            convert_unicode=True,
        )
    else:
        raise ValueError(f"Expected db_url or engine, got: {x!r}")
    # Make session
    session_factory = sqlo.sessionmaker(
        autocommit=True,
        bind=eng,
    )
    return sqlo.scoped_session(session_factory)
def box_iou(box1, box2, order='xyxy'):
    """Compute the intersection over union of two set of boxes.

    The default box order is (xmin, ymin, xmax, ymax).

    Args:
        box1: (tf.tensor) bounding boxes, sized [A, 4].
        box2: (tf.tensor) bounding boxes, sized [B, 4].
        order: (str) box order, either 'xyxy' or 'xywh'.

    Return:
        (tensor) iou, sized [A, B].

    Reference:
        https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
    """
    if order == 'xywh':
        box1 = change_box_order(box1, 'xywh2xyxy')
        box2 = change_box_order(box2, 'xywh2xyxy')
    # Pairwise intersection rectangle of every box1 (axis A) with box2 (axis B).
    top_left = tf.maximum(box1[:, None, :2], box2[:, :2])      # [A, B, 2]
    bottom_right = tf.minimum(box1[:, None, 2:], box2[:, 2:])  # [A, B, 2]
    # Clip negative extents (disjoint boxes) to zero.
    extent = tf.clip_by_value(bottom_right - top_left,
                              clip_value_min=0, clip_value_max=tf.float32.max)
    intersection = extent[:, :, 0] * extent[:, :, 1]           # [A, B]
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])  # [A,]
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])  # [B,]
    return intersection / (area1[:, None] + area2 - intersection)
def get_bmi_category(df):
    """
    Add 'BMI Category' and 'Health risk' columns derived from the BMI value.

    :param df: input dataframe with BMI values
    :return: Dataframe with BMI category and Health risk derived from their respective BMI values
    """
    # (condition, category label, risk label) triples, evaluated in order;
    # BMI values falling in the gaps between bands resolve to 'Undefined'.
    bands = [
        (df.BMI <= 18.4, "Underweight", "Malnutrition risk"),
        ((df.BMI >= 18.5) & (df.BMI <= 24.9), "Normal weight", "Low risk"),
        ((df.BMI >= 25) & (df.BMI <= 29.9), "Overweight", "Enhanced risk"),
        ((df.BMI >= 30) & (df.BMI <= 34.9), "Moderately obese", "Medium risk"),
        ((df.BMI >= 35) & (df.BMI <= 39.9), "Severely obese", "High risk"),
        (df.BMI >= 40, "Very severely obese", "Very high risk"),
    ]
    category_col = None
    risk_col = None
    for condition, category, risk in bands:
        if category_col is None:
            category_col = F.when(condition, category)
            risk_col = F.when(condition, risk)
        else:
            category_col = category_col.when(condition, category)
            risk_col = risk_col.when(condition, risk)
    return df.withColumn('BMI Category', category_col.otherwise('Undefined'))\
        .withColumn('Health risk', risk_col.otherwise('Undefined'))
import os
import shutil
def setup(projectdir='.', resourcedir='mm'):
    """Initialise a default modelmanager project in the current directory."""
    resourcedir = osp.join(projectdir, resourcedir)
    settings_path = osp.join(resourcedir, SettingsManager.settings_file_name)
    print('Initialising a new modelmanager project in: %s\n' % projectdir +
          'with settings file in: %s' % settings_path)
    # Create the project directory first if needed.
    if not osp.exists(projectdir):
        os.mkdir(projectdir)
    # A pre-existing resource directory means the project was already set up.
    assert not osp.exists(resourcedir), (
        'The modelmanager resource directory seems to exist already:\n' +
        resourcedir)
    # Seed the resource directory from the packaged defaults.
    shutil.copytree(osp.join(osp.dirname(__file__), 'resources'), resourcedir)
    # Load project and update/create database.
    return Project(projectdir)
def create_mm_sim(molecule):
    """Create a vacuum simulation system on the CPU platform (2 threads)."""
    platform = Platform.getPlatformByName('CPU')
    properties = {"Threads": "2"}
    integrator = LangevinIntegrator(temperature, collision_rate, stepsize)
    topology = molecule.to_topology()
    system = forcefield.create_openmm_system(topology)
    sim = Simulation(topology, system, integrator, platform=platform,
                     platformProperties=properties)
    # Seed positions from a generated conformer, then relax and thermalise.
    molecule.generate_conformers()
    sim.context.setPositions(molecule.conformers[0])
    sim.minimizeEnergy()
    sim.context.setVelocitiesToTemperature(temperature)
    return sim
def team_event_awards(team_key: TeamKey, event_key: EventKey) -> Response:
    """
    Returns a list of awards for a team at an event.
    """
    track_call_after_response("team/event/awards", f"{team_key}/{event_key}")
    query = TeamEventAwardsQuery(team_key=team_key, event_key=event_key)
    return profiled_jsonify(query.fetch_dict(ApiMajorVersion.API_V3))
def version_match(ms_version, mi_version):
    """Judge if the version of Mindinsight and Mindspore is matched."""
    if not ms_version:
        ms_version = MS_VERSION
    # the debugger version in MS 1.4.xxx is still 1.3.xxx
    if mi_version.startswith('1.4.') and ms_version.startswith('1.3.'):
        return True
    # Otherwise match on the major.minor prefix only.
    return mi_version.split('.')[:2] == ms_version.split('.')[:2]
def add_to_tfrecord(coco, img_id, img_dir, coder, writer, is_train):
    """
    Add each "single person" in this image.
    coco - coco API

    For every qualifying person: rescales the image, recomputes the joint
    coordinates and center, crops a 400x400 window around the person, and
    writes one serialized example per person to ``writer``.
    NOTE(review): ``is_train`` is accepted but unused in this body — confirm.

    Returns:
        The number of people added.
    """
    # Get annotation id for this guy
    # Cat ids is [1] for human..
    ann_id = coco.getAnnIds(imgIds=img_id, catIds=[1], iscrowd=False)
    anns = coco.loadAnns(ann_id)
    # coco.showAnns(anns)
    # filtered_anns and bboxes are returned but not used below.
    filtered_anns, kps, bboxes, centers, masks = get_anns_details(
        anns, coco, min_vis=6, min_max_height=60)
    # Figure out the scale and pack each one in a tuple
    people = parse_people(kps, centers, masks)
    if len(people) == 0:
        # print('No single persons in img %d' % img_id)
        return 0
    # Add each people to tf record
    img_data = coco.loadImgs(img_id)[0]
    image_path = join(img_dir, img_data['file_name'])
    with tf.gfile.FastGFile(image_path, 'rb') as f:
        image_data = f.read()
    image = coder.decode_jpeg(image_data)
    for joints, scale, pos, mask in people:
        # Scale image:
        image_scaled, scale_factors = resize_img(image, scale)
        height, width = image_scaled.shape[:2]
        # Apply the same per-axis scaling to the joint coordinates.
        joints_scaled = np.copy(joints)
        joints_scaled[0, :] *= scale_factors[0]
        joints_scaled[1, :] *= scale_factors[1]
        # center = pos * scale_factors
        # Row 2 of the joints array is the visibility flag.
        visible = joints[2, :].astype(bool)
        # Center = midpoint of the visible-joint bounding box.
        min_pt = np.min(joints_scaled[:2, visible], axis=1)
        max_pt = np.max(joints_scaled[:2, visible], axis=1)
        center = (min_pt + max_pt) / 2.
        ## Crop 400x400 around this image..
        margin = 200
        start_pt = np.maximum(center - margin, 0).astype(int)
        end_pt = (center + margin).astype(int)
        end_pt[0] = min(end_pt[0], width)
        end_pt[1] = min(end_pt[1], height)
        image_scaled = image_scaled[start_pt[1]:end_pt[1], start_pt[0]:end_pt[
            0], :]
        # Update others oo.
        # Shift joints and center into the cropped coordinate frame.
        joints_scaled[0, :] -= start_pt[0]
        joints_scaled[1, :] -= start_pt[1]
        center -= start_pt
        height, width = image_scaled.shape[:2]
        # Vis:
        """
        import matplotlib.pyplot as plt
        plt.ion()
        plt.clf()
        fig = plt.figure(1)
        ax = fig.add_subplot(121)
        image_with_skel = draw_skeleton(image, joints[:2, :], vis=visible, radius=(np.mean(image.shape[:2]) * 0.01).astype(int))
        ax.imshow(image_with_skel)
        ax.axis('off')
        # ax.imshow(image)
        # ax.scatter(joints[0, visible], joints[1, visible])
        # ax.scatter(joints[0, ~visible], joints[1, ~visible], color='green')
        ax.scatter(pos[0], pos[1], color='red')
        ax = fig.add_subplot(122)
        image_with_skel_scaled = draw_skeleton(image_scaled, joints_scaled[:2, :], vis=visible, radius=max(4, (np.mean(image_scaled.shape[:2]) * 0.01).astype(int)))
        ax.imshow(image_with_skel_scaled)
        ax.scatter(center[0], center[1], color='red')
        # ax.imshow(image_scaled)
        # ax.scatter(joints_scaled[0, visible], joints_scaled[1, visible])
        # ax.scatter(pos_scaled[0], pos_scaled[1], color='red')
        ax.axis('on')
        plt.draw()
        plt.pause(0.01)
        """
        # Encode image:
        image_data_scaled = coder.encode_jpeg(image_scaled)
        example = convert_to_example(image_data_scaled, image_path, height,
                                     width, joints_scaled, center)
        writer.write(example.SerializeToString())
    # Finally return how many were written.
    return len(people)
def partial_es(Y_idx, X_idx, pred, data_in, epsilon=0.0001):
    """
    The analysis on the single-variable dependency in the neural network,
    estimated by a forward finite difference.

    Args:
        Y_idx: index of Y to access the target variable of interest
        X_idx: index of X to access the independent variable of a neural network
        pred: the specified predictive model, called as pred(data_in)
        data_in: the specified data of input layer; restored to its original
            values before returning
        epsilon: finite-difference step size

    Returns:
        The first-order derivative of Y on X for the specified X index and Y index
    """
    eps = epsilon
    y1 = pred(data_in)
    data_in[X_idx] += eps
    y2 = pred(data_in)
    # BUG FIX: undo the perturbation so the caller's input is not left
    # modified as a side effect of estimating the derivative.
    data_in[X_idx] -= eps
    return (y2[Y_idx] - y1[Y_idx]) / eps
def process_source_text(
    source_text: str,
    endpoint_config: submanager.models.config.FullEndpointConfig,
) -> str:
    """Perform text processing operations on the source text."""
    # Apply pattern replacement first, then truncate to the configured lines.
    replaced = submanager.sync.utils.replace_patterns(
        source_text,
        endpoint_config.replace_patterns,
    )
    return submanager.sync.utils.truncate_lines(
        replaced,
        endpoint_config.truncate_lines,
    )
def angle(v1, v2, deg=False):
    """
    Angle between two N dimmensional vectors.

    :param v1: vector 1.
    :param v2: vector 2.
    :param deg: if True angle is in Degrees, else radians.
    :return: angle in radians (or degrees when ``deg`` is True).

    Example::

        >>> angle((1, 0, 0), (0, 1, 0))
        1.5707963267948966
        >>> angle((1, 0, 0), (1, 0, 0))
        0.0
        >>> angle((1, 0, 0), (-1, 0, 0))
        3.141592653589793

    .. note:: obtained from http://stackoverflow.com/a/13849249/5288758
        and tested in http://onlinemschool.com/math/assistance/vector/angl/
        (The examples previously called a nonexistent ``angle_between``.)
    """
    # http://stackoverflow.com/a/13849249/5288758
    # test against http://onlinemschool.com/math/assistance/vector/angl/
    v1_u = unit_vector(v1)
    v2_u = unit_vector(v2)
    # Clip guards against dot products just outside [-1, 1] from FP error.
    a = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
    if deg:
        return np.rad2deg(a)  # *180.0/np.pi
    return a
def random_pure_actions(nums_actions, random_state=None):
    """
    Return a tuple of random pure actions (integers).

    Parameters
    ----------
    nums_actions : tuple(int)
        Tuple of the numbers of actions, one for each player.
    random_state : int or np.random.RandomState, optional
        Random seed (integer) or np.random.RandomState instance to set
        the initial state of the random number generator for
        reproducibility. If None, a randomly initialized RandomState is
        used.

    Returns
    -------
    action_profile : Tuple(int)
        Tuple of actions, one for each player.
    """
    random_state = check_random_state(random_state)
    # Build the tuple directly from a generator — no intermediate list.
    return tuple(
        random_state.randint(num_actions) for num_actions in nums_actions
    )
import os
def get_my_process():
    """Return the process object for the current process.

    Returns:
        [psutil.Process] -- process object
    """
    return get_process_object(os.getpid())
import pandas
import numpy
def prepare_and_store_dataframe(test_df: pandas.DataFrame, current_datetime: str, prediction: numpy.ndarray,
                                eval_identity: str, df_output_dir: str):
    """Prepares a dataframe that includes the testing data (timestamp, value), the detected anomalies and the labeled
    anomalies from the dataset and stores this as a .pkl file on the disk

    :param test_df: Dataframe that includes the used testing data; not modified (a deep copy is taken)
    :param current_datetime: Current datetime as string to be included in filename
    :param prediction: The predicted anomalies as numpy array
    :param eval_identity: The evaluation identity, consists of the dataset and the used algorithm, is used in filename
    :param df_output_dir: The output directory for resulting pickled dataframe; expected to end with a
        path separator, since the filename is appended by string concatenation
    :return: the combined dataframe that was pickled
    """
    df = test_df.copy(deep=True)
    df["prediction"] = prediction
    df["timestamp"] = df.index
    # Pickle the dataframe to <df_output_dir><eval_identity>-<datetime>.pkl
    # (the original used a stray string literal here, which is a no-op
    # expression statement, not a comment).
    df.to_pickle(df_output_dir + eval_identity + "-" + current_datetime + ".pkl")
    return df
def read_terrace_centrelines(DataDirectory, shapefile_name):
    """
    This function reads in a shapefile of terrace centrelines
    using shapely and fiona

    Args:
        DataDirectory (str): the data directory
        shapefile_name (str): the name of the shapefile

    Returns:
        dict: mapping of feature id to shapely LineString

    Author: FJC
    """
    Lines = {}
    # 'src' rather than 'input' so the builtin input() is not shadowed.
    with fiona.open(DataDirectory + shapefile_name, 'r') as src:
        for f in src:
            this_line = LineString(shape(f['geometry']))
            this_id = f['properties']['id']
            Lines[this_id] = this_line
    return Lines
def isfloat(s):
    """
    Checks whether the string ``s`` represents a float.

    :param s: the candidate string to test
    :type s: ``str``
    :return: True if s is the string representation of a number
    :rtype: ``bool``
    """
    try:
        float(s)
    except (TypeError, ValueError):
        # BUG FIX: the bare `except` also swallowed KeyboardInterrupt and
        # SystemExit; TypeError covers non-string inputs (e.g. None),
        # ValueError covers strings that do not parse as a float.
        return False
    return True
import aiohttp
async def _fetch_team_info(team_id=None):
    """Get general team information for *team_id* from the API."""
    url = f"{BASE_URL}teams/{team_id}"
    async with aiohttp.ClientSession() as session:
        payload = await _fetch_data(session, url)
    # The API wraps the record in a one-element 'teams' list.
    return payload['teams'][0]
def numeric(typ):
    """Return True when *typ*'s type constructor is a numeric one."""
    numeric_tcons = (Bool, Int, Float, Complex)
    return typ.tcon in numeric_tcons
def quadtree_point_in_polygon(
    poly_quad_pairs,
    quadtree,
    point_indices,
    points_x,
    points_y,
    poly_offsets,
    ring_offsets,
    poly_points_x,
    poly_points_y,
):
    """ Test whether the specified points are inside any of the specified
    polygons.

    Uses the table of (polygon, quadrant) pairs returned by
    ``cuspatial.join_quadtree_and_bounding_boxes`` to ensure only the points
    in the same quadrant as each polygon are tested for intersection.

    This pre-filtering can dramatically reduce number of points tested per
    polygon, enabling faster intersection-testing at the expense of extra
    memory allocated to store the quadtree and sorted point_indices.

    Parameters
    ----------
    poly_quad_pairs: cudf.DataFrame
        Table of (polygon, quadrant) index pairs returned by
        ``cuspatial.join_quadtree_and_bounding_boxes``.
    quadtree : cudf.DataFrame
        A complete quadtree for a given area-of-interest bounding box.
    point_indices : cudf.Series
        Sorted point indices returned by ``cuspatial.quadtree_on_points``
    points_x : cudf.Series
        x-coordinates of points used to construct the quadtree.
    points_y : cudf.Series
        y-coordinates of points used to construct the quadtree.
    poly_offsets : cudf.Series
        Begin index of the first ring in each polygon.
    ring_offsets : cudf.Series
        Begin index of the first point in each ring.
    poly_points_x : cudf.Series
        Polygon point x-coordinates.
    poly_points_y : cudf.Series
        Polygon point y-coordinates.

    Returns
    -------
    result : cudf.DataFrame
        Indices for each intersecting point and polygon pair.
        point_offset : cudf.Series
            Indices of each point that intersects with a polygon.
        polygon_offset : cudf.Series
            Indices of each polygon with which a point intersected.
    """
    # Coerce all coordinate inputs to columns of a common dtype.
    (
        points_x,
        points_y,
        poly_points_x,
        poly_points_y,
    ) = normalize_point_columns(
        as_column(points_x),
        as_column(points_y),
        as_column(poly_points_x),
        as_column(poly_points_y),
    )
    # Index/offset columns are passed to the C++ layer as uint32.
    return DataFrame._from_table(
        spatial_join.quadtree_point_in_polygon(
            poly_quad_pairs,
            quadtree,
            as_column(point_indices, dtype="uint32"),
            points_x,
            points_y,
            as_column(poly_offsets, dtype="uint32"),
            as_column(ring_offsets, dtype="uint32"),
            poly_points_x,
            poly_points_y,
        )
    )
from typing import Dict
def obtain_treasury_maturities(treasuries: Dict) -> pd.DataFrame:
    """Tabulate the maturity options per treasury instrument [Source: EconDB]

    Parameters
    ----------
    treasuries: dict
        A dictionary containing the options structured {instrument : {maturities: {abbreviation : name}}}

    Returns
    ----------
    df: pd.DataFrame
        Instrument names indexed against a comma-separated string of their maturity options.
    """
    maturity_strings = {}
    for name, info in treasuries["instruments"].items():
        maturity_strings[name] = ", ".join(info["maturities"].keys())

    df = pd.DataFrame.from_dict(maturity_strings, orient="index")
    # The synthetic "average" row has no fixed maturity set.
    df.loc["average"] = "Defined by function"
    return df
def graph(gra):
    """Serialize a molecular graph into its string representation.
    """
    return automol.graph.string(gra)
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.
    See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.diag.html
    """
    # NumPy-backed values take the standard numpy.diag path.
    if not is_casadi_type(v):
        return _onp.diag(v, k=k)

    # CasADi path: only the main diagonal (k == 0) is handled.
    if k != 0:
        raise NotImplementedError(
            "Should be super possible, just haven't had the need yet."
        )
    n_rows, n_cols = v.shape
    if n_rows == 1 or n_cols == 1:
        # Vector input: build the corresponding diagonal matrix.
        return _cas.diag(v)
    if n_rows == n_cols:
        raise NotImplementedError(
            "Should be super possible, just haven't had the need yet."
        )
    raise ValueError("Cannot return the diagonal of a non-square matrix.")
def which_set(connections_list_of_dics):
    """Interactively ask the user which derivation set to operate on.

    Builds a numbered menu from "all", "each", every derivation found in
    ``connections_list_of_dics``, and "EXIT", then reads the user's choice
    from stdin until a valid index is entered.

    Parameters
    ----------
    connections_list_of_dics : list of dict
        Connection records inspected by ``get_set_of_derivations``.

    Returns
    -------
    str
        The selected option name.
    """
    set_of_derivations = get_set_of_derivations(connections_list_of_dics)
    list_of_derivations = ["all", "each"]
    for this_deriv in list(set_of_derivations):
        list_of_derivations.append(this_deriv)
    list_of_derivations.append("EXIT")
    print(' ')
    # http://stackoverflow.com/questions/6410982/enumerate-items-in-a-list-so-a-user-can-select-the-numeric-value
    for item in enumerate(list_of_derivations):
        print("[%d] %s" % item)
    # Re-prompt until we get a usable integer index.  The original code
    # used Python 2 print/raw_input statements and, on bad input, fell
    # through to use an undefined variable (NameError).
    while True:
        try:
            idx = int(input("\nEnter the derivation's number: "))
            which_set_name = list_of_derivations[idx]
            break
        except ValueError:
            print("You fail at typing numbers; provide an integer.")
        except IndexError:
            print("Try a number in range next time.")
    print("selected: " + which_set_name)
    return which_set_name
def ensure_derived_space(func):
    """
    Decorator for Surface functions that require ImageSpace arguments.

    Internally, Surface objects store information indexed to a minimal
    enclosing voxel grid (referred to as the self.index_grid) based on
    some arbitrary ImageSpace. When interacting with other ImageSpaces,
    this function ensures that two grids are compatible with each other.

    Raises
    ------
    RuntimeError
        If the first positional argument is not a BaseSpace, if the surface
        has not been indexed yet, or if the target space does not derive
        from the surface's current index space.
    """
    import functools

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original decorator discarded.
    @functools.wraps(func)
    def ensured(self, *args):
        if (not args) or (not isinstance(args[0], BaseSpace)):
            raise RuntimeError("Function must be called with ImageSpace argument first")
        if self.indexed is None:
            raise RuntimeError("Surface must be indexed prior to using this function. " +
                "Call surface.index_on()")
        if not self.indexed.space.derives_from(args[0]):
            raise RuntimeError(
                "Target space is not derived from surface's current index space. " +
                "Call surface.index_on with the target space first")
        return func(self, *args)
    return ensured
def available_datasets():
    """Return the names of all registered datasets, sorted alphabetically."""
    return sorted(_datasets)
def readable_date(input_date):
    """Render a date object in a human-friendly form, e.g. ``January 5, 2020``.

    :param input_date: a date object
    :return: more readable string representation of the date
    """
    return f"{month_name[input_date.month]} {input_date.day}, {input_date.year}"
def get_unicode_from_response(response):
    """Return the requested content back in unicode.

    This will first attempt to retrieve the encoding from the response
    headers. If that fails, it will use
    :func:`requests_toolbelt.utils.deprecated.get_encodings_from_content`
    to determine encodings from HTML elements.

    .. code-block:: python

        import requests
        from requests_toolbelt.utils import deprecated

        r = requests.get(url)
        text = deprecated.get_unicode_from_response(r)

    :param response: Response object to get unicode content from.
    :type response: requests.models.Response
    """
    # Encodings already attempted, so each candidate is decoded only once.
    tried_encodings = set()

    # Try charset from content-type
    encoding = utils.get_encoding_from_headers(response.headers)

    if encoding:
        try:
            return str(response.content, encoding)
        except UnicodeError:
            tried_encodings.add(encoding.lower())

    # Next, try encodings declared inside the HTML content itself.
    encodings = get_encodings_from_content(response.content)

    for _encoding in encodings:
        _encoding = _encoding.lower()
        if _encoding in tried_encodings:
            continue

        try:
            return str(response.content, _encoding)
        except UnicodeError:
            tried_encodings.add(_encoding)

    # Fall back: decode with the header charset, replacing undecodable
    # bytes instead of raising; TypeError covers a non-str encoding value.
    if encoding:
        try:
            return str(response.content, encoding, errors='replace')
        except TypeError:
            pass
    # Last resort: let requests' own heuristic decoding handle it.
    return response.text
def stack_layers(inputs, net_layers, kernel_initializer='glorot_uniform'):
    """Builds the architecture of the network by applying each layer specified in net_layers to inputs.

    Args:
        inputs: a dict containing input_types and input_placeholders for each key
            and value pair, respecively.
        net_layers: a list of dicts containing all layers to be used in the
            network, where each dict describes one such layer. each dict requires the
            key 'type'. all other keys are dependent on the layer type.
        kernel_initializer: initialization configuration passed to keras (see keras
            initializers).

    Returns:
        outputs: a dict formatted in much the same way as inputs. it
            contains input_types and output_tensors for each key and value pair,
            respectively, where output_tensors are the outputs of the
            input_placeholders in inputs after each layer in net_layers is applied.

    Raises:
        ValueError: if a layer dict has an unrecognized 'type'.
    """
    # Start from a shallow copy so the caller's dict is not mutated.
    outputs = dict()
    for key in inputs:
        outputs[key] = inputs[key]

    for layer in net_layers:
        # check for l2_reg argument
        l2_reg = layer.get('l2_reg')
        if l2_reg:
            l2_reg = l2(layer['l2_reg'])
        # create the layer
        if layer['type'] in [
            'softplus', 'softsign', 'softmax', 'tanh', 'sigmoid', 'relu', 'selu'
        ]:
            # Dense layer whose activation is the layer type itself.
            l = layers.Dense(
                layer['size'],
                activation=layer['type'],
                kernel_initializer=kernel_initializer,
                kernel_regularizer=l2_reg,
                name=layer.get('name'))
        elif layer['type'] == 'None':
            # Linear (no-activation) dense layer.
            l = layers.Dense(
                layer['size'],
                kernel_initializer=kernel_initializer,
                kernel_regularizer=l2_reg,
                name=layer.get('name'))
        elif layer['type'] == 'Conv2D':
            l = layers.Conv2D(
                layer['channels'],
                kernel_size=layer['kernel'],
                activation='relu',
                data_format='channels_last',
                kernel_regularizer=l2_reg,
                name=layer.get('name'))
        elif layer['type'] == 'BatchNormalization':
            l = layers.BatchNormalization(name=layer.get('name'))
        elif layer['type'] == 'MaxPooling2D':
            # NOTE(review): data_format here is 'channels_first' while Conv2D
            # above uses 'channels_last' — confirm this asymmetry is intended.
            l = layers.MaxPooling2D(
                pool_size=layer['pool_size'],
                data_format='channels_first',
                name=layer.get('name'))
        elif layer['type'] == 'Dropout':
            l = layers.Dropout(layer['rate'], name=layer.get('name'))
        elif layer['type'] == 'Flatten':
            l = layers.Flatten(name=layer.get('name'))
        else:
            raise ValueError("Invalid layer type '{}'".format(layer['type']))

        # apply the (shared) layer instance to each input in inputs
        for k in outputs:
            outputs[k] = l(outputs[k])

    return outputs
def instance_gpu() -> str:
    """
    Returns the GPU model of the current Colab instance.

    :return: The GPU model string
    """
    gpu_descriptions = [
        d.physical_device_desc
        for d in device_lib.list_local_devices()
        if d.device_type == "GPU"
    ]
    first_gpu = gpu_descriptions[0]
    # Description looks like "device: 0, name: <model>, ..." — pull the name.
    return first_gpu.split(",")[1].split(":")[1].strip()
def _get_expected_samples(A_s, b_s, mu_0, sample_shape) -> np.ndarray:
"""
Given an initial `mu_0`, calculate the expected samples from an almost-deterministic
`StateSpaceModel`.
"""
*batch_shape, transitions, state_dim = b_s.shape
means_list = [mu_0]
for i in range(transitions):
means_list.append(
np.einsum("...jk,...k->...j", A_s[..., i, :, :], means_list[-1]) + b_s[..., i, :]
)
# [... 1, num_transitions, state_dim]
means = np.stack(means_list, axis=-2)
# sample_shape +[num_transitions, state_dim]
sample_shape = [sample_shape,] if isinstance(sample_shape, int) else list(sample_shape)
expected_samples = np.broadcast_to(
means, sample_shape + batch_shape + [transitions + 1, state_dim]
)
return expected_samples | 46a88611ae0a04851474fbbe5605a523e3e42a6a | 3,631,355 |
import ast
def local_vars(fn: ast.AST):
    """Collect every function-local variable name into a set."""
    names = _locals_impl(fn)
    return set(names)
def swap(size: int, target0: int, target1: int) -> Matrix:
    """
    Construct swap gate which swaps two states

    :param int size: total number of qubits in circuit
    :param int target0: The first target bit to swap
    :param int target1: The second target bit to swap
    returns:
        Matrix: Matrix representing the gate
    """
    # Can be optimized further
    assert size > 1, "need minimum of two qbits"
    assert target0 != target1, "swap targets must be different"
    valid_targets = range(size)
    assert target0 in valid_targets, "first target bit out of range"
    assert target1 in valid_targets, "second target bit out of range"
    lo, hi = sorted((target0, target1))

    dim = 2 ** size
    gate: Matrix = DefaultMatrix.zeros(dim, dim)
    for basis_index in range(dim):
        bits = bin(basis_index)[2:].zfill(size)
        # Exchange the bit characters at positions lo and hi.
        swapped_bits = (
            bits[:lo] +
            bits[hi] +
            bits[lo + 1:hi] +
            bits[lo] +
            bits[hi + 1:]
        )
        col = int(bits, 2)
        row = int(swapped_bits, 2)

        col_entries: MATRIX = [[0] for _ in range(dim)]
        row_entries: MATRIX = [[0] for _ in range(dim)]
        col_entries[col] = [1]
        row_entries[row] = [1]
        basis_vec = DefaultMatrix(col_entries)
        swapped_vec = DefaultMatrix(row_entries)
        # Accumulate the outer products |swapped><basis|.
        gate += swapped_vec * basis_vec.transpose()
    return gate
def get_ELS_file_name(dt, remove_extension=False):
    """
    >>> get_ELS_file_name('28-06-2004/22:00')
    'ELS_200418018_V01.DAT'
    >>> get_ELS_file_name('28-06-2004/09:00')
    'ELS_200418006_V01.DAT'
    >>> get_ELS_file_name('29-06-2004/09:00')
    'ELS_200418106_V01.DAT'
    >>> get_ELS_file_name('29-06-2005/09:00')
    'ELS_200518006_V01.DAT'
    >>> get_ELS_file_name('30-06-2005/09:00')
    'ELS_200518106_V01.DAT'
    >>> get_ELS_file_name(datetime(year=2004, month=1, day=1))
    'ELS_200400100_V01.DAT'
    >>> get_ELS_file_name(datetime(year=2004, month=1, day=2))
    'ELS_200400200_V01.DAT'
    """
    # Accept either a date string or an existing datetime object.
    try:
        dt = convert_to_dt(dt)
    except TypeError:
        pass

    # Day-of-year zero-padded to three digits.
    doy_str = str(day_of_year(dt)).zfill(3)
    # Files are cut in 6-hour chunks starting at 00, 06, 12 and 18 hours.
    hour_str = ('00', '06', '12', '18')[hour(dt) // 6]

    template = 'ELS_%d%s%s_V01' if remove_extension else 'ELS_%d%s%s_V01.DAT'
    return template % (year(dt), doy_str, hour_str)
def percentage_to_float(x):
    """Convert a string representation of a percentage to float.

    >>> percentage_to_float('55%')
    0.55

    Args:
        x: String representation of a percentage

    Returns:
        float: Percentage in decimal form
    """
    numeric_part = x.strip('%')
    return float(numeric_part) / 100
def _naics_code_to_name(naics_val: str) -> str:
    """Converts NAICS codes to their industry using the _NAICS_MAP.

    Args:
        naics_val: A NAICS string literal to process.
            Expected syntax of naics_val - NAICS/{codes}
            '-' can be used to denote range of codes that may or may not belong
            to the same industry. For eg, 44-45 will be mapped to 'RetailTrade'.
            '_' can be used to represent multiple industries. For eg, 51_52 will
            be mapped to 'InformationFinanceInsurance'. A combination of '-' and
            '_' is acceptable.

    Returns:
        A string with all NAICS codes changed to their respective industry.
        This string can be used in dcid generation. Returns None if the string
        is empty or if the string does not follow the expected syntax.
    """

    # Helper function to process NAICS ranges
    def _process_naics_range(range_str: str) -> str:
        industry_str = ''
        match = _NAICS_RANGE_REGEX.search(range_str)
        m_dict = match.groupdict()
        lower_limit = int(m_dict['lower_limit'])
        upper_limit = int(m_dict['upper_limit'])
        prev_str = None  # To ensure the same industry is not added twice
        for code in range(lower_limit, upper_limit + 1):
            code_str = str(code)
            if code_str in _NAICS_MAP and prev_str != _NAICS_MAP[code_str]:
                industry_str = industry_str + _NAICS_MAP[code_str]
                prev_str = _NAICS_MAP[code_str]
            else:
                continue
        return industry_str

    if naics_val:
        processed_str = 'NAICS'
        # Remove namespaces
        naics_val = naics_val[naics_val.find(':') + 1:]
        # Strip NAICS/
        naics_val = naics_val.replace('NAICS/', '')
        matches = _NAICS_CODE_REGEX.findall(naics_val)
        if not matches:
            return None
        for match_str in matches:
            if match_str.find('-') != -1:  # Range
                industry_str = _process_naics_range(match_str)
            else:
                # NOTE(review): a single code missing from _NAICS_MAP raises
                # KeyError here, while the range branch silently skips unknown
                # codes — confirm this asymmetry is intended.
                industry_str = _NAICS_MAP[match_str]
            processed_str = processed_str + industry_str
        return processed_str
    return None
import copy
def threshold_distribution(distribution, target_bin=128):
    """
    Return the best threshold value.
    Ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py

    Args:
        distribution: list, activations has been processed by histogram and normalize,size is 2048
        target_bin: int, the num of bin that is used by quantize, Int8 default value is 128

    Returns:
        target_threshold: int, num of bin with the minimum KL
    """
    # Drop the first (zero) bin; it is not informative for threshold search.
    distribution = distribution[1:]
    length = distribution.size
    # Mass that lies beyond the current threshold candidate; folded into the
    # last kept bin of the reference distribution p below.
    threshold_sum = sum(distribution[target_bin:])
    kl_divergence = np.zeros(length - target_bin)

    for threshold in range(target_bin, length):
        sliced_nd_hist = copy.deepcopy(distribution[:threshold])

        # generate reference distribution p
        p = sliced_nd_hist.copy()
        # Saturate: everything past the threshold is counted in the last bin.
        p[threshold-1] += threshold_sum
        threshold_sum = threshold_sum - distribution[threshold]

        # is_nonzeros[k] indicates whether hist[k] is nonzero
        is_nonzeros = (p != 0).astype(np.int64)
        #
        quantized_bins = np.zeros(target_bin, dtype=np.int64)
        # calculate how many bins should be merged to generate quantized distribution q
        num_merged_bins = sliced_nd_hist.size // target_bin
        # merge hist into num_quantized_bins bins
        for j in range(target_bin):
            start = j * num_merged_bins
            stop = start + num_merged_bins
            quantized_bins[j] = sliced_nd_hist[start:stop].sum()
        # Leftover bins (when size is not divisible) go into the last bucket.
        quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
        # expand quantized_bins into p.size bins, spreading each bucket's mass
        # uniformly over its originally nonzero positions
        q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
        for j in range(target_bin):
            start = j * num_merged_bins
            if j == target_bin - 1:
                stop = -1
            else:
                stop = start + num_merged_bins
            norm = is_nonzeros[start:stop].sum()
            if norm != 0:
                q[start:stop] = float(quantized_bins[j]) / float(norm)
        q[p == 0] = 0
        # p = _smooth_distribution(p) # with some bugs, need to fix
        # q = _smooth_distribution(q)
        # Replace zeros with a small epsilon so the KL divergence is finite.
        p[p == 0] = 0.0001
        q[q == 0] = 0.0001
        # calculate kl_divergence between q and p
        kl_divergence[threshold - target_bin] = stats.entropy(p, q)

    # The best threshold is the candidate with minimal KL divergence.
    min_kl_divergence = np.argmin(kl_divergence)
    threshold_value = min_kl_divergence + target_bin

    return threshold_value
from typing import Optional
import re
def get_pragma_spec(source: str) -> Optional[NpmSpec]:
    """
    Extracts pragma information from Solidity source code.

    Args:
        source: Solidity source code

    Returns: NpmSpec object or None, if no valid pragma is found
    """
    match = re.search(r"(?:\n|^)\s*pragma\s*solidity\s*([^;\n]*)", source)
    if match is None:
        return None  # Try compiling with latest
    # Collapse any internal whitespace in the version constraint.
    version_constraint = " ".join(match.groups()[0].split())
    try:
        return NpmSpec(version_constraint)
    except ValueError:
        return None
def delete_form(context, *args, **kwargs):
    """Template tag rendering a delete-object form.

    Requires an ``action`` URL (positionally or by keyword) and an object
    (keyword or taken from the template context).
    """
    action = args[0] if args else kwargs.get('action')
    if action is None:
        raise TemplateSyntaxError(
            "delete_form template tag "
            "requires at least one argument: "
            "action, which is a URL.")

    method = args[1] if len(args) > 1 else kwargs.get('method')

    display_object = kwargs.get('object', context.get('object'))
    if display_object is None:
        raise TemplateSyntaxError(
            "delete_form needs object "
            "manually specified in this case.")

    # Prefer an explicit .name attribute, else fall back to str().
    if hasattr(display_object, 'name'):
        object_name = display_object.name
    else:
        object_name = str(display_object)

    object_type = kwargs.get('obj_type', verbose_name(display_object))

    return {
        'action': action,
        'method': method,
        'object': display_object,
        'object_name': object_name,
        'object_type': object_type,
        'form': context.get('form'),
    }
def _ligandscout_xml_tree(pharmacophore):
    """ Get an xml element tree necessary to create a ligandscout pharmacophore.

    Parameters
    ----------
    pharmacophore : openpharmacophore.Pharmacophore
        Pharmacophore object that will be saved to a file.

    Returns
    -------
    tree : xml.etree.ElementTree.ElementTree
        The element tree rooted at the <pharmacophore> element.
    document : xml.etree.ElementTree.Element
        The root <pharmacophore> element itself.
    """
    Feature = namedtuple("Feature", ["name", "id"])
    feature_mapper = {  # dictionary to map openpharmacophore features to ligandscout
        "aromatic ring": Feature("AR", "ai_"),
        "hydrophobicity": Feature("H", "hi_"),
        "hb acceptor": Feature("HBA", "ha_"),
        "hb donor": Feature("HBD", "hd_"),
        "excluded volume": Feature("exclusion", "ev_"),
        "positive charge": Feature("PI", "pi_"),
        "negative charge": Feature("NI", "ni_"),
    }

    document = ET.Element("pharmacophore")
    document.set("name", "pharmacophore.pml")
    document.set("pharmacophoreType", "LIGAND_SCOUT")

    for i, element in enumerate(pharmacophore.elements):
        try:
            feat_name = feature_mapper[element.feature_name].name
        except KeyError:
            # Skip features not supported by ligandscout.  (Was a bare
            # except, which also swallowed unrelated errors.)
            continue
        coords = puw.get_value(element.center, to_unit="angstroms")
        x = str(coords[0])
        y = str(coords[1])
        z = str(coords[2])
        radius = str(puw.get_value(element.radius, to_unit="angstroms"))
        feat_id = feature_mapper[element.feature_name].id + str(i + 1)

        is_point = (feat_name == "PI" or feat_name == "NI" or feat_name == "H"
                    or feat_name == "HBD" or feat_name == "HBA")
        is_vector = element.has_direction and (feat_name == "HBD" or feat_name == "HBA")

        if is_vector:
            direction = coords - element.direction
            dir_x = str(direction[0])
            dir_y = str(direction[1])
            dir_z = str(direction[2])
            vector = ET.SubElement(document, "vector")
            # Set vector attributes
            vector.set("name", feat_name)
            vector.set("featureId", feat_id)
            vector.set("pointsToLigand", "false")
            vector.set("hasSyntheticProjectedPoint", "false")
            vector.set("optional", "false")
            vector.set("disabled", "false")
            vector.set("weight", "1.0")
            # Add origin tag
            origin = ET.SubElement(vector, "origin")
            origin.set("x3", x)
            origin.set("y3", y)
            origin.set("z3", z)
            origin.set("tolerance", radius)
            # Add target tag
            target = ET.SubElement(vector, "target")
            target.set("x3", dir_x)
            target.set("y3", dir_y)
            target.set("z3", dir_z)
            target.set("tolerance", radius)
        elif is_point:
            point = ET.SubElement(document, "point")
            # Set point attributes
            point.set("name", feat_name)
            point.set("featureId", feat_id)
            point.set("optional", "false")
            point.set("disabled", "false")
            point.set("weight", "1.0")
            # Add position tag
            position = ET.SubElement(point, "position")
            position.set("x3", x)
            position.set("y3", y)
            position.set("z3", z)
            position.set("tolerance", radius)
        elif feat_name == "AR":
            direction = element.direction
            dir_x = str(direction[0])
            dir_y = str(direction[1])
            dir_z = str(direction[2])
            plane = ET.SubElement(document, "plane")
            # Set plane attributes
            plane.set("name", feat_name)
            plane.set("featureId", feat_id)
            plane.set("optional", "false")
            plane.set("disabled", "false")
            plane.set("weight", "1.0")
            # Add position tag
            position = ET.SubElement(plane, "position")
            position.set("x3", x)
            position.set("y3", y)
            position.set("z3", z)
            position.set("tolerance", radius)
            # Add normal tag
            normal = ET.SubElement(plane, "normal")
            normal.set("x3", dir_x)
            normal.set("y3", dir_y)
            normal.set("z3", dir_z)
            normal.set("tolerance", radius)
        elif feat_name == "exclusion":
            volume = ET.SubElement(document, "volume")
            # Set volume attributes
            volume.set("type", "exclusion")
            volume.set("featureId", feat_id)
            volume.set("optional", "false")
            volume.set("disabled", "false")
            volume.set("weight", "1.0")
            # Add position tag
            position = ET.SubElement(volume, "position")
            position.set("x3", x)
            position.set("y3", y)
            position.set("z3", z)
            position.set("tolerance", radius)

    # Build the tree with the assembled document as root directly, instead
    # of creating a dummy tree and mutating it via the private _setroot().
    tree = ET.ElementTree(document)
    return tree, document
def get_form(line):
    """Return the surface form of a lexicon line.

    The form is the first tab-separated field; an empty line yields "".
    """
    if line == "":
        return ""
    fields = line.split("\t")
    return fields[0]
import time
import requests
def request_get_content(url: str, n_retry: int = 3) -> bytes:
    """Retrieve the binary content at url.

    Retries on connection errors with exponential backoff, but gives up
    immediately on HTTP client ("Client Error", i.e. 4xx) responses.

    :param url: address to download
    :param n_retry: maximum number of attempts before the last error is raised
    :return: the raw response body
    """
    t0 = time.time()
    for i in range(1, n_retry + 1):
        try:
            r = _session().get(url)
            r.raise_for_status()
            break
        except requests.exceptions.RequestException as e:
            # Sleep and try again on error, unless it's a client (4xx) error.
            # Guard e.args being empty, which previously raised IndexError.
            message = e.args[0] if e.args and isinstance(e.args[0], str) else ""
            if i == n_retry or "Client Error" in message:
                raise e
            time.sleep(10 * 2 ** i)
    dl_time = time.time() - t0
    print('Downloaded {} in {} seconds'.format(url, dl_time))
    return r.content
def _escape_special_chars(content):
"""No longer used."""
content = content.replace("\N{RIGHT-TO-LEFT OVERRIDE}", "")
if len(content) > 300: # https://github.com/discordapp/discord-api-docs/issues/1241
content = content[:300] + content[300:].replace('@', '@ ')
return content | 816fc3ba15150c3e254a17d1a021d1ddee11e49f | 3,631,367 |
def gather(results_dir):
    """Move all of the files and directories from the present working
    directory into results_dir.

    If results_dir doesn't exist, create it.  Delete any symbolic links so
    that the present working directory is empty afterwards.

    :param results_dir: Path of the directory into which to store the run
        results.
    :type results_dir: :py:class:`pathlib.Path`
    """
    # Delegate entirely to the gather plugin implementation.
    return gather_plugin.gather(results_dir)
def system_dynamics(t, x, params,):
    """Damped-pendulum state derivatives for one integration step.

    Parameters
    ----------
    t : float
        Current time step (unused; kept for ODE-solver compatibility).
    x : sequence of float
        State vector ``(angle, angular velocity)``.
    params : dict
        Simulation parameters with keys 'mass', 'gravity', 'length',
        'friction'.

    Returns
    -------
    numpy.ndarray
        Derivatives ``[d(angle)/dt, d(velocity)/dt]``.
    """
    angle, velocity = x
    mass = params['mass']
    gravity = params['gravity']
    length = params['length']
    friction = params['friction']

    # Damped pendulum equations of motion.
    angle_dot = velocity
    velocity_dot = -(gravity / length) * np.sin(angle) - (friction / mass) * velocity
    return np.array([angle_dot, velocity_dot])
def key():
    """Connection key for plain HTTP to localhost:80 (no TLS, no proxy)."""
    connection_key = ConnectionKey('localhost', 80, False, None, None, None, None)
    return connection_key
import ipdb
def _check_deviation(indexesv,
                     xdatav,
                     ydatav,
                     yarray_,
                     ii,
                     start_,
                     end_,
                     mbf,
                     dev_thresh,
                     no_data,
                     baseline,
                     low_values,
                     valid_samples,
                     predict_linear,
                     max_days,
                     outlier_info):
    """
    Checks each sample's deviation from the window linear fit and records at
    most one outlier (the worst offender) into ``outlier_info``, replacing
    its value in ``yarray_`` with the model prediction.

    Returns
    -------
    tuple
        ``(outlier_info, yarray_)``.
    """
    OutlierData = namedtuple('OutlierData', 'col_pos xdata ydata alpha beta1 beta2')
    max_dev = -1e9
    outlier_count = 0
    # Midpoint of the window, used to down-weight samples near the edges.
    dhalf = int(start_ + ((end_ - start_) / 2.0))
    dmax = abs(float(dhalf - start_))
    if predict_linear:
        beta1 = mbf[ii, 0]
        beta2 = 0.0
        alpha = mbf[ii, 1]
        xvalue = xdatav[1]
        yvalue = ydatav[1]
        if (low_values < yvalue <= baseline) and (low_values < ydatav[0] <= baseline) and (low_values < ydatav[2] <= baseline):
            # All three samples sit at baseline: nothing to flag.
            outlier_count = 0
        else:
            yhat = linear_adjustment(int(xdatav[0]), int(xdatav[1]), int(xdatav[2]), ydatav[0], ydatav[2])
            if (low_values < yvalue <= 0.4) and (max(ydatav[0], ydatav[2]) >= 0.45) and (ydatav[0] > yvalue < ydatav[2]):
                # Protect crop cycles
                w1 = 1.0 - (max(ydatav[0], ydatav[2]))**0.5
            elif (yvalue >= 0.45) and (min(ydatav[0], ydatav[2]) <= baseline):
                # Protect high outliers relative to baseline
                w1 = 0.1
            elif ((low_values < yvalue <= baseline) and (low_values <= ydatav[0] <= baseline)) or ((low_values <= yvalue <= baseline) and (low_values <= ydatav[2] <= baseline)):
                # Protect baseline values
                w1 = 0.1
            else:
                # Protect high values
                w1 = 1.0 if yhat >= yvalue else 0.33
            # Weight the distance
            max_dist = max(abs(xdatav[1] - xdatav[0]), abs(xdatav[1] - xdatav[2]))
            scaled_days = 1.0 - scale_min_max(max_dist, 0.1, 0.9, 0.0, max_days)
            w2 = scale_min_max(logistic_func(scaled_days, 0.5, 10.0), 0.1, 1.0, 0.0, 1.0)
            w = w1 * w2
            # Get the weighted deviation
            dev = abs(prop_diff(yvalue, yhat)) * w
            if not np.isinf(dev) and not np.isnan(dev):
                # Remove the outlier
                if dev > dev_thresh:
                    # This check avoids large dropoffs from senescence to valleys
                    if (prop_diff(ydatav[0], ydatav[1]) < -0.9) and (prop_diff(ydatav[0], ydatav[2]) < -0.9):
                        outlier_count = 0
                    # This check avoids large increases from valleys to green-up
                    elif (prop_diff(ydatav[2], ydatav[1]) < -0.9) and (prop_diff(ydatav[2], ydatav[0]) < -0.9):
                        outlier_count = 0
                    else:
                        if ydatav[0] < yvalue > ydatav[2]:
                            # Local spike
                            max_outlier = indexesv[1]
                            outlier_count = 1
                        elif ydatav[0] > yvalue < ydatav[2]:
                            # Local dip
                            max_outlier = indexesv[1]
                            outlier_count = 1
    else:
        for jj in range(1, valid_samples-1):
            xvalue = xdatav[jj]
            yvalue = ydatav[jj]
            # Sample prediction (quadratic fit), clipped to 0-1
            yhat = clip(mbf[ii, 0] * xvalue + mbf[ii, 1] * pow(xvalue, 2) + mbf[ii, 2], 0.0, 1.0)
            # Give a lower weight for low values
            # if yarray_[ii, jj] <= low_values:
            #     w2 = 1.0
            if low_values < yvalue <= baseline:
                continue
            # Give a lower weight for values above prediction
            w1 = 1.0 if yhat >= yvalue else 0.25
            # Give a lower weight for values at prediction ends
            w2 = 1.0 - scale_min_max(abs(float(dhalf) - float(indexesv[jj])), 0.1, 1.0, 0.0, dmax)
            # Combine the weights
            w = w1 * w2
            # Get the weighted deviation
            dev = abs(prop_diff(yvalue, yhat)) * w
            if not np.isinf(dev) and not np.isnan(dev):
                # Remove the outlier
                if dev > dev_thresh:
                    # This check avoids large dropoffs from senescence to valleys
                    if (prop_diff(ydatav[0], ydatav[2]) < -0.9) and (prop_diff(ydatav[0], ydatav[3]) < -0.9):
                        outlier_count += 0
                    # This check avoids large increases from valleys to green-up
                    elif (prop_diff(ydatav[4], ydatav[2]) < -0.9) and (prop_diff(ydatav[4], ydatav[1]) < -0.9):
                        outlier_count += 0
                    else:
                        # A leftover `ipdb.set_trace()` debugger breakpoint was
                        # removed from this branch.
                        if dev > max_dev:
                            max_dev = dev
                            max_outlier = indexesv[jj]
                        outlier_count += 1
        beta1 = mbf[ii, 0]
        beta2 = mbf[ii, 1]
        alpha = mbf[ii, 2]
        xdatav = xdatav[1:4]
        ydatav = ydatav[1:4]
    # Restrict outlier removal to one value
    if outlier_count > 0:
        outlier_data = OutlierData(col_pos=start_, xdata=xdatav, ydata=ydatav, alpha=alpha, beta1=beta1, beta2=beta2)
        outlier_info.append(outlier_data)
        # NOTE(review): `yhat` here is whichever prediction was computed last,
        # which may not correspond to `max_outlier` — confirm intended.
        yarray_[ii, max_outlier] = yhat
    return outlier_info, yarray_
def crop_boxes_inv(cropped_boxes, crop_shape):
    """Map box coordinates from a cropped image back to the raw image frame.

    Inverse operation of ``crop_boxes``: the crop origin is added back onto
    the (x1, y1, x2, y2) columns of every box.
    """
    offset_x = crop_shape[0]
    offset_y = crop_shape[1]
    raw_boxes = np.zeros_like(cropped_boxes)
    # x-coordinates (columns 0 and 2 of each box) shift by the crop x-origin,
    # y-coordinates (columns 1 and 3) by the crop y-origin.
    raw_boxes[:, 0::4] = cropped_boxes[:, 0::4] + offset_x
    raw_boxes[:, 1::4] = cropped_boxes[:, 1::4] + offset_y
    raw_boxes[:, 2::4] = cropped_boxes[:, 2::4] + offset_x
    raw_boxes[:, 3::4] = cropped_boxes[:, 3::4] + offset_y
    return raw_boxes
from collections import OrderedDict
import inspect
def build_paramDict(cur_func):
    """
    Iterate through all inputs of a function and save the default argument
    names and values into a dictionary.

    If any of the default arguments are functions themselves, then recursively
    (depth-first) add an extra field to the dictionary, named
    <funcName + "_params">, that contains their inputs and arguments.

    The output of this function can then be passed as a "kwargs" object to the
    highest level function, which will then pass the parameter values to the
    lower dictionary levels appropriately.
    """
    # Use collections.OrderedDict: instantiating the typing.OrderedDict alias
    # (as the original did) is deprecated.
    paramDict = OrderedDict()
    allArgs = inspect.getfullargspec(cur_func)
    # No default parameters -> nothing to record.
    if allArgs.defaults is None:
        return paramDict
    # Defaults align with the *last* len(defaults) positional arguments.
    for argname, argval in zip(allArgs.args[-len(allArgs.defaults):], allArgs.defaults):
        # Save the default argument
        paramDict[argname] = argval
        # If the default argument is a function, inspect it recursively.
        if callable(argval):
            paramDict[argname + "_params"] = build_paramDict(argval)
    return paramDict
def auto_gen_message(open, fill, close):
    """
    Produces the auto-generated warning header with language-specific syntax.

    open  - str - The language-specific opening of the comment
    fill  - str - The values to fill the background with
    close - str - The language-specific closing of the comment
    """
    assert open or fill or close
    message = AUTO_GEN_MESSAGE.strip()
    # Replace one run of fill characters at the start with the opener.
    if open:
        message = message.replace(MESSAGE_FILL * len(open), open, 1)
    # For the closer, operate on the reversed string so that only the last
    # occurrence is replaced, then restore the original orientation.
    if close:
        reversed_message = reverse(message)
        reversed_message = reversed_message.replace(MESSAGE_FILL * len(close), close[::-1], 1)
        message = reverse(reversed_message)
    # Any remaining fill runs become the background fill text.
    if fill:
        message = message.replace(MESSAGE_FILL * len(fill), fill)
    return message
def neighbor(matrix, taxa=None, distances=True):
    """
    Function clusters data according to the Neighbor-Joining algorithm \
    (:evobib:`Saitou1987`).

    Parameters
    ----------
    matrix : list of list of float
        Symmetric distance matrix.
    taxa : list of str, optional
        Taxon labels; auto-generated ("t_1", "t_2", ...) when missing.
    distances : bool
        Whether branch lengths are included in the Newick output.

    Returns
    -------
    str
        Newick representation of the inferred tree.
    """
    # Resolve taxa names *before* sizing the cluster map: the original built
    # `clusters` from len(taxa) first, which raised TypeError when `taxa`
    # was left at its None default.
    taxa = check_language_names(taxa) or [
        "t_" + str(i + 1) for i in range(len(matrix[0]))
    ]

    clusters = dict([(i, [i]) for i in range(len(taxa))])
    formatter = "({0}:{2:.4f},{1}:{3:.4f})" if distances else "({0},{1})"
    newick = dict([(i, taxa[i]) for i in range(len(matrix[0]))])
    tree = []

    neighbor_recursive(
        clusters,
        matrix,
        tree,
        [[c for c in l] for l in matrix],
        dict([(tuple([a]), b[0]) for (a, b) in clusters.items()]),
    )
    # create different output, depending on the options for the inclusion of
    # distances or topology only
    for i, (a, b, c, d) in enumerate(tree):
        newick[len(taxa) + i] = formatter.format(newick[a], newick[b], c, d)

    newick_string = newick[max(newick.keys())] + ";"
    return newick_string
def mixed_type_frame():
    """
    Fixture for DataFrame of float/int/string columns with RangeIndex.

    Columns are ['a', 'b', 'c', 'float32', 'int32'].
    """
    data = {
        'a': 1.,
        'b': 2,
        'c': 'foo',
        'float32': np.full(10, 1., dtype='float32'),
        'int32': np.full(10, 1, dtype='int32'),
    }
    return DataFrame(data, index=np.arange(10))
def dar_state():
    """Report the current DAR state as a JSON payload."""
    current_state = dar.state
    return jsonify(state=current_state)
def has_prefix(sub_s):
    """
    :param sub_s: candidate prefix assembled from the string's letters
    :return: True if any entry of the global dictionary ``d`` starts with
        sub_s, otherwise False
    """
    # `global` is unnecessary for a read-only lookup; short-circuit with any().
    return any(d[entry].startswith(sub_s) for entry in d)
def MaskStringWithIPs(string):
    """Mask all private IP addresses listed in a string.

    Each private IP found in the string is replaced by its masked form;
    public IPs are left untouched.
    """
    for address in ExtractIPsFromString(string):
        mask_bits = IsPrivateIP(address)
        if not mask_bits:
            continue
        string = string.replace(address, MaskIPBits(address, mask_bits))
    return string
def DiagGaussian_UnifBins(mean, stdd, bin_min, bin_max, coding_prec, n_bins, rebalanced=True):
    """
    Codec for data from a diagonal Gaussian with uniform bins.

    rebalanced=True will ensure no zero frequencies, but is slower.

    mean, stdd : per-dimension mean and standard deviation of the Gaussian
    bin_min, bin_max : lower/upper edge of the binned support
    coding_prec : bits of coding precision (frequencies scale to 2**coding_prec)
    n_bins : number of uniform bins
    """
    if rebalanced:
        # Evaluate the Gaussian CDF at every bin edge, then difference
        # adjacent edges to obtain per-bin probability masses.
        bins = np.linspace(bin_min, bin_max, n_bins)
        bins = np.broadcast_to(np.moveaxis(bins, 0, -1), mean.shape + (n_bins,))
        cdfs = norm.cdf(bins, mean[..., np.newaxis], stdd[..., np.newaxis])
        # Clamp the tails so the bin masses sum to exactly one.
        cdfs[..., 0] = 0
        cdfs[..., -1] = 1
        pmfs = cdfs[..., 1:] - cdfs[..., :-1]
        buckets = _cumulative_buckets_from_probs(pmfs, coding_prec)
        enc_statfun = _cdf_to_enc_statfun(_cdf_from_cumulative_buckets(buckets))
        dec_statfun = _ppf_from_cumulative_buckets(buckets)
    else:
        bin_width = (bin_max - bin_min)/float(n_bins)

        def cdf(idx):
            # Cumulative mass up to the upper edge of bin `idx`, scaled to
            # integer frequencies.
            bin_ub = bin_min + idx * bin_width
            return _nearest_int(norm.cdf(bin_ub, mean, stdd) * (1 << coding_prec))

        def ppf(cf):
            # Invert the scaled CDF and map the sample back to a bin index,
            # clamping to the last bin.
            x_max = norm.ppf((cf + 0.5) / (1 << coding_prec), mean, stdd)
            bin_idx = np.floor((x_max - bin_min) / bin_width)
            return np.uint64(np.minimum(n_bins-1, bin_idx))

        enc_statfun = _cdf_to_enc_statfun(cdf)
        dec_statfun = ppf
    return NonUniform(enc_statfun, dec_statfun, coding_prec)
def GetUserFansCount(user_url: str) -> int:
    """Return the follower count of a user.

    Args:
        user_url (str): URL of the user's profile page.

    Returns:
        int: Number of followers.
    """
    AssertUserUrl(user_url)
    AssertUserStatusNormal(user_url)
    data = GetUserJsonDataApi(user_url)
    return data["followers_count"]
from typing import Counter
def majority_vote(labels):
    """assumes labels sorted by distance ASC"""
    counts = Counter(labels)
    winner, top_count = counts.most_common(1)[0]
    ties = sum(1 for c in counts.values() if c == top_count)
    if ties > 1:
        # Tie: drop the farthest label and retry until the tie breaks.
        return majority_vote(labels[:-1])
    return winner
from typing import Union
from typing import Literal
def primitive_vertices_sphere(
    radius: Floating = 0.5,
    segments: Integer = 8,
    intermediate: Boolean = False,
    origin: ArrayLike = np.array([0, 0, 0]),
    axis: Union[Literal["+z", "+x", "+y", "yz", "xz", "xy"], str] = "+z",
) -> NDArray:
    """
    Returns the vertices of a latitude-longitude sphere primitive.
    Parameters
    ----------
    radius
        Sphere radius.
    segments
        Latitude-longitude segments, if the ``intermediate`` argument is
        *True*, then the sphere will have one less segment along its longitude.
    intermediate
        Whether to generate the sphere vertices at the center of the faces
        outlined by the segments of a regular sphere generated without
        the ``intermediate`` argument set to *True*. The resulting sphere is
        inscribed on the regular sphere faces but possesses the same poles.
    origin
        Sphere origin on the construction plane.
    axis
        Axis (or normal of the plane) the poles of the sphere will be aligned
        with.
    Returns
    -------
    :class:`numpy.ndarray`
        Sphere primitive vertices.
    Notes
    -----
    -   The sphere poles have latitude segments count - 1 co-located vertices.
    Examples
    --------
    >>> primitive_vertices_sphere(segments=4)  # doctest: +ELLIPSIS
    array([[[  0.0000000...e+00,   0.0000000...e+00,   5.0000000...e-01],
            [ -3.5355339...e-01,  -4.3297802...e-17,   3.5355339...e-01],
            [ -5.0000000...e-01,  -6.1232340...e-17,   3.0616170...e-17],
            [ -3.5355339...e-01,  -4.3297802...e-17,  -3.5355339...e-01],
            [ -6.1232340...e-17,  -7.4987989...e-33,  -5.0000000...e-01]],
    <BLANKLINE>
           [[  0.0000000...e+00,   0.0000000...e+00,   5.0000000...e-01],
            [  2.1648901...e-17,  -3.5355339...e-01,   3.5355339...e-01],
            [  3.0616170...e-17,  -5.0000000...e-01,   3.0616170...e-17],
            [  2.1648901...e-17,  -3.5355339...e-01,  -3.5355339...e-01],
            [  3.7493994...e-33,  -6.1232340...e-17,  -5.0000000...e-01]],
    <BLANKLINE>
           [[  0.0000000...e+00,   0.0000000...e+00,   5.0000000...e-01],
            [  3.5355339...e-01,   0.0000000...e+00,   3.5355339...e-01],
            [  5.0000000...e-01,   0.0000000...e+00,   3.0616170...e-17],
            [  3.5355339...e-01,   0.0000000...e+00,  -3.5355339...e-01],
            [  6.1232340...e-17,   0.0000000...e+00,  -5.0000000...e-01]],
    <BLANKLINE>
           [[  0.0000000...e+00,   0.0000000...e+00,   5.0000000...e-01],
            [  2.1648901...e-17,   3.5355339...e-01,   3.5355339...e-01],
            [  3.0616170...e-17,   5.0000000...e-01,   3.0616170...e-17],
            [  2.1648901...e-17,   3.5355339...e-01,  -3.5355339...e-01],
            [  3.7493994...e-33,   6.1232340...e-17,  -5.0000000...e-01]]])
    """
    # Normalise the axis argument: plane aliases are mapped through
    # PLANE_TO_AXIS_MAPPING, then validated against the three supported axes.
    axis = PLANE_TO_AXIS_MAPPING.get(axis, axis).lower()
    axis = validate_method(
        axis, ["+x", "+y", "+z"], '"{0}" axis invalid, it must be one of {1}!'
    )
    if not intermediate:
        # Regular sphere: latitude (theta) and longitude (phi) grids placed
        # exactly on the segment boundaries.
        theta = np.tile(
            np.radians(np.linspace(0, 180, segments + 1)),
            (int(segments) + 1, 1),
        )
        phi = np.transpose(
            np.tile(
                np.radians(np.linspace(-180, 180, segments + 1)),
                (int(segments) + 1, 1),
            )
        )
    else:
        # Intermediate sphere: latitudes at the midpoints between the regular
        # segment boundaries, with the two poles re-appended explicitly.
        theta = np.tile(
            np.radians(np.linspace(0, 180, segments * 2 + 1)[1::2][1:-1]),
            (int(segments) + 1, 1),
        )
        theta = np.hstack(
            [
                zeros((segments + 1, 1)),
                theta,
                full((segments + 1, 1), np.pi),
            ]
        )
        # Longitudes shifted by half a segment so vertices sit on face centers.
        phi = np.transpose(
            np.tile(
                np.radians(np.linspace(-180, 180, segments + 1))
                + np.radians(360 / segments / 2),
                (int(segments), 1),
            )
        )
    rho = ones(phi.shape) * radius
    rho_theta_phi = tstack([rho, theta, phi])
    vertices = spherical_to_cartesian(rho_theta_phi)
    # Removing extra longitude vertices.
    vertices = vertices[:-1, :, :]
    # Poles come out along +z; roll the xyz components to align with the
    # requested axis.
    if axis == "+z":
        pass
    elif axis == "+y":
        vertices = np.roll(vertices, 2, -1)
    elif axis == "+x":
        vertices = np.roll(vertices, 1, -1)
    vertices += origin
    return vertices
def VtuDiff(vtu1, vtu2, filename = None):
    """
    Generate a vtu with fields generated by taking the difference between the field
    values in the two supplied vtus. Fields that are not common between the two vtus
    are neglected.

    If the point locations of the two vtus do not match, the fields of vtu2 are
    probed (interpolated) onto the point locations of vtu1, and cell-based fields
    are dropped from the output since they cannot be interpolated. Otherwise the
    cell points of vtu1 and vtu2 must match and fields are subtracted directly.

    :param vtu1: first input vtu; its grid is copied into the result
    :param vtu2: second input vtu, subtracted from vtu1
    :param filename: optional filename recorded on the returned vtu
    :return: new vtu containing the (vtu1 - vtu2) difference fields
    """
    # Generate empty output vtu
    resultVtu = vtu()
    resultVtu.filename = filename
    # If the input vtu point locations match, do not use probe
    useProbe = not VtuMatchLocations(vtu1, vtu2)
    if useProbe:
        probe = VTU_Probe(vtu2.ugrid, vtu1.GetLocations())
    # Copy the grid from the first input vtu into the output vtu
    resultVtu.ugrid.DeepCopy(vtu1.ugrid)
    # Find common field names between the input vtus and generate corresponding
    # difference fields
    fieldNames1 = vtu1.GetFieldNames()
    fieldNames2 = vtu2.GetFieldNames()
    for fieldName in fieldNames1:
        field1 = vtu1.GetField(fieldName)
        if fieldName in fieldNames2:
            if useProbe:
                field2 = probe.GetField(fieldName)
            else:
                field2 = vtu2.GetField(fieldName)
            resultVtu.AddField(fieldName, field1-field2)
        else:
            # Not present in vtu2: drop it from the (copied) output grid.
            resultVtu.RemoveField(fieldName)
    # Also look for cell-based fields. This only works if we don't have
    # to interpolate (both meshes are the same)
    vtkdata=vtu1.ugrid.GetCellData()
    fieldNames1 = [vtkdata.GetArrayName(i) for i in range(vtkdata.GetNumberOfArrays())]
    vtkdata=vtu2.ugrid.GetCellData()
    fieldNames2 = [vtkdata.GetArrayName(i) for i in range(vtkdata.GetNumberOfArrays())]
    if useProbe:
        # meshes are different - we can't interpolate cell-based fields so let's just remove them from the output
        for fieldName in fieldNames1:
            if fieldName=='vtkGhostLevels':
                # this field should just be passed on unchanged
                continue
            resultVtu.RemoveField(fieldName)
    else:
        # meshes are the same - we can simply subtract
        for fieldName in fieldNames1:
            if fieldName=='vtkGhostLevels':
                # this field should just be passed on unchanged
                continue
            elif fieldName in fieldNames2:
                field1 = vtu1.GetField(fieldName)
                field2 = vtu2.GetField(fieldName)
                resultVtu.AddField(fieldName, field1-field2)
            else:
                resultVtu.RemoveField(fieldName)
    return resultVtu
def senv(key, default=NoDefault, required=False, settings=None, _altered_defaults=None, _defaults=None):
    """
    Return the value for ``key`` by checking the following sources:
    - the environment
    - the ``settings`` dictionary

    If ``key`` is in ``_defaults`` but not in ``_altered_defaults``, the value
    in ``settings`` is ignored and ``default`` is used as the fallback.
    """
    altered = _altered_defaults or []
    defaults = _defaults or []
    settings = settings or {}
    # Decide which fallback the environment lookup should use.
    if key in defaults and key not in altered:
        fallback = default
    else:
        fallback = settings.get(key, default)
    value = env(key, fallback)
    if value == NoDefault:
        if required:
            raise ImproperlyConfigured((
                "Missing required setting '{}' "
                "(checked environment and settings)").format(key))
        value = None
    return value
def average_balance_observer(validator_type):
    """ A function factory that returns an observer function"""
    def obs_func(state):
        validators = state["network"].validators
        store = validators[0].store
        head = br.specs.get_head(store)
        current_state = store.block_states[head]
        current_epoch = br.specs.get_current_epoch(current_state)
        # Indices of validators whose behavior matches the requested type.
        indices = [i for i, v in enumerate(validators)
                   if validator_type in v.validator_behavior]
        total = sum(b for i, b in enumerate(current_state.balances) if i in indices)
        return br.utils.eth2.gwei_to_eth(total / float(len(indices)))
    return obs_func
def get_channel_members_names(channel):
    """Returns a list of all members of a channel. If the member has a nickname, the nickname is used instead of their name, otherwise their name is used"""
    return [member.name if member.nick is None else member.nick
            for member in channel.members]
import os
import time
import math
def _build( ):
    """
    Build the project.
    This step handles:
    Checking library dependencies.
    Checking which files need to be built.
    And spawning a build thread for each one that does.
    """
    if _guiModule:
        _guiModule.run()
    built = False
    global _building
    _building = True
    # Create placeholder object files for any chunk outputs that do not exist
    # yet, for toolchains that support dummy objects.
    for project in _shared_globals.sortedProjects:
        for chunk in project.chunks:
            if project.activeToolchain.Compiler().SupportsDummyObjects():
                objs = []
                for source in chunk:
                    obj = _utils.GetSourceObjPath(project, source)
                    if not os.access(obj, os.F_OK):
                        objs.append(obj)
                project.activeToolchain.Compiler().MakeDummyObjects(objs)
    # Reserve all but one linker slot up front; slots are released one at a
    # time below as compile threads are reclaimed.
    linker_threads_blocked = _shared_globals.max_linker_threads - 1
    for i in range( linker_threads_blocked ):
        _shared_globals.link_semaphore.acquire(True)
    for project in _shared_globals.sortedProjects:
        _shared_globals.total_compiles += len( project._finalChunkSet )
    _shared_globals.total_compiles += _shared_globals.total_precompiles
    _shared_globals.current_compile = 1
    projects_in_flight = set()
    projects_done = set()
    pending_links = set()
    pending_builds = _shared_globals.sortedProjects
    #projects_needing_links = set()
    # Run all pre-make steps (plugin, toolchain, project, then global).
    for project in pending_builds:
        for plugin in project.plugins:
            _utils.CheckRunBuildStep(project, plugin.preMakeStep, "plugin pre-make")
            _shared_globals.globalPreMakeSteps.add(plugin.globalPreMakeStep)
        _utils.CheckRunBuildStep(project, project.activeToolchain.preMakeStep, "toolchain pre-make")
        _shared_globals.globalPreMakeSteps |= project.activeToolchain.GetGlobalPreMakeSteps()
        for buildStep in project.preMakeSteps:
            _utils.CheckRunBuildStep(project, buildStep, "project pre-make")
    for buildStep in _shared_globals.globalPreMakeSteps:
        if _utils.FuncIsEmpty(buildStep):
            continue
        log.LOG_BUILD( "Running global pre-make step {}".format(_utils.GetFuncName(buildStep)))
        buildStep()
    _shared_globals.starttime = time.time( )
    _linkThread.start()
    # Links any project whose compilation has finished and whose link
    # dependencies are all done; loops until no further project can be linked.
    def ReconcilePostBuild():
        LinkedSomething = True
        while LinkedSomething:
            LinkedSomething = False
            for otherProj in list( projects_in_flight ):
                with otherProj.mutex:
                    complete = otherProj.compilationCompleted
                if complete >= len( otherProj._finalChunkSet ) + int(
                        otherProj.needsPrecompileC ) + int(
                        otherProj.needsPrecompileCpp ):
                    totaltime = (time.time( ) - otherProj.starttime)
                    minutes = math.floor( totaltime / 60 )
                    seconds = math.floor( totaltime % 60 )
                    log.LOG_BUILD(
                        "Compile of {0} ({3} {4}) took {1}:{2:02}".format( otherProj.outputName, int( minutes ),
                            int( seconds ), otherProj.targetName, otherProj.outputArchitecture ) )
                    otherProj.buildEnd = time.time()
                    projects_in_flight.remove( otherProj )
                    if otherProj.compilationFailed:
                        log.LOG_ERROR( "Build of {} ({} {}/{}) failed! Finishing up non-dependent build tasks...".format(
                            otherProj.outputName, otherProj.targetName, otherProj.outputArchitecture, otherProj.activeToolchainName ) )
                        otherProj.state = _shared_globals.ProjectState.FAILED
                        otherProj.linkQueueStart = time.time()
                        otherProj.linkStart = otherProj.linkQueueStart
                        otherProj.endTime = otherProj.linkQueueStart
                        continue
                    okToLink = True
                    if otherProj.reconciledLinkDepends:
                        for depend in otherProj.reconciledLinkDepends:
                            if depend not in projects_done:
                                dependProj = _shared_globals.projects[depend]
                                if not dependProj.shell and not dependProj.prebuilt:
                                    okToLink = False
                                    break
                    if okToLink:
                        _link( otherProj )
                        LinkedSomething = True
                        projects_done.add( otherProj.key )
                    else:
                        log.LOG_LINKER(
                            "Linking for {} ({} {}/{}) deferred until all dependencies have finished building...".format(
                                otherProj.outputName, otherProj.targetName, otherProj.outputArchitecture, otherProj.activeToolchainName ) )
                        otherProj.state = _shared_globals.ProjectState.WAITING_FOR_LINK
                        pending_links.add( otherProj )
            # Retry previously deferred links whose dependencies may now be done.
            for otherProj in list( pending_links ):
                okToLink = True
                for depend in otherProj.reconciledLinkDepends:
                    if depend not in projects_done:
                        dependProj = _shared_globals.projects[depend]
                        if not dependProj.shell and not dependProj.prebuilt:
                            okToLink = False
                            break
                if okToLink:
                    _link( otherProj )
                    LinkedSomething = True
                    projects_done.add( otherProj.key )
                    pending_links.remove( otherProj )
    # Dispatch compile jobs, deferring projects whose source dependencies are
    # not yet complete.
    while pending_builds:
        theseBuilds = pending_builds
        pending_builds = []
        for project in theseBuilds:
            for depend in project.srcDepends:
                if depend not in projects_done:
                    pending_builds.append( project )
                    continue
            projects_in_flight.add( project )
            projectSettings.currentProject = project
            project.starttime = time.time( )
            for plugin in project.plugins:
                _utils.CheckRunBuildStep(project, plugin.preBuildStep, "plugin pre-build")
                plugin.preBuildStep(project)
            _utils.CheckRunBuildStep(project, project.activeToolchain.preBuildStep, "toolchain pre-build")
            for buildStep in project.preBuildSteps:
                _utils.CheckRunBuildStep(project, buildStep, "project pre-build")
            log.LOG_BUILD( "Building {} ({} {}/{})".format( project.outputName, project.targetName, project.outputArchitecture, project.activeToolchainName ) )
            project.state = _shared_globals.ProjectState.BUILDING
            project.startTime = time.time()
            if project.precompile_headers( ):
                for chunk in projectSettings.currentProject._finalChunkSet:
                    #not set until here because _finalChunkSet may be empty.
                    project._builtSomething = True
                    chunkFileStr = ""
                    if chunk in project.chunksByFile:
                        chunkFileStr = " {}".format( [ os.path.basename(piece) for piece in project.chunksByFile[chunk] ] )
                    elif chunk in project.splitChunks:
                        chunkFileStr = " [Split from {}_{}{}]".format(
                            project.splitChunks[chunk],
                            project.targetName,
                            project.activeToolchain.Compiler().GetObjExt()
                        )
                    built = True
                    obj = _utils.GetSourceObjPath(projectSettings.currentProject, chunk, sourceIsChunkPath=projectSettings.currentProject.ContainsChunk(chunk))
                    if not _shared_globals.semaphore.acquire( False ):
                        if _shared_globals.max_threads != 1:
                            log.LOG_INFO( "Waiting for a build thread to become available..." )
                        _shared_globals.semaphore.acquire( True )
                    ReconcilePostBuild()
                    if _shared_globals.interrupted:
                        Exit( 2 )
                    if not _shared_globals.build_success and _shared_globals.stopOnError:
                        log.LOG_ERROR("Errors encountered during build, finishing current tasks and exiting...")
                        _shared_globals.semaphore.release()
                        break
                    if _shared_globals.times:
                        totaltime = (time.time( ) - _shared_globals.starttime)
                        _shared_globals.lastupdate = totaltime
                        minutes = math.floor( totaltime / 60 )
                        seconds = math.floor( totaltime % 60 )
                        avgtime = sum( _shared_globals.times ) / (len( _shared_globals.times ))
                        esttime = totaltime + ((avgtime * (
                            _shared_globals.total_compiles - len(
                                _shared_globals.times ))) / _shared_globals.max_threads)
                        if esttime < totaltime:
                            esttime = totaltime
                        _shared_globals.esttime = esttime
                        estmin = math.floor( esttime / 60 )
                        estsec = math.floor( esttime % 60 )
                        log.LOG_BUILD(
                            "Compiling {0}{7}... ({1}/{2}) - {3}:{4:02}/{5}:{6:02}".format( os.path.basename( obj ),
                                _shared_globals.current_compile, _shared_globals.total_compiles, int( minutes ),
                                int( seconds ), int( estmin ),
                                int( estsec ), chunkFileStr ) )
                    else:
                        totaltime = (time.time( ) - _shared_globals.starttime)
                        minutes = math.floor( totaltime / 60 )
                        seconds = math.floor( totaltime % 60 )
                        log.LOG_BUILD(
                            "Compiling {0}{5}... ({1}/{2}) - {3}:{4:02}".format( os.path.basename( obj ),
                                _shared_globals.current_compile,
                                _shared_globals.total_compiles, int( minutes ), int( seconds ), chunkFileStr ) )
                    _utils.ThreadedBuild( chunk, obj, project ).start( )
                    _shared_globals.current_compile += 1
            else:
                projects_in_flight.remove( project )
                log.LOG_ERROR( "Build of {} ({} {}/{}) failed! Finishing up non-dependent build tasks...".format(
                    project.outputName, project.targetName, project.outputArchitecture, project.activeToolchainName ) )
                with project.mutex:
                    for chunk in project._finalChunkSet:
                        project.fileStatus[os.path.normcase(chunk)] = _shared_globals.ProjectState.ABORTED
                    _shared_globals.total_compiles -= len(project._finalChunkSet)
                project.linkQueueStart = time.time()
                project.linkStart = project.linkQueueStart
                project.endTime = project.linkQueueStart
                project.state = _shared_globals.ProjectState.FAILED
            if not _shared_globals.build_success and _shared_globals.stopOnError:
                break
    #Wait until all threads are finished. Simple way to do this is acquire the semaphore until it's out of
    # resources.
    for j in range( _shared_globals.max_threads ):
        if not _shared_globals.semaphore.acquire( False ):
            if _shared_globals.max_threads != 1:
                if _shared_globals.times:
                    totaltime = (time.time( ) - _shared_globals.starttime)
                    _shared_globals.lastupdate = totaltime
                    minutes = math.floor( totaltime / 60 )
                    seconds = math.floor( totaltime % 60 )
                    avgtime = sum( _shared_globals.times ) / (len( _shared_globals.times ))
                    esttime = totaltime + ((avgtime * (_shared_globals.total_compiles - len(
                        _shared_globals.times ))) / _shared_globals.max_threads)
                    if esttime < totaltime:
                        esttime = totaltime
                    estmin = math.floor( esttime / 60 )
                    estsec = math.floor( esttime % 60 )
                    _shared_globals.esttime = esttime
                    log.LOG_THREAD(
                        "Waiting on {0} more build thread{1} to finish... ({2}:{3:02}/{4}:{5:02})".format(
                            _shared_globals.max_threads - j,
                            "s" if _shared_globals.max_threads - j != 1 else "", int( minutes ),
                            int( seconds ), int( estmin ), int( estsec ) ) )
                else:
                    log.LOG_THREAD(
                        "Waiting on {0} more build thread{1} to finish...".format(
                            _shared_globals.max_threads - j,
                            "s" if _shared_globals.max_threads - j != 1 else "" ) )
            ReconcilePostBuild()
            _shared_globals.semaphore.acquire( True )
        # Each reclaimed compile thread frees one of the reserved linker slots.
        if linker_threads_blocked > 0:
            _shared_globals.link_semaphore.release()
            linker_threads_blocked -= 1
        if _shared_globals.interrupted:
            Exit( 2 )
    #Then immediately release all the semaphores once we've reclaimed them.
    #We're not using any more threads so we don't need them now.
    for j in range( _shared_globals.max_threads ):
        if _shared_globals.stopOnError:
            projects_in_flight = set()
        _shared_globals.semaphore.release( )
    ReconcilePostBuild()
    if projects_in_flight:
        log.LOG_ERROR( "Could not complete all projects. This is probably very bad and should never happen."
            " Remaining projects: {0}".format( [p.key for p in projects_in_flight] ) )
    if pending_links:
        log.LOG_ERROR( "Could not link all projects. Do you have unmet dependencies in your makefile?"
            " Remaining projects: {0}".format( [p.key for p in pending_links] ) )
        for p in pending_links:
            p.state = _shared_globals.ProjectState.ABORTED
        _shared_globals.build_success = False
    for proj in _shared_globals.sortedProjects:
        proj.save_md5s( proj.allsources, proj.allheaders )
    if not built:
        log.LOG_BUILD( "Nothing to build." )
    _building = False
    # Wake the linker thread so it can drain its queue and exit.
    global _linkCond
    global _linkMutex
    with _linkMutex:
        _linkCond.notify()
    log.LOG_THREAD("Waiting for linker tasks to finish.")
    _linkThread.join()
    # Post-make steps run only when every project built and linked cleanly.
    if not projects_in_flight and not pending_links:
        for project in _shared_globals.sortedProjects:
            for plugin in project.plugins:
                _utils.CheckRunBuildStep(project, plugin.postMakeStep, "plugin post-make")
                _shared_globals.globalPostMakeSteps.add(plugin.globalPostMakeStep)
            _utils.CheckRunBuildStep(project, project.activeToolchain.postMakeStep, "toolchain post-make")
            _shared_globals.globalPostMakeSteps |= project.activeToolchain.GetGlobalPostMakeSteps()
            for buildStep in project.postMakeSteps:
                _utils.CheckRunBuildStep(project, buildStep, "project post-make")
        for buildStep in _shared_globals.globalPostMakeSteps:
            if _utils.FuncIsEmpty(buildStep):
                continue
            log.LOG_BUILD( "Running global post-make step {}".format(_utils.GetFuncName(buildStep)))
            buildStep()
    compiletime = time.time( ) - _shared_globals.starttime
    totalmin = math.floor( compiletime / 60 )
    totalsec = math.floor( compiletime % 60 )
    log.LOG_BUILD( "Compilation took {0}:{1:02}".format( int( totalmin ), int( totalsec ) ) )
    _shared_globals.buildFinished = True
    return _shared_globals.build_success
def getRaCfg(name, default):
    """ Gets a config attribute, if not set, return the default. """
    if 'raCfg' not in config:
        return default
    section = config['raCfg']
    # Only boolean values stored under raCfg are honoured.
    if name in section and isinstance(section[name], bool):
        return section[name]
    return default
import typing
def quat_mean(quaternions: typing.Sequence[typing.Union[typing.Sequence, np.ndarray]]) -> np.ndarray:
    """
    Find the mean of a bunch of quaternions

    Fails in some pathological cases where the quats are widely distributed.

    :param quaternions: sequence of 4-element quaternions
    :return: normalized mean quaternion (or nan / the input for 0 / 1 inputs)
    """
    if len(quaternions) <= 0:
        return np.nan
    if len(quaternions) == 1:
        # Only one quaternion, it is the average of itself
        return quaternions[0]
    # Accumulate, flipping signs so every quaternion lies in the same
    # hemisphere as the first one (q and -q represent the same rotation).
    total = np.zeros(4)
    count = 0
    reference = None
    for quat in quaternions:
        if reference is None:
            reference = quat
        elif np.dot(reference, quat) < 0:
            quat = -1 * quat
        count += 1
        total += quat
    mean = total / count
    return mean / np.linalg.norm(mean)
def get_element_dict(propname='mass_number'):
    """ Obtain dictionary of elements ordered by a property.
    """
    prop_dict = {k: getattr(elements[k], propname) for k in elements.keys()}
    names = list(elements.keys())
    values = list(prop_dict.values())
    # Sort element names by the requested property value.
    order = np.argsort(values)
    names = np.array(names)[order]
    values = np.array(values)[order]
    return OD(zip(names, values))
def get_match_history(start_at_match_id=None, player_name=None, hero_id=None,
                      skill=0, date_min=None, date_max=None, account_id=None,
                      league_id=None, matches_requested=None, game_mode=None,
                      min_players=None, tournament_games_only=None,
                      **kwargs):
    """
    List of most recent 25 matches before start_at_match_id
    """
    # Forward every filter verbatim; the remote API ignores None values.
    params = dict(
        start_at_match_id=start_at_match_id,
        player_name=player_name,
        hero_id=hero_id,
        skill=skill,
        date_min=date_min,
        date_max=date_max,
        account_id=account_id,
        league_id=league_id,
        matches_requested=matches_requested,
        game_mode=game_mode,
        min_players=min_players,
        tournament_games_only=tournament_games_only,
    )
    return make_request("GetMatchHistory", params, **kwargs)
from alert.models import AddDropPeriod
def get_add_drop_period(semester):
    """
    Returns the AddDropPeriod object corresponding to the given semester. Throws the same
    errors and behaves the same way as AddDropPeriod.objects.get(semester=semester) but runs faster.
    This function uses caching to speed up add/drop period object retrieval. Cached objects
    expire every 25 hours, and are also invalidated in the AddDropPeriod.save method.
    The add_drop_periods key in cache points to a dictionary mapping semester to add/drop period
    object.
    """
    cached = cache.get("add_drop_periods", None)
    dirty = cached is None
    if dirty:
        cached = dict()
    if semester not in cached:
        cached[semester] = AddDropPeriod.objects.get(semester=semester)
        dirty = True
    if dirty:
        # cache expires every 25 hours
        cache.set("add_drop_periods", cached, timeout=90000)
    return cached[semester]
from solarforecastarbiter.io.fetch import nwp as fetch_nwp
def run_nwp(forecast, model, run_time, issue_time):
    """
    Calculate benchmark irradiance and power forecasts for a Forecast or
    ProbabilisticForecast.
    Forecasts may be run operationally or retrospectively. For
    operational forecasts, *run_time* is typically set to now. For
    retrospective forecasts, *run_time* is the time by which the
    forecast should be run so that it could have been be delivered for
    the *issue_time*. Forecasts will only use data with timestamps
    before *run_time*.
    Parameters
    ----------
    forecast : datamodel.Forecast or datamodel.ProbabilisticForecast
        The metadata of the desired forecast.
    model : function
        NWP model loading and processing function.
        See :py:mod:`solarforecastarbiter.reference_forecasts.models`
        for options.
    run_time : pd.Timestamp
        Run time of the forecast.
    issue_time : pd.Timestamp
        Issue time of the forecast run.
    Returns
    -------
    ghi : pd.Series or pd.DataFrame
    dni : pd.Series or pd.DataFrame
    dhi : pd.Series or pd.DataFrame
    air_temperature : pd.Series or pd.DataFrame
    wind_speed : pd.Series or pd.DataFrame
    ac_power : pd.Series or pd.DataFrame
        Series are returned for deterministic forecasts, DataFrames are
        returned for probabilisic forecasts.
    Examples
    --------
    The following code would return hourly average forecasts derived
    from the subhourly HRRR model.
    .. testsetup::
        import datetime
        from solarforecastarbiter import datamodel
        from solarforecastarbiter.reference_forecasts import models
        from solarforecastarbiter.reference_forecasts.main import *
    >>> run_time = pd.Timestamp('20190515T0200Z')
    >>> issue_time = pd.Timestamp('20190515T0000Z')
    >>> modeling_parameters = datamodel.FixedTiltModelingParameters(
    ...     surface_tilt=30, surface_azimuth=180,
    ...     ac_capacity=10, dc_capacity=15,
    ...     temperature_coefficient=-0.4, dc_loss_factor=0,
    ...     ac_loss_factor=0)
    >>> power_plant = datamodel.SolarPowerPlant(
    ...     name='Test plant', latitude=32.2, longitude=-110.9,
    ...     elevation=715, timezone='America/Phoenix',
    ...     modeling_parameters=modeling_parameters)
    >>> forecast = datamodel.Forecast(
    ...     name='Test plant fx',
    ...     site=power_plant,
    ...     variable='ac_power',
    ...     interval_label='ending',
    ...     interval_value_type='interval_mean',
    ...     interval_length='1h',
    ...     issue_time_of_day=datetime.time(hour=0),
    ...     run_length=pd.Timedelta('24h'),
    ...     lead_time_to_start=pd.Timedelta('0h'))
    >>> ghi, dni, dhi, temp_air, wind_speed, ac_power = run_nwp(
    ...     forecast, models.hrrr_subhourly_to_hourly_mean,
    ...     run_time, issue_time)
    """
    # bury import in the function so that io.fetch.nwp libraries (aoihttp, etc)
    # are restricted to this function. might be better to extract the relevant
    # model metadata into io.nwp, but that would be more invasive.
    fetch_metadata = fetch_nwp.model_map[models.get_nwp_model(model)]
    # absolute date and time for model run most recently available
    # as of run_time
    init_time = utils.get_init_time(run_time, fetch_metadata)
    # absolute start and end times. interval_label still controls
    # inclusive/exclusive
    start, end = utils.get_forecast_start_end(forecast, issue_time)
    site = forecast.site
    logger.info(
        'Calculating forecast for model %s starting at %s from %s to %s',
        model, init_time, start, end)
    # model will account for interval_label
    *forecasts, resampler, solar_position_calculator = model(
        site.latitude, site.longitude, site.elevation,
        init_time, start, end, forecast.interval_label)
    if isinstance(site, datamodel.SolarPowerPlant):
        solar_position = solar_position_calculator()
        if isinstance(forecasts[0], pd.DataFrame):
            # must iterate over columns because pvmodel.irradiance_to_power
            # calls operations that do not properly broadcast Series along
            # a DataFrame time index. pvlib.irradiance.haydavies operation
            # (AI = dni_ens / dni_extra) is the known culprit, though there
            # may be more.
            ac_power = {}
            for col in forecasts[0].columns:
                member_fx = [fx.get(col) for fx in forecasts]
                member_ac_power = pvmodel.irradiance_to_power(
                    site.modeling_parameters,
                    solar_position['apparent_zenith'],
                    solar_position['azimuth'],
                    *member_fx)
                ac_power[col] = member_ac_power
            ac_power = pd.DataFrame(ac_power)
        else:
            ac_power = pvmodel.irradiance_to_power(
                site.modeling_parameters, solar_position['apparent_zenith'],
                solar_position['azimuth'], *forecasts)
    else:
        # site without plant modeling parameters: weather variables only
        ac_power = None
    # resample data after power calculation
    resampled = list(map(resampler, (*forecasts, ac_power)))
    nwpoutput = namedtuple(
        'NWPOutput', ['ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
                      'ac_power'])
    return nwpoutput(*resampled)
def get_path_filename(handle):
    """Return the configured path joined with ``handle`` plus the configured extension."""
    base = config['path'].strip('/').strip()
    filename = base + '/' + handle
    return filename + config['extension']
def get_user_subscription_steps(signature=None):
    """Build the step list for the user subscription flow.

    :param signature: signature forwarded to each step URL
    :return: list of steps serialized via ``to_json()``
    """
    labels = [
        ('①', '申込み基本情報'),
        ('②', '申込者分類選択'),
        ('③', '申込者情報入力'),
        ('④', '申込み確認'),
        ('⑤', '申込み完了'),
    ]
    steps = create_steps(
        labels,
        url_pattern='format:user_subscription_step%s',
        url_kwargs={'signature': signature},
    )
    return [s.to_json() for s in steps]
def approx(g, nodes):
    """
    Computes the approximation of g over the nodes for Simpson's method

    :param g: integrand evaluated at each of the three nodes
    :param nodes: three points [a, m, b] with m between a and b
    :return: Simpson's-rule style estimate over [a, b]
    """
    # NOTE(review): Simpson's rule normally uses (b - a)/6 as the leading
    # factor; applying g to the interval width here (g(nodes[2] - nodes[0]))
    # looks suspicious -- confirm g is really meant to be evaluated on the
    # width rather than the width being used directly.
    factor = g(nodes[2] - nodes[0]) / _real(6)
    # Weighted sum of endpoint and midpoint evaluations: f(a) + 4 f(m) + f(b).
    _sum = g(nodes[0]) + _real(4) * g(nodes[1]) + g(nodes[2])
    return factor * _sum
import os
def _get_db_path():
"""
Return the path to the database file. If the environment variable NATURE_RECORDER_DB is set, this will be used
as the path to the SQLite database file. If not, then the default "development" database file, in the
applications data folder, is used.
:return: The path to the database file
"""
db_path = os.environ["NATURE_RECORDER_DB"] if "NATURE_RECORDER_DB" in os.environ else None
if not db_path:
db_path = os.path.join(get_data_path(), "naturerecorder_dev.db")
return db_path | 86029a8d661fc8e81efebf94933820930cfa1c97 | 3,631,398 |
def get_fmtfldsdict(prtfmt):
    """Return the fieldnames in the formatter text."""
    # Example prtfmt: "{NS} {study_cnt:2} {fdr_bh:5.3e} L{level:02} D{depth:02} {GO} {name}\n"
    names = list(get_fmtflds(prtfmt))
    return dict(zip(names, names))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.