content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def build_answers_xml(selector, args):
    """Build an <answers> XML element tree for a selector class.

    Each of the selector's questions becomes a child element of
    <license-{selector.id}>, filled with the value from ``args`` or, when
    absent, the question's default answer.  Any remaining keys in ``args``
    that do not already name a question or top-level element are appended
    under a <work-info> element.

    NOTE(review): uses ``.xpath`` on elements, so ``ET`` is presumably
    lxml.etree rather than the stdlib ElementTree -- confirm.
    """
    # build an answers xml tree
    answers = ET.Element('answers')
    questions = ET.SubElement(answers, 'license-%s' % selector.id)
    # build the required answer elements
    for question in selector.questions():
        # assumes answers() yields pairs whose second item is the answer
        # value, first pair being the default -- TODO confirm
        default_answer = question.answers()[0][1]
        ET.SubElement(questions, question.id).text = \
            str(args.get(question.id, default_answer))
    # shove anything else in the args dict into the work-info
    work_info = ET.SubElement(answers, 'work-info')
    for field in args:
        # skip fields already present as a question or top-level element
        if questions.xpath('%s' % field) == [] and \
                answers.xpath('%s' % field) == []:
            # append an element for this argument
            ET.SubElement(work_info, field).text = args[field]
    return answers
import time
def give_me_the_answer(question, clever=False):
    """Return the answer to *question*.

    Parameters
    ----------
    question: str
        the question you are asking.
    clever: bool, optional
        whether or not the answer should be clever or not

    Returns
    -------
    The answer
    """
    if isinstance(question, str):
        if clever:
            # Pretend to ponder the question for a moment.
            time.sleep(1)
        return cfg.ANSWER
    raise ValueError('The question should be a string!')
def round_down(x: float, decimal_places: int) -> float:
    """
    Round a float down to decimal_places.

    Parameters
    ----------
    x : float
    decimal_places : int
        Number of decimal digits to keep; must be >= 0.

    Returns
    -------
    rounded_float : float

    Examples
    --------
    >>> round_down(1.23456, 3)
    1.234
    >>> round_down(1.23456, 2)
    1.23
    """
    import math  # stdlib equivalent of the project's math_stl alias
    # Compute the scale factor arithmetically instead of building the
    # string "1" + "0"*n and parsing it back to an int.
    scale = 10 ** decimal_places
    return math.floor(x * scale) / scale
import importlib
def import_from_string(val, setting_name):
    """
    Attempt to import a class from a string representation.

    Parameters
    ----------
    val : str
        Dotted path, e.g. ``"package.module.ClassName"``.
    setting_name : str
        Name of the setting being resolved; used in the error message.

    Raises
    ------
    ImportError
        If the module cannot be imported, the attribute is missing, or
        ``val`` contains no module path.  The original exception is chained.
    """
    try:
        parts = val.split(".")
        module_path, class_name = ".".join(parts[:-1]), parts[-1]
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except (ImportError, AttributeError, ValueError) as e:
        # AttributeError: module imported but the name is missing.
        # ValueError: empty module path (val had no dot).
        # `from e` keeps the root cause in the traceback.
        msg = "Could not import %r for setting %r. %s: %s." % (val, setting_name, e.__class__.__name__, e)
        raise ImportError(msg) from e
def datatype_to_tracktype(datatype):
    """
    Infer a default track type from a data type. There can
    be other track types that can display a given data type.

    Parameters
    ----------
    datatype: str
        A datatype identifier (e.g. 'matrix')

    Returns
    -------
    str, str:
        A track type (e.g. 'heatmap') and position (e.g. 'top')
    """
    # Look up the default track type for this datatype, then the default
    # position for that track type; either lookup may yield None.
    track = _datatype_default_track.get(datatype)
    return track, _track_default_position.get(track)
def rossler(x, y, z, a, b, c):
    """Rössler system of ODEs: return (dx, dy, dz) at state (x, y, z)."""
    return (
        -y - z,
        x + a * y,
        b + z * (x - c),
    )
import os
def comparePlistVersion(item):
    """Gets the version string from the plist at path and compares versions.

    Returns 0 if the plist isn't installed
           -1 if it's older
            1 if the version is the same
            2 if the version is newer
    Raises munkicommon.Error if there's an error in the input

    ``item`` must contain 'path' plus the version-comparison key (default
    'CFBundleShortVersionString'); it may also carry
    'minimum_update_version' and an explicit 'version_comparison_key'.
    """
    # Key whose value in `item` is the expected version string; also the key
    # read from the installed plist when explicitly supplied (see below).
    version_comparison_key = item.get(
        'version_comparison_key', 'CFBundleShortVersionString')
    if 'path' in item and version_comparison_key in item:
        versionstring = item[version_comparison_key]
        filepath = item['path']
        minupvers = item.get('minimum_update_version')
    else:
        raise munkicommon.Error('Missing plist path or version!')
    munkicommon.display_debug1('\tChecking %s for %s %s...',
                               filepath, version_comparison_key, versionstring)
    # A missing or unreadable plist counts as "not installed" (0).
    if not os.path.exists(filepath):
        munkicommon.display_debug1('\tNo plist found at %s', filepath)
        return 0
    try:
        plist = FoundationPlist.readPlist(filepath)
    except FoundationPlist.NSPropertyListSerializationException:
        munkicommon.display_debug1('\t%s may not be a plist!', filepath)
        return 0
    if not hasattr(plist, 'get'):
        # Parsed to something that is not dictionary-like.
        munkicommon.display_debug1(
            'plist not parsed as NSCFDictionary: %s', filepath)
        return 0
    if 'version_comparison_key' in item:
        # specific key has been supplied,
        # so use this to determine installed version
        munkicommon.display_debug1(
            '\tUsing version_comparison_key %s', version_comparison_key)
        installedvers = munkicommon.getVersionString(
            plist, version_comparison_key)
    else:
        # default behavior
        installedvers = munkicommon.getVersionString(plist)
    if installedvers:
        munkicommon.display_debug1(
            '\tInstalled item has version %s', installedvers)
        if minupvers:
            # Below the minimum update version the item is treated as
            # not installed at all.
            if compareVersions(installedvers, minupvers) < 1:
                munkicommon.display_debug1(
                    '\tVersion %s too old < %s', installedvers, minupvers)
                return 0
        compare_result = compareVersions(installedvers, versionstring)
        # compare_result + 1 indexes into `results`, so compareVersions is
        # presumably in {-1, 0, 1, 2} -- confirm against munkicommon.
        results = ['older', 'not installed?!', 'the same', 'newer']
        munkicommon.display_debug1('\tInstalled item is %s.',
                                   results[compare_result + 1])
        return compare_result
    else:
        munkicommon.display_debug1('\tNo version info in %s.', filepath)
        return 0
import re
def get_extension(filename):
    """
    Extract file extension from filename using regex.

    Args:
        filename (str): name of file

    Returns:
        str: the file extension (without the leading dot)

    Raises:
        ValueError: if no extension can be extracted
    """
    match = re.search(r"\.(?P<ext>[^.]+)$", filename)
    if match:
        return match.group("ext")
    # Bug fix: the original f-string hard-coded the literal '(unknown)'
    # instead of interpolating the actual filename.
    raise ValueError(f"No extension could be extracted from '{filename}'")
import asyncio
def run_async(func):
    """
    Allows you to run a click command asynchronously.

    Wraps an async callable so it can be invoked synchronously; the
    coroutine is driven to completion on a fresh event loop and its
    result is returned.
    """
    def inner_handler(*args, **kwargs):
        # Bug fix: asyncio.coroutine was deprecated in 3.8 and removed in
        # Python 3.11; a native coroutine function needs no wrapping.
        return asyncio.run(func(*args, **kwargs))
    return update_wrapper(inner_handler, func)
def Conv2D(input_tensor, input_shape, filter_size, num_filters, strides=1, name=None):
    """
    Handy helper function for convnets.

    Performs 2D convolution with a default stride of 1. The kernel has shape
    filter_size x filter_size with num_filters output filters.

    Parameters
    ----------
    input_tensor : tensor
        Input batch; NHWC layout is implied by the [1, s, s, 1] strides.
    input_shape : int
        Number of input channels.
    filter_size : int
        Side length of the square kernel.
    num_filters : int
        Number of output channels.
    strides : int, optional
        Stride applied in both spatial dimensions.
    name : str, optional
        Base name for the created variables; must not be None since it is
        concatenated with '_W'/'_b' below.

    Returns
    -------
    tensor
        Convolution output with bias added (no activation applied).
    """
    # [kernel_h, kernel_w, in_channels, out_channels]
    shape = [filter_size, filter_size, input_shape, num_filters]
    # initialize weights and biases of the convolution
    W = init_weights(name=name+'_W' , shape=shape)
    b = init_bias(name=name+'_b', shape=shape[-1])
    conv = tf.nn.conv2d(input_tensor, W, strides=[1, strides, strides, 1], padding='SAME', name=name)
    conv = tf.nn.bias_add(conv, b)
    return conv
def scale_val(val, factor, direction):
    """Scale val by factor either 'up' or 'down'."""
    if direction not in ('up', 'down'):
        raise ValueError('direction must be "up" or "down"')
    delta = val * factor
    return val + delta if direction == 'up' else val - delta
def ttee(iterable, n=2):
    """Materialise ``n`` independent tuple copies of *iterable*.

    >>> ttee("ABC")
    (('A', 'B', 'C'), ('A', 'B', 'C'))
    """
    copies = tee(iterable, n)
    return tuple(tuple(copy) for copy in copies)
def addattrs(field, css):
    """
    Add HTML attributes to a form field's widget when rendering it in a
    template (original docstring was in Chinese: "add various attrs to a
    form field, especially inputs, in templates").

    ``css`` is a comma-separated list of entries; a bare entry becomes the
    ``class`` attribute, while ``name=value`` entries become arbitrary
    attributes, e.g. ``"btn,data-toggle=modal"``.
    """
    attrs = {}
    definition = css.split(',')
    for d in definition:
        if '=' not in d:
            attrs['class'] = d
        else:
            # Bug fix: split only on the first '=' so values may themselves
            # contain '=' (the two-way unpack crashed on e.g. "style=a=b").
            t, v = d.split('=', 1)
            attrs[t] = v
    return field.as_widget(attrs=attrs)
def regular_periodic(freqs, amplitudes, phase, size=501):
    """Generate periodic test data sampled at regular intervals: superposition
    of multiple sine waves, each with multiple harmonics.

    ``amplitudes`` is a 2-D array; row i selects the fundamental
    ``freqs[i]`` and column j its (j+1)-th harmonic.  Returns
    (times, values, errors) arrays of length ``size``; errors are a
    constant 1e-4.
    """
    times = np.linspace(0, 2, size)
    values = np.zeros(size)
    for (row, col), amp in np.ndenumerate(amplitudes):
        harmonic_freq = freqs[row] * (col + 1)
        values = values + amp * np.sin(2 * np.pi * times * harmonic_freq + phase)
    errors = 1e-4 * np.ones(size)
    return times, values, errors
from bread.contrib.reports.fields.queryfield import parsequeryexpression
def generate_excel_view(queryset, fields, filterstr=None):
    """
    Generates an excel file from the given queryset with the specified fields.

    fields: list [<fieldname1>, <fieldname2>, ...] or dict with {<fieldname>: formatting_function(object, fieldname)}
    filterstr: a djangoql filter string which will lazy evaluated, see bread.fields.queryfield.parsequeryexpression

    Returns a view callable suitable for a URL conf.
    """
    model = queryset.model
    if isinstance(fields, list):
        fields = _expand_ALL_constant(model, fields)
    if not isinstance(fields, dict):
        # Bug fix: bind `field` per-lambda via a default argument.  The
        # original closure captured the comprehension variable, so every
        # formatter rendered the value of the *last* field.
        fields = {
            field: (lambda inst, field=field: format_value(getattr(inst, field)))
            for field in fields
        }

    def excelview(request):
        # Apply the lazy djangoql filter (if any) and restrict to the pks
        # passed as ?selected=..., then stream the workbook as a response.
        items = queryset
        if isinstance(filterstr, str):
            items = parsequeryexpression(model.objects.all(), filterstr)
        if "selected" in request.GET:
            items = items.filter(
                pk__in=[int(i) for i in request.GET.getlist("selected")]
            )
        items = list(items.all())
        workbook = generate_excel(items, fields)
        workbook.title = pretty_modelname(model)
        return xlsxresponse(workbook, workbook.title)

    return excelview
import os
def makeDir(dirName):
    """Makes a new directory with the specified name. If the directory already
    exists then raise a new exception.

    Returns the (status, output) pair of the shell ``mkdir`` invocation,
    preserving the historical ``commands.getstatusoutput`` contract.
    """
    if (os.access(dirName, os.F_OK)):
        raise Exception("Directory already exists: " + dirName)
    # Bug fix: the `commands` module was removed in Python 3;
    # subprocess.getstatusoutput is its drop-in replacement.
    import subprocess
    return subprocess.getstatusoutput("mkdir " + dirName)
def get_tag_by_name(repo: Repository, tag_name: str) -> Tag:
    """Fetches a tag by name from the given repository"""
    reference = get_ref_for_tag(repo, tag_name)
    sha = reference.object.sha
    try:
        return repo.tag(sha)
    except github3.exceptions.NotFoundError:
        raise DependencyLookupError(
            f"Could not find tag with SHA {sha} on GitHub"
        )
def get_geo_selected(results, datas, extras, filters=False):
    """Get specific Geography based on existing ids.

    Parameters
    ----------
    results : dict
        Mutated in place: ``results['wards']`` is set to the selected list.
    datas : list of str
        Parent area ids (as strings) whose child areas should be returned.
    extras : list of str
        Already-selected area ids to keep in the result.
    filters : bool, optional
        Passed through to ``get_all_geo_list``.

    Returns
    -------
    dict
        ``results`` with a de-duplicated 'wards' list of
        '<area_id>,<area_name>' strings (order not preserved).
    """
    wards = []
    all_list = get_all_geo_list(filters)
    # Drop empty-string placeholders before the int conversions below.
    if '' in datas:
        datas.remove('')
    if '' in extras:
        extras.remove('')
    area_ids = list(map(int, datas))
    selected_ids = list(map(int, extras)) if extras else []
    for geo_entry in all_list:
        area_id = geo_entry['area_id']
        formatted = '{},{}'.format(area_id, geo_entry['area_name'])
        # Children of a requested parent area.
        if geo_entry['parent_area_id'] in area_ids:
            wards.append(formatted)
        # Keep anything that was already selected.
        if area_id in selected_ids:
            wards.append(formatted)
    # De-duplicate (leftover debug prints and a dead overwrite of
    # results['wards'] with the raw input were removed).
    results['wards'] = list(set(wards))
    return results
def notify(text, boxwidth=60):
    """Render *text* as a 'notification' styled textbox (starred border)."""
    return box(text, boxwidth=boxwidth, decor="*")
def returns_player(method):
    """
    Decorator: Always returns a single result or None.
    """
    def func(self, *args, **kwargs):
        "decorator"
        # Delegate to the list-returning variant, then take the first hit.
        matches = returns_player_list(method)(self, *args, **kwargs)
        return matches[0] if matches else None
    return update_wrapper(func, method)
def tracks(date):
    """
    Query the charts/beatport/tracks endpoint for the given date.
    Data available on Fridays.
    https://api.chartmetric.com/api/charts/beatport

    **Parameters**

    - `date`: string date in ISO format %Y-%m-%d

    **Returns**

    A list of dictionary of tracks on Beatport charts.
    """
    request_params = {
        "date": utilities.strWeekday(date, FRIDAY),
        "genre": "top-100",
        "offset": 0,
    }
    payload = utilities.RequestData(f"{BEATPORT_CHARTS_URL}", request_params)
    return utilities.RequestGet(payload)["data"]
def cos_sim(A_mat, B_vec):
    """Cosine similarity between the rows of ``A_mat`` and ``B_vec``.

    (Translated from the original Japanese docstring: given an item-vector
    matrix or vector, compute the cosine-similarity matrix between the
    item vectors.)
    """
    # Dot products between each row of A and B.
    dots = np.dot(A_mat, B_vec)
    # Euclidean norms of A's rows and of B.
    norm_a = (A_mat ** 2).sum(axis=1, keepdims=True) ** .5
    norm_b = (B_vec ** 2).sum(axis=0, keepdims=True) ** .5
    # Normalise by both magnitudes.
    return dots / norm_a.T / norm_b
def parse_dtype(space):
    """Get a tensor dtype from a OpenAI Gym space.

    Args:
        space: Gym space.

    Returns:
        TensorFlow data type.

    Raises:
        NotImplementedError: for unsupported space types.
    """
    if isinstance(space, gym.spaces.Box):
        return tf.float32
    if isinstance(space, gym.spaces.Discrete):
        return tf.int32
    raise NotImplementedError()
def tf_efficientnet_b2_ap(pretrained=True, **kwargs):
    """ EfficientNet-B2. Tensorflow compatible variant

    Returns (model, out_indices, return_features_num_channels), where the
    latter two describe which feature stages are exposed and their channel
    counts (for FPN-style consumers).
    """
    # Match the TF reference implementation: TF batch-norm epsilon and
    # 'same' padding.
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    # Indices of the stages whose feature maps are exposed.
    out_indices = [1, 2, 4, 6]
    model = _gen_efficientnet(
        'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
    # Channel counts of the feature maps at `out_indices`.
    return_features_num_channels = [24, 48, 120, 352]
    return model, out_indices, return_features_num_channels
def plot_ld_curves(ld_stats, stats_to_plot=[], rows=None, cols=None,
                   statistics=None, fig_size=(6,6), dpi=150, r_edges=None,
                   numfig=1, cM=False, output=None, show=False):
    """
    Plot single set of LD curves.

    LD curves are named as given in statistics; ld_stats is the output of
    bin_stats.  stats_to_plot is a list of lists, where each inner list gives
    the stats to plot in a given pane.  rows and cols tell us how to arrange
    the panes when there is more than one.  If statistics is None, we use
    the statistics as listed in ld_stats.names().

    Example (four panes, two per row):
    plot_ld_curves(ld_stats, stats_to_plot=[['DD_1_1','DD_2_2','DD_3_3'],
        ['DD_1_2','DD_1_3','DD_2_3'],['Dz_1_1_1','Dz_2_2_2','Dz_3_3_3'],
        ['pi2_2_2_2_2','pi2_3_3_3_3']], rows=2, cols=2,
        statistics=statistics)

    If you want to save the figure, set output to the file path+name.
    Returns the figure unless show is True.
    """
    num_axes = len(stats_to_plot)
    if num_axes == 0:
        return
    # Default layout: all panes on a single row.
    # (Idiom fix: `is None` / truthiness instead of `== None` / `== True`.)
    if rows is None and cols is None:
        cols = len(stats_to_plot)
        rows = 1
    if statistics is None:
        statistics = ld_stats.names()
    # NOTE(review): r_edges is effectively required despite the None
    # default -- the next line raises TypeError when it is omitted.
    r_centers = np.array((r_edges[:-1] + r_edges[1:]) / 2)
    x_label = '$r$'
    if cM:
        # convert recombination fractions to centiMorgans on the x-axis
        r_centers *= 100
        x_label = 'cM'
    fig = plt.figure(numfig, figsize=fig_size, dpi=dpi)
    fig.clf()
    axes = {}
    # loop through stats_to_plot, update axis, and plot
    for i, stats in enumerate(stats_to_plot):
        axes[i] = plt.subplot(rows, cols, i + 1)
        for stat in stats:
            # column index of this statistic in each binned row
            k = statistics[0].index(stat)
            to_plot = [ld_stats[j][k] for j in range(len(r_centers))]
            axes[i].plot(r_centers, to_plot, label=stat)
        axes[i].set_xscale('log')
        axes[i].set_yscale('log')
        axes[i].set_xlabel(x_label)
        axes[i].legend(frameon=False)
    fig.tight_layout()
    if output is not None:
        plt.savefig(output)
    if show:
        fig.show()
    else:
        return fig
def onehottify_2d_array(a):
    """
    One-hot encode a 2-D integer array.

    https://stackoverflow.com/questions/36960320/convert-a-2d-matrix-to-a-3d-one-hot-matrix-numpy

    :param a: 2-dimensional array.
    :return: 3-dim array where last dim corresponds to one-hot encoded vectors.
    """
    # https://stackoverflow.com/a/46103129/ @Divakar
    def all_idx(idx, axis):
        grid = np.ogrid[tuple(map(slice, idx.shape))]
        grid.insert(axis, idx)
        return tuple(grid)

    depth = a.max() + 1
    encoded = np.zeros(a.shape + (depth,), dtype=int)
    encoded[all_idx(a, axis=2)] = 1
    return encoded
def getUserCompetencies(cnx, exceptUserIDs):
    """
    Returns array of persons with their competences as values

    Parameters
    ----------
    cnx : connection
        NOTE(review): effectively ignored -- a fresh connection is always
        opened from ``dbconfig`` (behavior preserved from the original);
        confirm whether the caller's connection should be used instead.
    exceptUserIDs : sequence
        User ids to exclude from the result.

    Returns
    -------
    dict
        Maps "Firstname Lastname" -> list of lower-cased competence strings.
    """
    competencies = {}
    cnx = establishDBConnection(dbconfig)
    cursor = cnx.cursor()
    placeholder = '%s'
    placeholders = ', '.join(placeholder for unused in exceptUserIDs)
    query = ("""SELECT firstname, lastname, competence FROM profile
    WHERE user_id NOT IN ({}) AND competence IS NOT NULL
    """).format(placeholders)
    cursor.execute(query, tuple(exceptUserIDs))
    for (firstname, lastname, competence) in cursor:
        competencies[firstname + " " + lastname] = (
            [comp.strip().lower() for comp in competence.split(',')]
        )
    # Bug fix: close the connection only after the cursor has been fully
    # consumed -- the original closed it before reading any rows.
    cnx.close()
    return competencies
def add_induct_def(name, T, eqs):
    """Add the given inductive definition.

    The inductive definition is specified by the name and type of
    the constant, and a list of equations.

    For example, addition on natural numbers is specified by:
    ('plus', nat => nat => nat,
     [(plus(0,n) = n, plus(Suc(m), n) = Suc(plus(m, n)))]).

    Multiplication on natural numbers is specified by:
    ('times', nat => nat => nat,
     [(times(0,n) = 0, times(Suc(m), n) = plus(n, times(m,n)))]).
    """
    extension = TheoryExtension()
    # Declare the constant itself, then one named theorem per equation.
    extension.add_extension(AxConstant(name, T))
    for idx, prop in enumerate(eqs, start=1):
        th_name = "%s_def_%d" % (name, idx)
        extension.add_extension(Theorem(th_name, Thm([], prop)))
        extension.add_extension(Attribute(th_name, "hint_rewrite"))
    return extension
def cropDetectionSegments(ffBinRead, segmentList, cropSize = 64):
    """ Crops small images around detections.
    ffBinRead: read FF bin structure
    segmentList: list of coordinate tuples [(x1, y1), (x2, y2),...]
    cropSize: image square size in pixels (e.g. 64x64 pixels)

    Returns a list of (2*cropSize x 2*cropSize) arrays, zero-padded where a
    crop extends past the image border."""
    # Last valid column/row indices of the source image.
    ncols = ffBinRead.ncols - 1
    nrows = ffBinRead.nrows - 1
    cropedList = []
    # NOTE(review): only segmentList[0] is iterated; confirm the nesting of
    # segmentList matches this expectation.
    for coordinate in segmentList[0]:
        frame, x, y = coordinate
        x = int(round(x, 0))
        # Make sure each center row is even
        y = int(y)
        if y % 2 == 1:
            y += 1
        # Desired crop bounds (may fall outside the image).
        x_left = x - cropSize
        y_left = y - cropSize
        x_right = x + cropSize
        y_right = y + cropSize
        # Offsets into the destination (zero-padded) crop array.
        x_diff = 0
        y_diff = 0
        x_end = cropSize*2
        y_end = cropSize*2
        fillZeoresFlag = False
        # Clamp each edge to the image and remember how much zero padding
        # the destination needs on that side.
        if x_left < 0:
            fillZeoresFlag = True
            x_diff = -x_left
            x_end = cropSize*2
            x_left = 0
        if y_left < 0:
            fillZeoresFlag = True
            y_diff = -y_left
            y_end = cropSize*2
            y_left = 0
        if x_right > ncols:
            fillZeoresFlag = True
            x_diff = 0
            x_end = cropSize*2 - (x_right - ncols)
            x_right = ncols
        if y_right > nrows:
            fillZeoresFlag = True
            y_diff = 0
            y_end = cropSize*2 - (y_right - nrows - 1)
            y_right = nrows + 1
        imageArray = buildFF(ffBinRead, int(frame), videoFlag = True)
        # If croped area is in the corner, fill corner with zeroes
        if fillZeoresFlag:
            cropedArray = np.zeros(shape =(cropSize*2, cropSize*2))
            tempCrop = imageArray[y_left:y_right, x_left:x_right]
            cropedArray[y_diff:y_end, x_diff:x_end] = tempCrop
        else:
            cropedArray = imageArray[y_left:y_right, x_left:x_right]
        # NOTE(review): `frame % 1 == 0` is always True, making the
        # even-deinterlace branch unreachable -- this was likely meant to
        # be `frame % 2 == 0`; confirm intended parity before changing.
        if frame % 1 == 0:
            # Deinterlace odd
            cropedArray = deinterlace_array_odd(cropedArray)
        else:
            # Deinterlace even
            cropedArray = deinterlace_array_even(cropedArray)
        cropedList.append(cropedArray)
        #saveImage(cropedArray, "test_"+str(frame)+'.bmp', print_name = False)
    return cropedList
def delete_site_request(site_id):
    """Request deletion of a site.

    Renders the site-management page for the site, or responds 404 when no
    site with ``site_id`` exists.
    """
    matching_site = InventorySite.query.filter_by(id=site_id).first()
    if matching_site is None:
        abort(404)
    return render_template('inventory/manage_site.html', site=matching_site)
def twoD_Gaussian(tup, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
    """Evaluate a rotated 2D Gaussian plus a constant offset, raveled to 1-D.

    Args:
        tup (tuple): (x, y) coordinate arrays where the Gaussian is evaluated
        amplitude (float): peak amplitude of the Gaussian
        xo (float): centre in the x direction
        yo (float): centre in the y direction
        sigma_x (float): dispersion along the (unrotated) x axis
        sigma_y (float): dispersion along the (unrotated) y axis
        theta (float): angle of the major axis relative to the horizontal
        offset (float): constant additive term

    Returns:
        `numpy.ndarray`_: model values, flattened with ravel()
    """
    (x, y) = tup
    xo = float(xo)
    yo = float(yo)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Coefficients of the rotated quadratic form in the exponent.
    a = (cos_t**2)/(2*sigma_x**2) + (sin_t**2)/(2*sigma_y**2)
    b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
    c = (sin_t**2)/(2*sigma_x**2) + (cos_t**2)/(2*sigma_y**2)
    dx = x - xo
    dy = y - yo
    g = offset + amplitude*np.exp( - (a*(dx**2) + 2*b*dx*dy + c*(dy**2)))
    return g.ravel()
from itertools import combinations
def generate_synthetic_example(n_stations=65, lat_lims=(45, 50), lon_lims=(10, 20), u_0=0, A=.1, phi_2=60, B=.01, phi_4=20, amplitude_noise=.05):
    """
    Helper function to generate a simple synthetic example.
    Constant anisotropy in entire region.

    :param n_stations: Number of stations in study area
    :param lat_lims: Latitude limits of study area
    :param lon_lims: Longitude limits of study area
    :param A: Amplitude of 2Theta-term
    :param phi_2: Phase of 2Theta-term (degrees)
    :param B: Amplitude of 4Theta-term
    :param phi_4: Phase of 4Theta-term (degrees)
    :param u_0: Isotropic velocity
    :param amplitude_noise: half-width of the uniform noise added to each
        pair velocity
    :returns: station pair locations and velocities
    """
    # random station distribution inside grid (1-degree margin from edges)
    station_x = np.random.uniform(lon_lims[0]+1, lon_lims[1]-1, [n_stations])
    station_y = np.random.uniform(lat_lims[0]+1, lat_lims[1]-1, [n_stations])
    station_coords = list(zip(station_y, station_x))
    # every unordered pair of stations
    station_pairs = list(combinations(station_coords, 2))
    # compute synthetic velocities for station pairs
    vels = []
    for sp in station_pairs:
        dist, az, baz = get_distance(
            point1=sp[0],
            point2=sp[1],
            coord_type='geographic')
        # evaluate the anisotropy model at the smaller of the pair's
        # azimuth/back-azimuth, converted to radians
        vel_for_sp = aniso_parametrization(
            x=np.deg2rad(np.min([az, baz])),
            A=A,
            B=B,
            u_0=u_0,
            phi_2=np.deg2rad(phi_2),
            phi_4=np.deg2rad(phi_4)
        )
        # add noise
        vel_for_sp += np.random.uniform(-amplitude_noise, amplitude_noise)
        vels.append(vel_for_sp)
    return station_pairs, vels
import logging
def _extractStringType(newTypeName, newProperty, propDict, modelTypes, modelFileContainer):
    """extract the specific string type depending on the given format
    and return the specific type

    Keyword arguments:
    newTypeName -- current type name
    newProperty -- current property
    propDict -- dict of the property from the model file
    modelTypes -- list of already loaded models
    modelFileContainer -- file name and stuff, instance of ModelFileContainer
    """
    fmt = propDict.get('format', None)
    enum = propDict.get('enum', None)
    # Enum declarations take precedence over format strings.
    if enum is not None:
        return _extractEnumType(newTypeName, newProperty, enum, modelTypes, modelFileContainer)
    if fmt is None:
        return StringType()
    if fmt == 'date':
        return DateType()
    if fmt == 'date-time':
        return DateTimeType()
    if fmt == 'uuid':
        return UuidType()
    # Unknown format: log it and fall back to a plain string type.
    logging.error(
        "modelFile: %s, type=%s, property=%s: unknown string type format: %s"
        % (modelFileContainer.fileName, newTypeName, newProperty.name, fmt))
    return StringType()
import yaml
def get_palette(col, col_unique=None, as_dict=True):
    """Get palette for column.

    Parameters
    ----------
    col : {'subject_name', 'model', 'scaling', 'cell_type', 'image_name', str}
        The column to return the palette for. If we don't have a particular
        palette picked out, the palette will contain the strings 'C0', 'C1',
        etc, which use the default palette.
    col_unique : list or None, optional
        The list of unique values in col, in order to determine how many
        elements in the palette. If None, we use seaborn's default
    as_dict : bool, optional
        Whether to return the palette as a dictionary or not.

    Returns
    -------
    pal : dict, list, or seaborn ColorPalette.
        palette to pass to plotting function
    """
    with open(op.join(op.dirname(op.realpath(__file__)), '..', 'config.yml')) as f:
        config = yaml.safe_load(f)
    psychophys_vars = config['PSYCHOPHYSICS']
    if col_unique is None:
        col_nunique = None
    else:
        col_nunique = len(col_unique)
        # this way we ignore all nans, which cause problems for sorting
        col_unique = [i for i in col_unique if not isinstance(i, float) or
                      not np.isnan(i)]
    if col == 'subject_name':
        all_vals = sorted(psychophys_vars['SUBJECTS'])
        pal = sns.color_palette('deep', len(all_vals))
    elif col == 'model':
        # Try the three known naming schemes for the two models in turn.
        all_vals = [config['RGC']['model_name'], config['V1']['model_name']]
        if len(col_unique) == 1 and col_unique[0] in all_vals:
            pass
        elif sorted(col_unique) != sorted(all_vals):
            all_vals = ['Retina', 'V1']
            if len(col_unique) == 1 and col_unique[0] in all_vals:
                pass
            elif sorted(col_unique) != sorted(all_vals):
                all_vals = ['Luminance model', 'Energy model']
                if len(col_unique) == 1 and col_unique[0] in all_vals:
                    pass
                elif sorted(col_unique) != sorted(all_vals):
                    raise Exception(f"Don't know what to do with models {col_unique}")
        assert len(all_vals) == 2, "Currently only support 2 model values"
        pal = sns.color_palette('BrBG', 3)
        pal = [pal[0], pal[-1]]
    elif col == 'scaling':
        # unlike others, we don't force this palette to be consistent across
        # possible values of scaling
        scaling_vals = sorted([c for c in col_unique if c != 'ref_image'])
        # we want the color to get lighter as scaling gets larger
        pal = sns.color_palette('Reds_r', len(scaling_vals))
        pal = dict(zip(scaling_vals, pal))
        pal['ref_image'] = 'k'
        # Bug fix: the original did `all_vals = sorted(all_vals)` here with
        # `all_vals` never assigned in this branch (NameError).  pal is
        # already a dict, so we just record the key order for completeness.
        all_vals = scaling_vals + ['ref_image']
    elif col == 'cell_type':
        all_vals = ['midget', 'parasol']
        if len(col_unique) == 1 and col_unique[0] in all_vals:
            pass
        elif sorted(col_unique) != sorted(all_vals):
            raise Exception(f"Don't know what to do with cell_type {col_unique}")
        assert len(all_vals) == 2, "Currently only support 2 cell types"
        pal = sns.color_palette('BrBG', 3)
        pal = [(*pal[0], .5), (*pal[0], 1)]
    elif col == 'image_name':
        all_vals = [i.replace('_symmetric', '') for i in config['IMAGE_NAME']['ref_image']]
        pal = sns.color_palette('husl', len(all_vals))
        all_vals = sorted(all_vals, key=lambda x: get_order('image_name').index(x))
        pal = dict(zip(all_vals, pal))
    else:
        if col_nunique is None:
            col_nunique = 10
        # Bug fix: `all_vals` was only set when col_unique was given, which
        # made the sort below raise NameError when col_unique was None.
        all_vals = sorted(col_unique) if col_unique is not None else []
        pal = [f'C{i}' for i in range(col_nunique)]
    if as_dict and not isinstance(pal, dict):
        pal = dict(zip(all_vals, pal))
    return pal
import ROOT
def array2hist(array, hist, errors=None):
    """Convert a NumPy array into a ROOT histogram

    Parameters
    ----------
    array : numpy array
        A 1, 2, or 3-d numpy array that will set the bin contents of the
        ROOT histogram.
    hist : ROOT TH1, TH2, or TH3
        A ROOT histogram.
    errors : numpy array
        A numpy array of errors with matching dimensionality as the
        bin contents array. If not given, no errors are set

    Returns
    -------
    hist : ROOT TH1, TH2, or TH3
        The ROOT histogram with bin contents set from the array.

    Raises
    ------
    TypeError
        If hist is not a ROOT histogram.
    ValueError
        If the array and histogram are not compatible in terms of
        dimensionality or number of bins along any axis.

    Notes
    -----
    The NumPy array is copied into the histogram's internal array. If the input
    NumPy array is not of the same data type as the histogram bin contents
    (i.e. TH1D vs TH1F, etc.) and/or the input array does not contain overflow
    bins along any of the axes, an additional copy is made into a temporary
    array with all values converted into the matching data type and with
    overflow bins included. Avoid this second copy by ensuring that the NumPy
    array data type matches the histogram data type and that overflow bins are
    included.

    See Also
    --------
    hist2array

    Examples
    --------
    >>> from root_numpy import array2hist, hist2array
    >>> import numpy as np
    >>> from rootpy.plotting import Hist2D
    >>> hist = Hist2D(5, 0, 1, 3, 0, 1, type='F')
    >>> array = np.random.randint(0, 10, size=(7, 5))
    >>> array
    array([[6, 7, 8, 3, 4],
           [8, 9, 7, 6, 2],
           [2, 3, 4, 5, 2],
           [7, 6, 5, 7, 3],
           [2, 0, 5, 6, 8],
           [0, 0, 6, 5, 2],
           [2, 2, 1, 5, 4]])
    >>> _ = array2hist(array, hist)
    >>> # dtype matches histogram type (D, F, I, S, C)
    >>> hist2array(hist)
    array([[ 9.,  7.,  6.],
           [ 3.,  4.,  5.],
           [ 6.,  5.,  7.],
           [ 0.,  5.,  6.],
           [ 0.,  6.,  5.]], dtype=float32)
    >>> # overflow is excluded by default
    >>> hist2array(hist, include_overflow=True)
    array([[ 6.,  7.,  8.,  3.,  4.],
           [ 8.,  9.,  7.,  6.,  2.],
           [ 2.,  3.,  4.,  5.,  2.],
           [ 7.,  6.,  5.,  7.,  3.],
           [ 2.,  0.,  5.,  6.,  8.],
           [ 0.,  0.,  6.,  5.,  2.],
           [ 2.,  2.,  1.,  5.,  4.]], dtype=float32)
    >>> array2 = hist2array(hist, include_overflow=True, copy=False)
    >>> hist[2, 2] = -10
    >>> # array2 views the same memory as hist because copy=False
    >>> array2
    array([[  6.,   7.,   8.,   3.,   4.],
           [  8.,   9.,   7.,   6.,   2.],
           [  2.,   3., -10.,   5.,   2.],
           [  7.,   6.,   5.,   7.,   3.],
           [  2.,   0.,   5.,   6.,   8.],
           [  0.,   0.,   6.,   5.,   2.],
           [  2.,   2.,   1.,   5.,   4.]], dtype=float32)
    >>> # x, y, z axes correspond to axes 0, 1, 2 in numpy
    >>> hist[2, 3] = -10
    >>> array2
    array([[  6.,   7.,   8.,   3.,   4.],
           [  8.,   9.,   7.,   6.,   2.],
           [  2.,   3., -10., -10.,   2.],
           [  7.,   6.,   5.,   7.,   3.],
           [  2.,   0.,   5.,   6.,   8.],
           [  0.,   0.,   6.,   5.,   2.],
           [  2.,   2.,   1.,   5.,   4.]], dtype=float32)
    """
    # Expected full shape including the under/overflow bin on each axis
    # (hence the +2 per dimension).  Check TH3 first since it subclasses TH1.
    if isinstance(hist, ROOT.TH3):
        shape = (hist.GetNbinsX() + 2,
                 hist.GetNbinsY() + 2,
                 hist.GetNbinsZ() + 2)
    elif isinstance(hist, ROOT.TH2):
        shape = (hist.GetNbinsX() + 2, hist.GetNbinsY() + 2)
    elif isinstance(hist, ROOT.TH1):
        shape = (hist.GetNbinsX() + 2,)
    else:
        raise TypeError(
            "hist must be an instance of ROOT.TH1, ROOT.TH2, or ROOT.TH3")
    # Determine the corresponding numpy dtype
    for hist_type in 'DFISC':
        if isinstance(hist, getattr(ROOT, 'TArray{0}'.format(hist_type))):
            break
    else:
        raise AssertionError(
            "hist is somehow an instance of TH[1|2|3] "
            "but not TArray[D|F|I|S|C]")
    # Construct a NumPy array viewing the underlying histogram array
    dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
    # No copy is made if the dtype is the same as input
    _array = np.ascontiguousarray(array, dtype=dtype)
    if errors is not None:
        if errors.shape != array.shape:
            raise ValueError("Contents and errors are not compatible")
        # errors are specified as doubles in SetError function
        _errors = np.ascontiguousarray(errors, dtype=np.float64)
    else:
        _errors = None
    if _array.ndim != len(shape):
        raise ValueError(
            "array and histogram do not have "
            "the same number of dimensions")
    if _array.shape != shape:
        # Check for overflow along each axis
        slices = []
        for axis, bins in enumerate(shape):
            if _array.shape[axis] == bins - 2:
                # Array omits the under/overflow bins on this axis; place it
                # into the interior of the padded destination.
                slices.append(slice(1, -1))
            elif _array.shape[axis] == bins:
                slices.append(slice(None))
            else:
                raise ValueError(
                    "array and histogram are not compatible along "
                    "the {0}-axis".format("xyz"[axis]))
        array_overflow = np.zeros(shape, dtype=dtype)
        array_overflow[tuple(slices)] = _array
        _array = array_overflow
        if _errors is not None:
            errors_overflow = np.zeros(shape, dtype=np.float64)
            errors_overflow[tuple(slices)] = _errors
            _errors = errors_overflow
    # Copy into the histogram's internal buffer; transpose because ROOT
    # stores x as the fastest-varying axis.
    ARRAY_NUMPY2ROOT[len(shape)][hist_type](
        ROOT.AsCObject(hist), np.ravel(np.transpose(_array)))
    # Set the number of entries to the number of array elements
    hist.SetEntries(_array.size)
    if _errors is not None:
        hist.SetError(np.ravel(_errors.T))
    return hist
def adjust_update_rules_for_fixed_nodes(predecessor_node_lists, truth_tables, fixed_nodes):
    """
    Adjust "update rules" matrix and its free element vector so that the fixed
    nodes will end up in their fixed states on each time step automatically,
    with no manual interventions required.

    :param predecessor_node_lists: list of predecessor node lists
    :param truth_tables: list of dicts (key: tuple of predecessor node states,
        value: resulting node state)
    :param fixed_nodes: dict with fixed nodes (key: node, value: node state)
    :return: (predecessor node lists and truth tables, adjusted with respect
        to fixed nodes); the inputs are left untouched
    """
    new_predecessors = [list(preds) for preds in predecessor_node_lists]
    new_tables = [dict(table) for table in truth_tables]
    for node, state in fixed_nodes.items():
        # A fixed node depends on nothing and always evaluates to its state.
        new_predecessors[node] = []
        new_tables[node] = {(): state}
    return new_predecessors, new_tables
def obs_data():
    """
    Return a metadata catalogue of the available observational data sets.

    Dictionary with variables as top keys and available observations
    directly below. For each observation data set, path and file pattern must
    be defined.  In the file patterns, 'YYYY', 'MM' and 'DD' act as
    year/month/day placeholders to be substituted by the caller.
    """
    meta_dict = {
        # ------------------------------------------------------------------------
        # 2m temperature
        'tas': {
            'EOBS': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS17/EUR-22/input/day',
                'file pattern': 'tas_EUR-22_EOBS17_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS19': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS19/EUR-10/input/day',
                'file pattern': 'tas_EUR-10_EOBS19e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS20': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS20/EUR-10/input/day',
                'file pattern': 'tas_EUR-10_EOBS20e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'tas_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'NGCD': {
                'path': '/nobackup/rossby20/sm_petli/data/NGCD',
                'file pattern': 'tas_NGCD_type2_YYYYMMDD-YYYYMMDD.nc', # noqa
            },
        },
        'tasmax': {
            'EOBS': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS17/EUR-22/input/day',
                'file pattern': 'tasmax_EUR-22_EOBS17_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS19': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS19/EUR-10/input/day',
                'file pattern': 'tasmax_EUR-10_EOBS19e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS20': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS20/EUR-10/input/day',
                'file pattern': 'tasmax_EUR-10_EOBS20e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'tasmax_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'NGCD': {
                'path': '/nobackup/rossby20/sm_petli/data/NGCD',
                'file pattern': 'tasmax_NGCD_type2_YYYYMMDD-YYYYMMDD.nc', # noqa
            },
        },
        'tasmin': {
            'EOBS': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS17/EUR-22/input/day',
                'file pattern': 'tasmin_EUR-22_EOBS17_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS19': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS19/EUR-10/input/day',
                'file pattern': 'tasmin_EUR-10_EOBS19e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS20': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS20/EUR-10/input/day',
                'file pattern': 'tasmin_EUR-10_EOBS20e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'tasmin_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'NGCD': {
                'path': '/nobackup/rossby20/sm_petli/data/NGCD',
                'file pattern': 'tasmin_NGCD_type2_YYYYMMDD-YYYYMMDD.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Precipitation
        'pr': {
            'EOBS': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS17/EUR-22/input/day',
                'file pattern': 'pr_EUR-22_EOBS17_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS19': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS19/EUR-10/input/day',
                'file pattern': 'pr_EUR-10_EOBS19e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS20': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS20/EUR-10/input/day',
                'file pattern': 'pr_EUR-10_EOBS20e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'pr_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'ERAI': {
                'path': '/nobackup/rossby20/sm_petli/data/ERAI/VALIDATION/EUR/day',
                'file pattern': 'pr_day_ECMWF-ERAINT_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'NGCD': {
                'path': '/nobackup/rossby20/sm_petli/data/NGCD',
                'file pattern': 'pr_NGCD_type2_YYYYMMDD-YYYYMMDD.nc', # noqa
            },
            'HIPRADv2.0': {
                'path': '/nobackup/rossby20/sm_petli/data/HIPRAD/1h_old_code/masked', # noqa
                # 'path': '/nobackup/rossby20/sm_petli/data/HIPRAD/1h_old_code', # noqa
                'file pattern': 'pr_HIPRAD2_1H_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'HIPRADv2.1': {
                'path': '/nobackup/rossby20/sm_petli/data/HIPRAD/1h/masked',
                'file pattern': 'pr_HIPRAD2_1H_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'HIPRADv3': {
                'path': '/nobackup/rossby20/sm_petli/data/HIPRAD/Nordic_v3/1h',
                'file pattern': 'pr_HIPRAD3_Nordic_1H_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'SENORGE': {
                'path': '/nobackup/rossby20/sm_petli/data/seNorge_pr/orig',
                'file pattern': 'pr_seNorge2_PREC1h_grid_YYYYMM.nc', # noqa
            },
            'Klimagrid': {
                'path': '/nobackup/rossby20/sm_petli/data/klimagrid/1h',
                'file pattern': 'pr_Klimagrid_Denmark_1h_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'Spain02': {
                'path': '/nobackup/rossby20/sm_petli/data/Spain02/day',
                'file pattern': 'pr_Spain02_v2_day_YYYYMM01_YYYYMM31.nc', # noqa
            },
            'SAFRAN': {
                'path': '/nobackup/rossby20/sm_petli/data/SAFRAN/day',
                'file pattern': 'pr_Meteo-France_SAFRAN_day_YYYYMM01_YYYYMM31.nc', # noqa
            },
            'EURO4M-APGD': {
                'path': '/nobackup/rossby20/sm_petli/data/EURO4M/APGD/day',
                'file pattern': 'pr_EURO4M-APGD_day_YYYYMM01_YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Convective Precipitation
        'cpr': {
            'ERAI': {
                'path': '/nobackup/rossby20/sm_petli/data/ERAI/VALIDATION/EUR/day',
                'file pattern': 'cpr_day_ECMWF-ERAINT_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # CAPE
        'cape': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'cape_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # MSLP
        'psl': {
            'EOBS': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS17/EUR-22/input/day',
                'file pattern': 'psl_EUR-22_EOBS17_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS19': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS19/EUR-10/input/day',
                'file pattern': 'psl_EUR-10_EOBS19e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS20': {
                'path': '/home/rossby/imports/obs/EOBS/EOBS20/EUR-10/input/day',
                'file pattern': 'psl_EUR-10_EOBS20e_obs_r1i1p1_ECAD_v1_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'ERA5': {
                # 'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'path': '/home/rossby/imports/obs/ECMWF/ERA5/input/day',
                'file pattern': 'psl_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Surface wind speed
        'sfcWind': {
            'ERA5': {
                'path': '/home/rossby/imports/obs/ECMWF/ERA5/input/day',
                'file pattern': 'sfcWind_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'EOBS25': {
                'path': '/nobackup/rossby20/sm_petli/data/EOBS/EOBS25/day',
                'file pattern': 'sfcWind_EOBS25_ens_mean_0.1deg_reg_v25.0e_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Daily maximum Surface wind speed
        'sfcWindmax': {
            'ERA5': {
                'path': '/home/rossby/imports/obs/ECMWF/ERA5/input/day',
                'file pattern': 'sfcWindmax_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Long-wave down-welling radiation
        'rlds': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/1h',
                'file pattern': 'rlds_1H_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Short-wave down-welling radiation
        'rsds': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'rsds_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'CLARA_A2': {
                'path': '/nobackup/rossby20/sm_petli/data/CM_SAF/SW/CLARA-A2/day', # noqa
                'file pattern': 'rsds_CMSAF_CLARA-A2_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Long-wave surface net radiation
        'rlns': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/1h',
                'file pattern': 'rlns_1H_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Short-wave surface net radiation
        'rsns': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/1h',
                'file pattern': 'rsns_1H_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # TOA longwave emissions
        'rlnt': {
            'METEOSAT': {
                'path': '/nobackup/rossby20/sm_petli/data/CM_SAF/TOA/OLR/MSG/day', # noqa
                'file pattern': 'rlnt_CMSAF_METEOSAT-MSG2_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Surface upward latent heat fluxes
        'hfls': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/1h',
                'file pattern': 'hfls_1H_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Surface upward sensible heat fluxes
        'hfss': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/1h',
                'file pattern': 'hfss_1H_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Total Cloud Cover
        'clt': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'clt_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'CLARA_A2': {
                'path': '/nobackup/rossby20/sm_petli/data/CM_SAF/CLOUD/CLARA-A2/day', # noqa
                'file pattern': 'clt_CMSAF_CLARA-A2_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Low-level Cloud Cover
        'cll': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'cll_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'CLARA_A2': {
                'path': '/nobackup/rossby20/sm_petli/data/CM_SAF/CLOUD/CLARA-A2/day', # noqa
                'file pattern': 'cll_CMSAF_CLARA-A2_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Middle-level Cloud Cover
        'clm': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'clm_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'CLARA_A2': {
                'path': '/nobackup/rossby20/sm_petli/data/CM_SAF/CLOUD/CLARA-A2/day', # noqa
                'file pattern': 'clm_CMSAF_CLARA-A2_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # High-level Cloud Cover
        'clh': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'clh_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'CLARA_A2': {
                'path': '/nobackup/rossby20/sm_petli/data/CM_SAF/CLOUD/CLARA-A2/day', # noqa
                'file pattern': 'clh_CMSAF_CLARA-A2_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Integral of cloud water
        'clwvi': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/1h',
                'file pattern': 'clwvi_1H_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'CLARA_A2': {
                'path': '/nobackup/rossby20/sm_petli/data/CM_SAF/CLOUD/CLARA-A2/day', # noqa
                'file pattern': 'clwvi_CMSAF_CLARA-A2_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Integral of cloud ice
        'clivi': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/1h',
                'file pattern': 'clivi_1H_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'CLARA_A2': {
                'path': '/nobackup/rossby20/sm_petli/data/CM_SAF/CLOUD/CLARA-A2/day', # noqa
                'file pattern': 'clivi_CMSAF_CLARA-A2_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Total column water vapor
        'prw': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/day',
                'file pattern': 'prw_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Snow cover
        'sc': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/3h',
                'file pattern': 'sc_3H_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Snow depth water equivalent
        'snw_b': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5/VALIDATION/EUR/3h',
                'file pattern': 'sd_3H_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'seNorge': {
                'path': '/nobackup/rossby20/sm_petli/data/seNorge_snow/day',
                'file pattern': 'snw_b_seNorge_snowsim_v201_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Snow depth
        'snd': {
            'ERA5': {
                'path': '/nobackup/rossby20/sm_petli/data/ERA5_snow',
                'file pattern': 'snd_day_ECMWF-ERA5_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
            'seNorge': {
                'path': '/nobackup/rossby20/sm_petli/data/seNorge_snow/day',
                'file pattern': 'snd_seNorge_snowsim_v201_day_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Geopotential 500hPa
        'phi500': {
            'ERAI': {
                'path': '/nobackup/rossby20/sm_petli/data/ERAI/VALIDATION/EUR/day',
                'file pattern': 'phi500_day_ECMWF-ERAINT_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Temperature 850hPa
        'ta850': {
            'ERAI': {
                'path': '/nobackup/rossby20/sm_petli/data/ERAI/VALIDATION/EUR/day',
                'file pattern': 'ta850_day_ECMWF-ERAINT_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------------------------------------------------------
        # Specific humidity 850hPa
        'hus850': {
            'ERAI': {
                'path': '/nobackup/rossby20/sm_petli/data/ERAI/VALIDATION/EUR/day',
                'file pattern': 'hus850_day_ECMWF-ERAINT_rean_r1i1p1_YYYYMM01-YYYYMM31.nc', # noqa
            },
        },
        # ------------------------ END OF OBSERVATION LIST -----------------------
    }
    return meta_dict | f3db013fa2b99cdaee26b82075674a69a662030c | 3,632,436
def create_class_prediction_error_chart(classifier, X_train, X_test, y_train, y_test):
    """Build a class-prediction-error chart for a fitted sklearn classifier.

    The chart is produced with yellowbrick's ``ClassPredictionError``
    visualizer, fitted on the training split and scored on the test split,
    then wrapped as a Neptune image file.

    Args:
        classifier (:obj:`classifier`): fitted sklearn classifier object
        X_train (:obj:`ndarray`): training data matrix
        X_test (:obj:`ndarray`): testing data matrix
        y_train (:obj:`ndarray`): classification target for training
        y_test (:obj:`ndarray`): classification target for testing

    Returns:
        ``neptune.types.File`` image that can be assigned to a run's
        namespace, or ``None`` when chart creation fails (the error is
        printed rather than raised).
    """
    assert is_classifier(classifier), 'classifier should be sklearn classifier.'
    chart = None
    try:
        figure, axes = plt.subplots()
        error_viz = ClassPredictionError(classifier, is_fitted=True, ax=axes)
        error_viz.fit(X_train, y_train)
        error_viz.score(X_test, y_test)
        error_viz.finalize()
        chart = neptune.types.File.as_image(figure)
        plt.close(figure)
    except Exception as e:
        # Chart creation is best-effort: report the failure and return None.
        print('Did not log Class Prediction Error chart. Error {}'.format(e))
    return chart  # c0aadac243614914952d10d484ea4a9a7c89da26 | 3,632,437
def footer_embed(message: str, title) -> Embed:
    """
    Build a dark-green embed whose description ends with the standard footer.

    The footer links to the website, privacy statement and rules; the embed
    image is set to the decorative line image.

    :param message: embed description
    :param title: title of embed
    :return: Embed object
    """
    footer_links = (
        f"Links: [Website]({constants.website_url}) | "
        f"[Privacy statement]({constants.privacy_url}) | "
        f"[Rules]({constants.rules_url})"
    )
    body = f"{message}\n\n{footer_links}"
    result = simple_embed(body, title, color=Color.dark_green())
    result.set_image(url=constants.line_img_url)
    return result  # cb4637e479d5eabb3afedb965209271553e1f238 | 3,632,438
from .criteria import aic_eigen, mdl_eigen
import logging
def _get_signal_space(S, NP, verbose=False, threshold=None, NSIG=None,
criteria='aic'):
"""todo
"""
# This section selects automatically the noise and signal subspaces.
# NSIG being the number of eigenvalues corresponding to signals.
if NSIG is None:
if threshold is None:
logging.debug('computing NSIG using AIC method')
# get the minimum index of the AIC vector
if criteria == 'aic':
aic = aic_eigen(S, NP*2)
elif criteria == 'mdl':
aic = mdl_eigen(S, NP*2)
# get the minimum index of the AIC vector, add 1 to get the NSIG
NSIG = np.argmin(aic) + 1
logging.debug('NSIG={} found as the number of pertinent sinusoids'.format(NSIG))
else:
logging.debug('computing NSIG using user threshold ')
# following an idea from Matlab, pmusic, we look at the minimum
# eigen value, and split the eigen values above and below
# K times min eigen value, where K is >1
m = threshold * min(S)
new_s = S[np.where(S>m)]
NSIG = len(new_s)
logging.debug('found {}'.format(NSIG))
if NSIG == 0:
NSIG = 1
return NSIG | 3318f23f92b78362dd91cfe5b49e061b12790ba1 | 3,632,439 |
def joins_for_results(basetables, external_info):
    """
    Form and return the `results` table.

    One table is built per result kind (kind_problem, kind_pathproblem) and
    the non-empty ones are stacked.  Empty tables are excluded from the
    concatenation because concatenating with an empty table triggers type
    conversion to float.
    """
    per_kind = [_results_from_kind_problem(basetables, external_info),
                _results_from_kind_pathproblem(basetables, external_info)]
    non_empty = [table for table in per_kind if len(table) > 0]
    # pd.concat requires at least one table; fall back to the (empty) first.
    if non_empty:
        return pd.concat(non_empty)
    return per_kind[0]  # 1b2821a11a9a3df65ef9a3dfbf11262175307b9d | 3,632,440
def newton_wedge_fringe_sep(alpha, wavelength):
    """Return the fringe separation for an optical flat with wedge angle alpha.

    Adjacent fringes are separated by wavelength / (2 sin(alpha)).
    """
    denominator = 2 * np.sin(alpha)
    return wavelength / denominator  # fc29c6bfcfb6ed19e91588263ef12190bb9ec699 | 3,632,441
def hs_classify(scope):
    """
    A mapper ``Function -> (Dimension -> [HaloLabel]`` describing what type of
    halo exchange is expected by the various :class:`TensorFunction`s in a
    :class:`Scope`.

    The first pass collects one label per read access and dimension; the
    second pass reduces each label list to a single label, then functions
    with no STENCIL/FULL requirement are dropped from the result.
    """
    mapper = {}
    for f, r in scope.reads.items():
        # Only distributed tensor functions can require halo exchanges.
        if not f.is_TensorFunction:
            continue
        elif f.grid is None:
            # TODO: improve me
            continue
        v = mapper.setdefault(f, defaultdict(list))
        for i in r:
            for d in i.findices:
                # Note: if `i` makes use of SubDimensions, we might end up adding useless
                # (yet harmless) halo exchanges. This depends on the extent of a
                # SubDimension; e.g., in rare circumstances, a SubDimension might span a
                # region that falls completely within a single MPI rank, thus requiring
                # no communication whatsoever. However, the SubDimension extent is only
                # known at runtime (op.apply time), so unless one starts messing up with
                # the generated code (by adding explicit `if-then-else`s to dynamically
                # prevent a halo exchange), there is no escape from conservatively
                # assuming that some halo exchanges will be required
                if i.affine(d):
                    if f.grid.is_distributed(d):
                        if d in scope.d_from_access(i).cause:
                            v[d].append(POINTLESS)
                        elif i.touch_halo(d):
                            v[d].append(STENCIL)
                        else:
                            v[d].append(IDENTITY)
                    else:
                        v[d].append(NONE)
                elif i.is_increment:
                    # A read used for a distributed local-reduction. Users are expected
                    # to deal with this data access pattern by themselves, for example
                    # by resorting to common techniques such as redundant computation
                    v[d].append(UNSUPPORTED)
                elif i.irregular(d) and f.grid.is_distributed(d):
                    v[d].append(FULL)
    # Sanity check and reductions
    # NOTE(review): reduction priority appears to be
    # POINTLESS > UNSUPPORTED > single label; a mix of STENCIL and IDENTITY
    # collapses to STENCIL (the stronger requirement) -- confirm intent.
    for f, v in mapper.items():
        for d, hl in list(v.items()):
            unique_hl = set(hl)
            if unique_hl == {STENCIL, IDENTITY}:
                v[d] = STENCIL
            elif POINTLESS in unique_hl:
                v[d] = POINTLESS
            elif UNSUPPORTED in unique_hl:
                v[d] = UNSUPPORTED
            elif len(unique_hl) == 1:
                v[d] = unique_hl.pop()
            else:
                raise HaloSchemeException("Inconsistency found while building a halo "
                                          "scheme for `%s` along Dimension `%s`" % (f, d))
    # Drop functions needing no halo exchange
    mapper = {f: v for f, v in mapper.items()
              if any(i in [STENCIL, FULL] for i in v.values())}
    # Emit a summary warning
    for f, v in mapper.items():
        unsupported = [d for d, hl in v.items() if hl is UNSUPPORTED]
        if configuration['mpi'] and unsupported:
            warning("Distributed local-reductions over `%s` along "
                    "Dimensions `%s` detected." % (f, unsupported))
    return mapper | adcf5f795fb5eb505ad1c6e860c038ab31290932 | 3,632,442
def train_test_split(shp, savedir, config, client = None):
    """Create the train test split.

    Repeatedly draws candidate splits with ``sample_plots`` and keeps the
    one whose training half covers the most species.

    Args:
        shp: a filter pandas dataframe (or geodataframe)
        savedir: directly to save train/test and metadata csv files
        config: dict providing "iterations", "min_samples" and "test_fraction"
        client: optional dask client
    Returns:
        None: train.shp and test.shp are written as side effect
    """
    # Fixed seed so repeated runs draw the same candidate splits.
    np.random.seed(1)
    best_species_count = 0
    if client:
        futures = []
        for _ in np.arange(config["iterations"]):
            futures.append(
                client.submit(sample_plots, shp=shp,
                              min_samples=config["min_samples"],
                              test_fraction=config["test_fraction"]))
        for completed in as_completed(futures):
            candidate_train, candidate_test = completed.result()
            species_count = len(candidate_train.taxonID.unique())
            if species_count > best_species_count:
                print(species_count)
                best_train, best_test = candidate_train, candidate_test
                best_species_count = species_count
    else:
        for _ in np.arange(config["iterations"]):
            candidate_train, candidate_test = sample_plots(
                shp, min_samples=config["min_samples"],
                test_fraction=config["test_fraction"])
            species_count = len(candidate_train.taxonID.unique())
            if species_count > best_species_count:
                print(species_count)
                best_train, best_test = candidate_train, candidate_test
                best_species_count = species_count
    train = best_train
    test = best_test
    print("There are {} records for {} species for {} sites in filtered train".format(
        train.shape[0], len(train.taxonID.unique()), len(train.siteID.unique())))
    print("There are {} records for {} species for {} sites in test".format(
        test.shape[0], len(test.taxonID.unique()), len(test.siteID.unique())))
    # Give every record a unique index so train/test points can be matched later.
    test["point_id"] = test.index.values
    train["point_id"] = train.index.values
    return train, test  # 46c8becd416877306e9d28914f3032ff99946321 | 3,632,443
def parse_list_from_string(value):
    """
    Handle array fields by converting a comma-separated string to a list.

    Each element is stripped of surrounding whitespace.
    Example:
        "1, 2,3" -> ['1','2','3']
    """
    parts = value.split(",")
    return list(map(str.strip, parts))  # 51e9c654b9d18b8be61c37aab5f5029dfdea2213 | 3,632,444
def add_evaluation_args(parser):
    """Evaluation arguments.

    Adds the 'validation' argument group (batch size, iteration count,
    sequence length and prediction count used at evaluation time) to the
    given parser and returns the parser for chaining.
    """
    # (flag, default, help) for every evaluation option; all are ints.
    specs = [
        ('--eval-batch-size', None,
         'Data Loader batch size for evaluation datasets.'
         'Defaults to `--batch-size`'),
        ('--eval-iters', 2000,
         'number of iterations per epoch to run '
         'validation/test for'),
        ('--eval-seq-length', None,
         'Maximum sequence length to process for '
         'evaluation. Defaults to `--seq-length`'),
        ('--eval-max-preds-per-seq', None,
         'Maximum number of predictions to use for '
         'evaluation. Defaults to '
         'math.ceil(`--eval-seq-length`*.15/10)*10'),
    ]
    group = parser.add_argument_group('validation', 'validation configurations')
    for flag, default, help_text in specs:
        group.add_argument(flag, type=int, default=default, help=help_text)
    return parser  # 437a77987e9a4a461b98c9cb08b78a016efca9e9 | 3,632,445
import itertools
def merge(d1, d2):
    """Merge two dicts into one.

    Keys present in both dicts take their value from ``d2`` (it is unpacked
    last), matching the behaviour of the previous ``dict(itertools.chain(...))``
    implementation.  Neither input is modified.

    Args:
        d1 (dict): dataset 1
        d2 (dict): dataset 2
    Returns:
        dict: merged dict
    """
    # Dict unpacking replaces the old itertools.chain over two needless
    # list() materialisations of the item views.
    return {**d1, **d2}  # bb1d38f3cb45de6e98855fb04ae1d3d7e73e4a40 | 3,632,446
import re
def is_valid(number):
    """
    Check if number is a valid roman numeral (up to three M's, i.e. <= 3999).

    :param number: string to check
    :type number: str
    :return: True or False
    :rtype: bool
    """
    # bool() so the function actually returns the documented bool instead of
    # a Match object / None as before.
    return bool(re.match(
        r"^(M{0,3})(D?C{0,3}|C[DM])(L?X{0,3}|X[LC])(V?I{0,3}|I[VX])$", number
    ))  # 52e1937418d28701ee3d30da139f16ae64cfe480 | 3,632,447
def lammps_created_gsd(job):
    """Check if the mdtraj has converted the production run to a gsd
    trajectory for the job, i.e. whether 'prod.gsd' exists in the job
    workspace."""
    trajectory_name = "prod.gsd"
    return job.isfile(trajectory_name)  # 1b05e085970de4d875044e2e6604c1874a0a0e83 | 3,632,448
def has_open_quotes(s):
    """Return whether a string has open quotes.

    A quote is considered open when the count of that quote character in the
    string is odd.

    Returns
    -------
    The open quote character, or False when neither quote type is open.
    Double quotes are checked before single quotes, so complex cases with
    nested quotes resolve in favour of `"`.
    """
    for quote in ('"', "'"):
        if s.count(quote) % 2:
            return quote
    return False  # a9adbcd42518a71458c69c9aa1ff751fa3998573 | 3,632,449
def get_task(name, context=None, exception_if_not_exists=True):
    """
    Return the item for the specified task.

    :param name: Name of the task
    :param context: Lambda context
    :param exception_if_not_exists: true if an exception should be raised
        when the item does not exist
    :return: Task item; raises if the task with the specified name does not
        exist and exception_if_not_exists is set
    """
    with _get_logger(context=context) as task_logger:
        task_logger.info("get_task")
        return _get_task(name=name, context=context, logger=task_logger,
                         exception_if_not_exists=exception_if_not_exists)  # 9eb3007c230b75543c5227a44d282ed2ca6b3d9e | 3,632,450
def GetResourceReference(project, organization):
    """Get the resource reference of a project or organization.

    Args:
        project: A project name string; takes precedence when truthy.
        organization: An organization id string.
    Returns:
        The resource reference of the given project or organization.
    """
    if project:
        name, collection = project, 'cloudresourcemanager.projects'
    else:
        name, collection = organization, 'cloudresourcemanager.organizations'
    return resources.REGISTRY.Parse(name, collection=collection)  # fd986df9ced20a6b8edbd910d7268806841d2139 | 3,632,451
def pauli_block_y(M, norb):
    """
    y component of a matrix, see pauli_block.

    Returns (tmp, ret): tmp is the projected off-diagonal block and ret the
    reconstructed y-component matrix (same shape as M).
    """
    upper = M[:norb, norb:]
    lower = M[norb:, :norb]
    tmp = (upper * 1j + lower * (-1j)) / 2
    ret = zeros_like(M)
    ret[:norb, norb:] = tmp * (-1j)
    ret[norb:, :norb] = tmp * 1j
    return tmp, ret  # 848d70de19723ee22f2adc750c7dd9ec8c47d784 | 3,632,452
def permission_required_raise(perm, login_url=None, raise_exception=True):
    """
    A permission_required decorator that raises by default instead of
    redirecting to the login URL.
    """
    decorator = permission_required(perm, login_url=login_url,
                                    raise_exception=raise_exception)
    return decorator  # 7a26f7ac1e858cfcba6961856ecf428d0de03982 | 3,632,453
def list_objects(root_checkpointable):
    """Traverse the object graph and list all accessible objects.

    Looks for `Checkpointable` objects which are dependencies of
    `root_checkpointable`. Slot variables are included only if the variable
    they are slotting for and the optimizer are dependencies of
    `root_checkpointable` (i.e. if they would be saved with a checkpoint).

    Args:
      root_checkpointable: A `Checkpointable` object whose dependencies should
        be flattened.
    Returns:
      A flat list of objects.
    """
    # TODO(allenl): Extract out gathering logic so the naming logic doesn't
    # have to run.
    objects, path_to_root = (
        _breadth_first_checkpointable_traversal(root_checkpointable))
    object_names = _ObjectIdentityDictionary()
    for node, path in path_to_root.items():
        object_names[node] = _object_prefix_from_path(path)
    node_ids = _ObjectIdentityDictionary()
    for index, node in enumerate(objects):
        node_ids[node] = index
    _serialize_slot_variables(
        checkpointable_objects=objects,
        node_ids=node_ids,
        object_names=object_names)
    return objects  # 47715222f0f357cfb0f18f1a28b81fb908055197 | 3,632,454
def get_dips_value_around_300(l_cusp):
    """
    Locate the dip in L* near hue 300 degrees.

    Only the DIPS_300_SAMPLE_ST:DIPS_300_SAMPLE_ED window of ``l_cusp`` is
    searched.

    :param l_cusp: array of L* cusp values indexed by hue sample.
    :return: (L* value of the dip, index of the dip within ``l_cusp``)
    """
    # A single argmin over the window locates the dip; indexing with the
    # result avoids the separate min() scan the previous version performed.
    dips_300_idx = np.argmin(l_cusp[DIPS_300_SAMPLE_ST:DIPS_300_SAMPLE_ED])
    dips_300_idx += DIPS_300_SAMPLE_ST
    dips_300 = l_cusp[dips_300_idx]
    return dips_300, dips_300_idx  # 6db8f0ee9d92c14c50bee096830a9fa0cadc6a94 | 3,632,455
def get_filtered_enviro_df(lat_filter, long_filter):
    """
    Query the database for City of Chicago Environmental complaint and
    enforcement records falling within the given latitude and longitude
    filters, returning the result as a pandas dataframe.

    Input:
        - lat_filter: a set of truncated latitude coordinates
        - long_filter: a set of truncated longitude coordinates
    Output:
        - df: a pandas dataframe of environmental complaints and
              enforcement information
    """
    lat_range = (min(lat_filter), max(lat_filter))
    # NOTE(review): longitude bounds are (max, min) -- presumably because the
    # longitudes are negative, making max() the smaller magnitude; confirm.
    long_range = (max(long_filter), min(long_filter))
    return filter_df(Env_Complaints,
                     ['pk', 'longitude', 'latitude', 'address'],
                     {"address": "addr"},
                     latitude__range=lat_range,
                     longitude__range=long_range)  # b09c2cced6f17c8b4814982eb68f2af1589ad97a | 3,632,456
def is_valid_time_stamp_normal_response(response):
    """
    Returns true if a time_stamp_normal response is valid, i.e. if it parses
    with the normal datetime format.
    str -> bool
    """
    try:
        respones_to_datetime(response, constants.DATETIME_FORMATE_NORMAL)
    except ValueError:
        return False
    return True  # 37e521ab18f8a21f311b07a96082ea65d2c3a57e | 3,632,457
def status(repo="."):
    """Returns staged, unstaged, and untracked changes relative to the HEAD.

    :param repo: Path to repository or repository object
    :return: GitStatus tuple,
        staged - list of staged paths (diff index/HEAD)
        unstaged - list of unstaged paths (diff index/working-tree)
        untracked - list of untracked, un-ignored & non-.git paths
    """
    with open_repo_closing(repo) as r:
        # Staged: index vs HEAD.
        staged = get_tree_changes(r)
        # Unstaged: index vs working tree.
        unstaged = list(get_unstaged_changes(r.open_index(), r.path))
        # TODO - Status of untracked - add untracked changes, need gitignore.
        untracked = []
        return GitStatus(staged, unstaged, untracked)  # afc18280842b4fdc9bdcb330796be2a12e5edc1e | 3,632,458
def find_best_capacity_value(desired_capacity, data_file='data/capacities.csv'):
    """Return the closest capacity to the desired one from the possible capacity combinations.

    Parameters
    ----------
    desired_capacity: float
        The desired capacity value needed for the circuit.
    data_file: str

    Returns
    -------
    best_capacity: float
    error: float
        The difference between the output and the desired capacity.
    connection_data: tuple
        Tuple containing the indexes for the capacity boxes and the connection type (serial/parallel).
    """
    try:
        values = read_capacities_data_from_file(data_file)
    except FileNotFoundError:
        # Retry relative to the repository root when running from a nested dir.
        data_file = f'../../{data_file}'
        values = read_capacities_data_from_file(data_file)
    best_capacity, error, connection_data = 0, desired_capacity, None
    for idx, capacity in enumerate(values[0]):
        candidate_error = abs(capacity - desired_capacity)
        # Strict < keeps the first of equally-good candidates.
        if candidate_error < error:
            best_capacity = capacity
            error = candidate_error
            connection_data = (values[1][idx], values[2][idx],
                               values[3][idx], values[4][idx])
    return best_capacity, error, connection_data  # 19fd7cca9b45b088c2dafa97f75d37c8bae570d8 | 3,632,459
def _qt(add_row, secondary_dict_ptr, cols, key):
"""
This sub-function is called by view_utils.qt to add keys to the secondary_dict and
is NOT meant to be called directly.
"""
if cols[key]:
if cols[key] in secondary_dict_ptr:
return add_row, secondary_dict_ptr[cols[key]]
else:
secondary_dict_ptr[cols[key]] = {}
return True, secondary_dict_ptr[cols[key]]
else:
return add_row, secondary_dict_ptr | ce1cec842822077cbfbd908ff92b1552626cd5f2 | 3,632,460 |
from datetime import datetime
def conv_to_schedule(src: datetime) -> str:
    """Convert the given datetime to a schedule date string (FMT_STD format)."""
    formatted = datetime.strftime(src, FMT_STD)
    return formatted  # 571fd18bff08e4e9be9929a75b23c5b023122400 | 3,632,461
def create(title):
    """Create a Tk root.

    title - a title for the application (must be a str)

    Returns the Tk root with the rx Tkinter scheduler attached.
    """
    if not isinstance(title, str):
        # Explicit raise instead of assert: asserts are stripped under -O.
        raise TypeError("title must be a str")
    root = tk.Tk()
    # Tk.title is a method; the previous `root.title = title` merely shadowed
    # it and never actually set the window title.
    root.title(title)
    rx.concurrency.TkinterScheduler(root)
    return root  # aaa710b6429c0abafe40e7c7ea51f886f15624c4 | 3,632,462
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Homekit lock.

    Registers a listener on the paired accessory's connection; lock-mechanism
    services discovered on the accessory are added as HomeKitLock entities.
    """
    # The pairing id identifies the accessory connection cached in hass.data.
    hkid = config_entry.data["AccessoryPairingID"]
    conn = hass.data[KNOWN_DEVICES][hkid]
    @callback
    def async_add_service(service):
        # Only lock-mechanism services are handled here; returning False
        # leaves other service types to their own platforms.
        if service.short_type != ServicesTypes.LOCK_MECHANISM:
            return False
        info = {"aid": service.accessory.aid, "iid": service.iid}
        # NOTE(review): the trailing True presumably requests an update
        # before the entity is added -- confirm against async_add_entities.
        async_add_entities([HomeKitLock(conn, info)], True)
        return True
    conn.add_listener(async_add_service) | 1ebfa72ffc700a873cff642cbd0a92b2a64cdc35 | 3,632,463
from typing import List
from typing import Tuple
import torch
def bounding_boxes_to_tensor(bboxes: List[dict], image_size: Tuple[int, int],
                             cell_size: Tuple[int, int], classes: List[str],
                             device: torch.device) -> Tuple[torch.Tensor]:
    """
    Converts a list of bounding boxes to a tuple of tensors
    (multiple bounding boxes for each image)

    * The 'boxe_coords', a tensor of shape (N, 2, H, W) of (x, y)
      of the bounding boxe falling in the given grid cell.
    * The 'boxe_size', a tensor of shape (N, 2, H, W) of (width, height)
      of the bounding boxe falling in the given grid cell.
    * The 'object_mask', a tensor of shape (N, H, W) which is True when there
      is an object in the grid cell and False otherwise
    * The 'class_index', a tensor of shape (N, H, W) of class indexes present
      in each grid cell. 0 by default for empty cells
    * The 'weights', either None or a tensor of shape (N, H, W) of floats.
      In which case the values are the weighting in the loss function of each
      object to detect.

    Parameters
    ----------
    bboxes : list of dict
        A dict for each image with the keys
        * x1, x2, y1, y2 : list of int
            the x/y positions of the two corners of the boxe (in pixels)
        * class : list of str
            the classes of the detected objects
        * [weights : optional, list of floats]
            the weighting of each object in the loss function
    image_size : tuple of int
        the (height, width) of the images
    cell_size : tuple of int
        the (height, width) of the anchor grid's cells (in pixels)
    classes : list of str
        list of the unique class names
    device : torch.device
        the device the tensors should be stored on

    Return
    ------
    tuple of torch.Tensor :
        (boxe_coords, boxe_size, object_mask, class_index, weights)
    """
    h_image, w_image = image_size
    h_cell, w_cell = cell_size
    h_grid, w_grid = h_image // h_cell, w_image // w_cell
    n_images = len(bboxes)
    # np.bool/np.long/np.float were removed in NumPy >= 1.24; use the
    # builtin/explicit dtypes instead.
    object_mask = np.zeros((n_images, h_grid, w_grid), dtype=bool)
    class_index = np.zeros((n_images, h_grid, w_grid), dtype=np.int64)
    boxe_coords = np.zeros((n_images, 2, h_grid, w_grid), dtype=float)
    boxe_size = np.zeros((n_images, 2, h_grid, w_grid), dtype=float)
    # (image, cell row, cell col, frac y, frac x, box h, box w, class) per box
    data = [(img, int(0.5*(y1+y2)/h_cell), int(0.5*(x1+x2)/w_cell),
             0.5*(y1+y2)/h_cell % 1, 0.5*(x1+x2)/w_cell % 1,
             abs(y1-y2)/h_cell, abs(x1-x2)/w_cell, classes.index(c))
            for img, bb in enumerate(bboxes)
            for x1, x2, y1, y2, c
            in zip(bb["x1"], bb["x2"], bb["y1"], bb["y2"], bb["class"])]
    have_boxes = bool(data)
    if have_boxes:  # guard: zip(*[]) would raise when there are no boxes
        img, row, column, y, x, h, w, c = zip(*data)
        object_mask[img, row, column] = True
        boxe_coords[img, :, row, column] = list(zip(x, y))
        boxe_size[img, :, row, column] = list(zip(w, h))
        class_index[img, row, column] = c
    weights = [bb.get("weights", None) for bb in bboxes]
    if None not in weights:
        weights = sum(weights, [])
        # Bug fix: the grid dimensions were previously (h, w) -- the tuples of
        # per-box sizes -- instead of the grid shape (h_grid, w_grid).
        cell_weights = np.zeros((n_images, h_grid, w_grid), dtype=float)
        if have_boxes:
            cell_weights[img, row, column] = weights
    else:
        cell_weights = None
    boxe_coords = torch.tensor(boxe_coords, dtype=torch.float, device=device)
    boxe_size = torch.tensor(boxe_size, dtype=torch.float, device=device)
    object_mask = torch.tensor(object_mask, dtype=torch.bool, device=device)
    class_index = torch.tensor(class_index, dtype=torch.long, device=device)
    if cell_weights is not None:
        cell_weights = torch.tensor(cell_weights, dtype=torch.float,
                                    device=device)
    return boxe_coords, boxe_size, object_mask, class_index, cell_weights
def compartment_size_uncommon_keys(base):
    """
    Provide a model with different amounts metabolites for each compartment.
    """
    for compartment, names in (("ml", "ABCD"), ("om", "EFG")):
        base.add_metabolites(
            [cobra.Metabolite(name, compartment=compartment) for name in names])
    return base
def steady_state_step(population: list, reproduction_pipeline: list, insert, probes = (), evaluation_op = ops.evaluate):
    """One steady-state step inside an (otherwise generational) pipeline.

    A metaheuristic building block: use ``reproduction_pipeline`` to produce a
    single offspring from ``population``, evaluate it with ``evaluation_op``,
    then merge the evaluated individual back in with the ``insert`` strategy.
    Returns the updated population.
    """
    child = next(toolz.pipe(population, *reproduction_pipeline))
    evaluated_child = next(evaluation_op(iter([child])))
    updated_population = insert(population, evaluated_child)
    # Probes observe the individual regardless of whether insert() kept it.
    list(toolz.pipe(iter([evaluated_child]), *probes))
    return updated_population
def get_gym_environs():
    """Return the ids of all registered OpenAI ``gym`` environments."""
    registry = gym.envs.registry
    return [spec.id for spec in registry.all()]
import time
def single_classic_cv_evaluation(
        dx_train, dy_train, name, model, sample_weight, scoring, outer_cv,
        average_scores_across_outer_folds, scores_of_best_model, results,
        names, random_state):
    """Non nested cross validation of single model.

    Runs a single (non-nested) cross-validation of ``model`` on the training
    data, then compares the resulting scores against the current best model
    via statistical hypothesis testing and returns the (possibly updated)
    score dictionaries.

    NOTE(review): ``sample_weight`` is accepted but never used in this body --
    confirm whether it should be forwarded to ``cross_val_score``.
    NOTE(review): ``time`` is called as ``time()`` below, so the module must
    be imported as ``from time import time`` -- verify the file-level import.
    """
    # Only a single-metric ``scoring`` string (or None) is supported.
    if (isinstance(scoring, list) or isinstance(scoring, dict)
            or isinstance(scoring, tuple)):
        raise TypeError("""'single_classic_cv_evaluation' method takes only
        single-metric score values.""")
    if scoring not in (None, 'accuracy', 'roc_auc', 'neg_log_loss'):
        raise ValueError("""
        %s is not a valid scoring value for method
        'single_classic_cv_evaluation'. Valid options are ['accuracy',
        'roc_auc', 'neg_log_loss']""" % scoring)
    print()
    print("******* Evaluating model '%s'" % name)
    print()
    # Unpack the current best model's bookkeeping tuple:
    # (score, score stddev, cv results, exec time, (name, estimator, build_fn))
    best_score = scores_of_best_model[0]
    best_score_dev = scores_of_best_model[1]
    best_cv_results = scores_of_best_model[2]
    # best_log_loss = log_loss_score
    # best_brier_score = scores_of_best_model[2]
    best_exec_time = scores_of_best_model[3]
    best_model_name = scores_of_best_model[4][0]
    best_model_estim = scores_of_best_model[4][1]
    best_nn_build_fn = scores_of_best_model[4][2]
    print("Best model: '%s'. Best score: %1.3f (%1.3f)"
          % (best_model_name, best_score, best_score_dev))
    # Create a temporary folder to store the transformers of the pipeline
    cachedir = mkdtemp()
    steps = []
    # ...
    # Large-sample SVC special case: plain SVC scales poorly, so wrap it in a
    # bagging ensemble of linear SVCs when the training set is big.
    if name == 'SVMClf_2nd':
        if len(dy_train) > 10000:
            name = 'Bagging_SVMClf_2nd'
            print("*** SVC detected, evaluating model '%s'" % name)
            # model = SVC(C=.01, kernel='linear', probability=True,
            # class_weight='balanced', random_state=random_state)
            model.set_params(kernel='linear')
            n_estimators = 10
            bagging = BaggingClassifier(
                model, max_samples=1.0/n_estimators, n_estimators=n_estimators,
                random_state=random_state)
            model = bagging
        else:
            pass
        print("model:", model)
    else:
        pass
    steps.append((name, model))
    # transformers.append((name, model))
    ppline = Pipeline(steps, memory=cachedir)
    cv_success = 0
    try:
        t0 = time()
        cv_results = cross_val_score(
            ppline, dx_train, dy_train, cv=outer_cv,
            n_jobs=-2,
            pre_dispatch='2*n_jobs',
            scoring=scoring)
        t1 = time()
    except AttributeError as ae:
        print(ae)
    except JoblibValueError as jve:
        print(jve)
    except OverflowError as oe:
        print(oe)
    except Exception as e:
        print("Exception:", e)
    else:
        # Success path: record scores, then statistically compare this model
        # against the current best one.
        cv_success = 1
        names.append(name)
        if scoring == 'neg_log_loss':
            # Flip sklearn's negated loss back to a positive log-loss.
            cv_results = -1*cv_results
        results.append(cv_results)
        print("Outer CV to get scores: successful.")
        exec_time = (t1 - t0)
        print('Execution time: %.2fs w %s.' % (exec_time, name))
        print()
        stats_for_sht = calculate_stats_for_nhst(
            name, cv_results, scoring)
        score, score_dev = stats_for_sht[0], stats_for_sht[1]
        func = stats_for_sht[2]
        print("*** Score for %s [%s]: %1.3f (%1.3f)"
              % (name, scoring.strip('neg_'), score, score_dev))
        print()
        # statistical testing of classifiers
        stats = score, score_dev, best_score, best_score_dev
        # statistical_hypothesis_testing
        sht_scores_dicts = compare_models_performance(
            name, model, exec_time, best_model_name, best_score_dev,
            stats, cv_results, best_cv_results,
            average_scores_across_outer_folds, scores_of_best_model, func,
            cv_style="classic", scoring=scoring, params=None,
            random_state=random_state)
    finally:
        if cv_success:
            print("Yay! Evaluation of model '%s' done." % name)
        else:
            # On failure, fall back to returning the inputs unchanged.
            print("Sorry. Evaluation of model '%s' failed." % name)
            sht_scores_dicts = \
                average_scores_across_outer_folds, scores_of_best_model
        del ppline
        # delete the temporary cache before exiting
        rmtree(cachedir)
    print()
    # average_scores_across_outer_folds, scores_of_best_model
    return sht_scores_dicts
from nnabla import logger
def get_extension_context(ext_name, **kw):
    """Get the context of the specified extension.

    All extension's module must provide `context(**kw)` function.

    Args:
        ext_name (str) : Module path relative to `nnabla_ext`.
        kw (dict) : Additional keyword arguments for context function in a
            extension module.

    Returns:
        :class:`nnabla.Context`: The current extension context.

    Example:

        .. code-block:: python

            ctx = get_extension_context('cudnn', device_id='0')
            nn.set_default_context(ctx)

    """
    if ext_name == 'cuda.cudnn':
        logger.warn('Deprecated extension name "cuda.cudnn" passed.')
        # Bug fix: the remapped name was previously assigned to a misspelled,
        # unused variable ('extensin_name'), so the deprecated alias was still
        # passed to import_extension_module.
        ext_name = 'cudnn'
    mod = import_extension_module(ext_name)
    return mod.context(**kw)
def resolve_byprop(prop, value, minimum=1, timeout=FOREVER):
    """Resolve all streams whose property *prop* equals *value*.

    Preferred over resolving every stream and filtering afterwards when a
    specific stream is wanted.

    Keyword arguments:
    prop -- StreamInfo property to match (e.g. "name", "type", "source_id",
            or "desc/manufaturer").
    value -- required string value of that property (e.g. "EEG" for "type").
    minimum -- return at least this many streams. (default 1)
    timeout -- optional timeout in seconds; on expiry, fewer than the desired
               number of streams (possibly none) are returned. (default FOREVER)

    Returns a list of matching StreamInfo objects (with empty desc field),
    any of which can subsequently be used to open an inlet.

    Example: results = resolve_Stream_byprop("type","EEG")
    """
    # noinspection PyCallingNonCallable
    handles = (c_void_p * 1024)()
    num_found = lib.lsl_resolve_byprop(
        byref(handles), 1024,
        c_char_p(str.encode(prop)),
        c_char_p(str.encode(value)),
        minimum,
        c_double(timeout))
    return [StreamInfo(handle=handles[i]) for i in range(num_found)]
import math
def create_plot(model_filenames, ncols=3, projection=None, nplots_increment=0):
    """Create the base figure for a multipanel plot.

    Builds a matplotlib figure with one axis per model (plus
    ``nplots_increment`` extra panels), laid out on ``ncols`` columns; the
    number of rows is derived automatically.

    Parameters
    ----------
    model_filenames: OrderedDict
        Model names as keys and input files as values.
    ncols: int
        Number of columns in the plot.
    projection: cartopy projection instance, optional
    nplots_increment: int
        Increase (or decrease) the number of panels.

    Returns
    -------
    fig: matplotlib figure
    ax: list of matplotlib axes, flattened
    """
    total_plots = len(model_filenames) + nplots_increment
    nrows = int(math.ceil(total_plots / float(ncols)))
    ncols = int(float(ncols))
    # The figsize coefficients below were tuned for NorthPolarStereo and
    # PlateCarree projections; other projections may need different values.
    if projection:
        figure, axis = plt.subplots(
            nrows, ncols,
            figsize=(5 * ncols, 1.55 * nrows * ncols),
            subplot_kw=dict(projection=projection),
            constrained_layout=True)
    else:
        figure, axis = plt.subplots(
            nrows, ncols,
            figsize=(10 * ncols, 2.5 * nrows * ncols))
    # Normalise to a flat list of axes so callers can index uniformly.
    if isinstance(axis, np.ndarray):
        axis = axis.flatten()
    else:
        axis = [axis]
    return figure, axis
import time
def get_sample_records(n):
    """Build *n* (MeasureInput, MeasureResult) sample pairs for testing."""
    tsk, target = get_sample_task()
    inputs = [MeasureInput(target, tsk, tsk.config_space.get(i))
              for i in range(n)]
    outcomes = [MeasureResult((i + 1,), 0, i, time.time()) for i in range(n)]
    return list(zip(inputs, outcomes))
def build_get_boolean_tfft_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Get boolean array value [true, false, false, true].

    See https://aka.ms/azsdk/python/protocol/quickstart for how to
    incorporate this request builder into your code flow.

    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's
     `send_request` method.
    :rtype: ~azure.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == [
                bool  # Optional.
            ]
    """
    # Fixed endpoint; no path or query parameters for this operation.
    url = '/array/prim/boolean/tfft'
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(
        method="GET",
        url=url,
        headers=headers,
        **kwargs
    )
def grøn(tekst: str):
    """Colour a string green for output via Click."""
    colour = "green"
    return farvelæg(tekst, colour)
from typing import Union
from typing import Dict
from pathlib import Path
def init_config(config: Union[Dict, Path, Text, None]) -> ConfigParser:
    """Initialize the skill configuration.

    Merge order (later entries win):
    1. SDK defaults / additional locations
    2. The skill config file (or, for a dict, the dict itself)

    :param config: a dict of settings, a path to "skill.conf", or None
    :return: the populated ConfigParser
    """
    paths = load_additional()
    if isinstance(config, dict):
        parser = read_config(paths)
        logger.debug("Creating app from dictionary: %s", repr(config))
        parser.read_dict(config)
        return parser
    # Path/str/None: append the resolved skill config file last so it wins.
    return read_config(paths + [get_skill_config_file(config)])
def update(dbs, user, role_id=None, org_id=None, create_user=None):
    """
    Update a user's organisation and role bindings.

    :param dbs: database session
    :param user: the user ORM object to update
    :param role_id: new role id (skipped when falsy/0/'')
    :param org_id: new organisation id (skipped when falsy/0/'')
    :param create_user: operator performing the update (for audit columns)
    :return: '' on success, otherwise an error message string
    """
    try:
        with transaction.manager:
            if org_id and org_id != '' and org_id != 0:
                # Remove existing user-organisation bindings before relinking.
                dbs.query(SysUserOrg).filter(SysUserOrg.user_id == user.id).delete()
                porg_id = find_parent_org(dbs, org_id)
                if porg_id != org_id:
                    # NOTE(review): the "parent" binding is created with the
                    # *new* org_id while the binding below uses the user's
                    # previous user.org_id -- confirm this ordering is intended.
                    sys_user_parent_org = SysUserOrg(user_id=user.id, org_id=org_id, create_user=create_user,
                                                     create_time=date_now())
                    dbs.add(sys_user_parent_org)
                sys_user_org = SysUserOrg(user_id=user.id, org_id=user.org_id, create_user=create_user,
                                          create_time=date_now())
                dbs.merge(sys_user_org)
                user.org_id = org_id
                dbs.merge(user)
            if role_id and role_id != '' and role_id != 0:
                # Reuse the existing role row if present, otherwise create one.
                user_role = dbs.query(SysUserRole).filter(SysUserRole.user_id == user.id).first()
                if not user_role:
                    user_role = SysUserRole()
                    user_role.user_id = user.id
                    user_role.create_user = create_user
                    user_role.create_time = date_now()
                user_role.role_id = role_id
                dbs.merge(user_role)
        return ''
    except Exception as e:
        logger.error(e)
        return '更新用户失败,请核对后重试!'
def analogy_making_model(inputs, params, is_training, reuse, output_length=None):
    """Factory function to retrieve analogy-making model."""
    encode_latent = _get_network(Z_ENC_FN)
    decode_latent = _get_network(Z_DEC_FN)
    outputs = analogy_seq_encoding_model(inputs, params, is_training, reuse)
    # Encode the A->B style transformation into a latent vector T.
    with tf.variable_scope('latent_enc', reuse=reuse):
        enc_out = encode_latent(
            outputs['A_style'], outputs['B_style'],
            False, params, is_training=is_training)
    outputs['AtoB_latent'] = enc_out['latent']
    # Compute f*(D) = f(C) + T: reuse the A->B transformation for C->D.
    outputs['CtoD_latent'] = outputs['AtoB_latent']
    with tf.variable_scope('latent_dec', reuse=reuse):
        dec_out = decode_latent(
            outputs['CtoD_latent'], outputs['C_content'], outputs['C_style'],
            params, is_training=is_training)
    outputs['dec_D_embedding'] = dec_out['dec_embedding']
    outputs['D_style'] = dec_out['new_style']
    outputs = analogy_seq_decoding_model(inputs, outputs, params,
                                         is_training, reuse, output_length)
    return outputs
import torch
def test_model(dataloader, model, gpu=False):
    """Evaluate *model* on *dataloader* and return its prediction accuracy.

    Args:
        dataloader (DataLoader)
        model (torchvision model)
        gpu (bool): use GPU if True, otherwise CPU

    Returns:
        float: fraction of correctly predicted samples
    """
    print('\nEvaluating model performance on a Test data set...')
    # Switch to eval mode and move the model to the selected device.
    model.eval()
    device = select_device(gpu)
    model.to(device)
    criterion = nn.NLLLoss()
    correct = 0
    with ShadyBar('Progress', max=len(dataloader)) as bar:  # progress bar
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(device), labels.to(device)
            with torch.no_grad():
                log_probs = model(inputs)
                loss = criterion(log_probs, labels)
                probabilities = torch.exp(log_probs)
                _, top_class = probabilities.topk(1, dim=1)
                matches = top_class == labels.view(*top_class.shape)
                correct += torch.sum(matches.type(torch.FloatTensor)).item()
            bar.next()
    return correct / len(dataloader.dataset)
import io
import struct
def decrypt_chunk(chunk, password=None):
    """Decrypt an encrypted chunk with the given password and return the
    plaintext.

    The chunk layout is: 8-byte little-endian original size, 16-byte IV,
    then the AES-CBC ciphertext.  If password is None then
    saq.ENCRYPTION_PASSWORD is used instead.  password must be a byte
    string 32 bytes in length."""
    if password is None:
        password = saq.ENCRYPTION_PASSWORD
    assert isinstance(password, bytes)
    assert len(password) == 32
    reader = io.BytesIO(chunk)
    (plaintext_size,) = struct.unpack('<Q', reader.read(struct.calcsize('Q')))
    iv = reader.read(16)
    ciphertext = reader.read()
    cipher = AES.new(password, AES.MODE_CBC, iv)
    padded = cipher.decrypt(ciphertext)
    # Trim CBC padding back to the recorded plaintext length.
    return padded[:plaintext_size]
import time
from datetime import datetime
def epochFromNice(timeToConvert=None):
    """Return the epoch time for a string of the format
    YYYY-mm-dd_HH-MM-SS.UUUUUU (fractional seconds optional);
    returns time.time() if timeToConvert is not specified."""
    if timeToConvert is None:
        return time.time()
    date_part, time_part = timeToConvert.split('_')
    year, month, day = (int(piece) for piece in date_part.split('-'))
    hour_str, minute_str, second_str = time_part.split('-')
    seconds_float = float(second_str)
    whole_seconds = int(seconds_float)
    micros = int((seconds_float - whole_seconds) * 1E6)
    stamp = datetime.datetime(year, month, day, int(hour_str),
                              int(minute_str), whole_seconds, micros)
    # mktime drops sub-second precision, so re-add the microseconds.
    return time.mktime(stamp.timetuple()) + micros / 1.0E6
def prepare_tensor_SVD(tensor, direction, D=None, thresh=1E-32, normalize=False):
    """
    Prepare and truncate an MPS tensor using SVD.

    Parameters:
    ---------------------
    tensor: np.ndarray of shape (D1, D2, d)
        an MPS tensor
    direction: int or str
        >0 / 'l' / 'left' for a left-orthogonal decomposition,
        <0 / 'r' / 'right' for a right-orthogonal decomposition
    D: int or None
        maximum bond dimension to keep (hard cutoff); None means no cutoff
    thresh: float
        cutoff for Schmidt-value truncation
    normalize: bool
        if True, divide the singular values by their norm Z

    Returns:
    ----------------------------
    direction>0: (out, s, v, Z) with out left-isometric of shape (D1, D, d)
    direction<0: (u, s, out, Z) with out right-isometric of shape (D, D2, d)
    where Z is the norm of the singular values, i.e. tensor "=" u.s.v * Z
    """
    assert (direction != 0), 'do NOT use direction=0!'
    # Unpacking also validates that the tensor is rank 3.
    [l1, l2, d] = tensor.shape
    num_left_legs = None
    if direction in (1, 'l', 'left'):
        num_left_legs = 2
    if direction in (-1, 'r', 'right'):
        num_left_legs = 1
    if num_left_legs is None:
        # Preserve the original fall-through for unrecognised directions.
        return None
    net = tn.TensorNetwork()
    node = net.add_node(tensor)
    u_node, s_node, v_node, _ = net.split_node_full_svd(
        node,
        [node[i] for i in range(num_left_legs)],
        [node[i] for i in range(num_left_legs, 3)],
        max_singular_values=D, max_truncation_err=thresh)
    Z = tf.linalg.norm(s_node.tensor)
    if normalize:
        s_node.tensor /= Z
    return u_node.tensor, s_node.tensor, v_node.tensor, Z
from datetime import datetime
def calc_easter(year):
    """Return Easter Sunday of *year* as a ``datetime.date``.

    Uses the anonymous Gregorian (Meeus/Jones/Butcher) algorithm, because
    Easter is a floating mess.
    """
    golden = year % 19
    century, remainder = divmod(year, 100)
    d = (19 * golden + century - century // 4
         - ((century - (century + 8) // 25 + 1) // 3) + 15) % 30
    e = (32 + 2 * (century % 4) + 2 * (remainder // 4) - d
         - (remainder % 4)) % 7
    f = (d + e - 7 * ((golden + 11 * d + 22 * e) // 451) + 114)
    month, day_minus_one = divmod(f, 31)
    return datetime.date(year, month, day_minus_one + 1)
def MACDFIX(ds, count, signalperiod=-2**31):
    """Moving Average Convergence/Divergence Fix 12/26.

    Thin wrapper around ``talib.MACDFIX``; returns the 3-tuple
    (macd, signal, hist), or (None, None, None) when the underlying
    call produced no result.
    """
    ret = call_talib_with_ds(ds, count, talib.MACDFIX, signalperiod)
    # Use `is None` instead of `== None`: equality against array-like
    # results is elementwise and can raise "truth value is ambiguous".
    if ret is None:
        ret = (None, None, None)
    return ret
def create_dataset(form_data, params=None, use_doi=False):
    """
    Create dataset in Metax.

    Arguments:
        form_data {object} -- Object with the dataset data that has been validated and converted to comply with the Metax schema.
        params {dict} -- Dictionary of key-value pairs of query parameters.
        use_doi {bool} -- Forwarded to the Metax API; presumably requests a
            DOI identifier for the dataset -- TODO confirm against _metax_api.

    Returns:
        [type] -- Metax response.
    """
    return _metax_api.create_dataset(form_data, params, use_doi)
def blrPredict(W, data):
    """
    blrPredict predicts the label of data given the data and parameter W
    of Logistic Regression

    Input:
        W: the matrix of weight of size (D + 1) x 10. Each column is the weight
        vector of a Logistic Regression classifier.
        data: the data matrix of size N x D

    Output:
        label: vector of size N x 1 representing the predicted label of
        corresponding feature vector given in data matrix
    """
    # Prepend the bias term (a column of ones) to the input data.
    x = np.insert(data, 0, 1, axis=1)
    posterior = np.dot(x, W)
    # The sigmoid is strictly increasing, so argmax over the sigmoid of the
    # scores equals argmax over the raw scores; the sigmoid call (and the
    # dead `label = np.zeros(...)` initialization) have been removed.
    label = np.argmax(posterior, axis=1).reshape((data.shape[0], 1))
    return label
def _compare_objects(obj1, obj2):
    """
    DEBUG helper: elementwise-compare two numpy arrays and return whether
    every element matches.

    FIXME:
    CALL: print(f'compare_objects (ops2,operations): {_compare_objects (ops2,operations)}')
    """
    return (obj1 == obj2).all()
import torch
def mds_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5, verbose=2):
    """Recover 3D coordinates from a distance matrix via SMACOF-style MDS.

    Assumes (for now) the distrogram is (N x N) and symmetric; a 2-D input
    is unsqueezed in place to a batch of one.

    Args:
        pre_dist_mat: (N, N) or (batch, N, N) target distance matrix.
        weights: optional per-entry weights (defaults to all ones).
        iters: maximum number of Guttman-transform iterations.
        tol: minimum mean relative improvement to keep iterating.
        verbose: 0 silent, 1 report early stop, >=2 report every iteration.

    Returns:
        best_3d_coords: (batch, 3, N) tensor of coordinates.
        historic_stress: 1-D tensor of the stress value per accepted iteration.
    """
    if weights is None:
        weights = torch.ones_like(pre_dist_mat)
    # batched MDS
    if len(pre_dist_mat.shape) < 3:
        pre_dist_mat.unsqueeze_(0)
    batch, N, _ = pre_dist_mat.shape
    device = pre_dist_mat.device
    his = []
    # Fix: allocate working tensors on the input's device so the routine
    # also works for CUDA inputs (previously these were always on CPU).
    best_stress = float("Inf") * torch.ones(batch, device=device)
    best_3d_coords = 2 * torch.rand(batch, N, 3, device=device) - 1
    # iterative updates:
    for i in range(iters):
        # compute distance matrix of current coords and the raw stress
        dist_mat = torch.cdist(best_3d_coords, best_3d_coords, p=2)
        stress = (weights * (dist_mat - pre_dist_mat)**2).sum(dim=(-1, -2)) / 2
        # perturb - update X using the Guttman transform - sklearn-like
        dist_mat[dist_mat == 0] = 1e-5
        ratio = weights * (pre_dist_mat / dist_mat)
        B = ratio * (-1)
        B[:, np.arange(N), np.arange(N)] += ratio.sum(dim=-1)
        # update - double transpose. TODO: consider fix
        coords = (1. / N * torch.matmul(B, best_3d_coords))
        dis = torch.norm(coords, dim=(-1, -2))
        if verbose >= 2:
            print('it: %d, stress %s' % (i, stress))
        # accept the update only if relative improvement exceeds tolerance
        if (best_stress - stress / dis).mean() > tol:
            best_3d_coords = coords
            best_stress = (stress / dis)
            his.append(best_stress)
        else:
            if verbose:
                print('breaking at iteration %d with stress %s' % (i,
                                                                   stress))
            break
    return torch.transpose(best_3d_coords, -1, -2), torch.cat(his)
def get_pair(expr, index):
    """Project one component of a pair expression using python syntax.

    Arguments:
    - `expr`: an expression
    - `index`: 0 for the first component (Fst), 1 for the second (Snd)
    """
    if index not in (0, 1):
        raise Exception("Index applied to {0!s} must be 0 or 1"\
                        .format(expr))
    return Fst(expr) if index == 0 else Snd(expr)
import functools
def get_standardized_layers(hparams, dp=None, ps_devices=None):
    """Get the common attention and feed-forward layers.

    The returned layer functions will have the following signature:

        y, extra_loss = fct(x)

    extra_loss is set to 0.0 if the layer doesn't have extra loss.
    If dp is provided, the layers will be distributed within the devices.
    If moe wants to be used, both dp and model need to be set.

    Args:
        hparams (tf.HParams): the model hparameters
        dp (expert_utils.Parallelism): A data parallelism object. If not given,
            the dp calls are simply ignored.
        ps_devices: a reference to model._ps_devices (only used by the MOE layer)

    Returns:
        dict[str:fct]: A dictionary containing the standardized functions
    """

    def partial(fct, *args, **kwargs):
        """Same as functools.partial but with functools.wraps."""
        return functools.wraps(fct)(functools.partial(fct, *args, **kwargs))

    def register_layer(
        fct_in,
        default_args=None,
        default_kwargs=None,
        use_dp=True,
        recompute_grad=False,
    ):
        """Turn a function into its standardized version.

        Args:
            fct_in (fct): The function to register
            default_args (list): The default parameters to add to the function.
            default_kwargs (dict): The default parameters to add to the
                function. Those arguments can be overwritten when calling the
                function.
            use_dp (bool): Wrap the function call within a dataparallelism
                object if dp is available. Some layers (like MOE) must be
                called without dp.
            recompute_grad (bool): If True, recompute the function during the
                backward pass to save memory

        Returns:
            fct: the standardized layer function.
        """
        # The kwargs given when calling the function overwrite the default ones
        fct_in = partial(fct_in, *(default_args or []), **(default_kwargs or {}))

        @functools.wraps(fct_in)
        def decorator(x, *args, **kwargs):
            """Call the layer function."""
            fct = fct_in  # For closure. Could use nonlocal with Python 3
            # Eventually create the memory optimized version of the function
            if recompute_grad:
                fct = partial(fct, **kwargs)  # recompute_grad only accept args
                fct = common_layers.recompute_grad(fct)
                kwargs = {}
            # Eventually use dp (if given and not MoE)
            if use_dp and dp is not None:
                y = dp(fct, x, *args, **kwargs)
            else:
                y = fct(x, *args, **kwargs)
            # Eventually capture the extra loss
            extra_loss = 0.0
            if isinstance(y, tuple):
                y, extra_loss = y
            return y, extra_loss

        return decorator

    # Shared dimensions/config derived from hparams.
    total_key_depth = hparams.attention_key_channels or hparams.hidden_size
    total_value_depth = hparams.attention_value_channels or hparams.hidden_size
    is_train = hparams.mode == tf.estimator.ModeKeys.TRAIN
    moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")]
    # Use filter size if moe_hidden_sizes was not given
    if not moe_hidden_sizes:
        moe_hidden_sizes = [hparams.filter_size]
    expert_fn = expert_utils.ffn_expert_fn(hparams.hidden_size, moe_hidden_sizes,
                                           hparams.hidden_size)

    # Attention layers:

    # === Multi-head full attention layer ===
    multihead_attention_fn = register_layer(
        multihead_attention,
        default_kwargs=dict(
            memory_antecedent=None,  # Self-attention by default
            bias=None,
            total_key_depth=total_key_depth,
            total_value_depth=total_value_depth,
            output_depth=hparams.hidden_size,
            num_heads=hparams.num_heads,
            dropout_rate=hparams.attention_dropout,
        ))

    # === Memory efficient full-attention layer ===
    # Save memory by not storing the activations and
    # recomputing them during the backward pass
    memeff_attention_base_fn = register_layer(
        multihead_attention,
        default_kwargs=dict(
            total_key_depth=total_key_depth,
            total_value_depth=total_value_depth,
            output_depth=hparams.hidden_size,
            num_heads=hparams.num_heads,
            dropout_rate=hparams.attention_dropout,
        ),
        recompute_grad=True,
    )

    def memeff_attention_fn(*args, **kwargs):
        """Modify args/kwargs for compatibility with recompute_grad."""
        kwargs = kwargs.copy()
        assert len(args) == 1
        x = args[0]
        memory_antecedent = kwargs.pop("memory_antecedent", x)  # Same as x if None
        if kwargs.get("bias", None) is not None:  # Case where bias has been set
            args = (x, memory_antecedent, kwargs.pop("bias"))
        else:
            # Otherwise, only 2 args. This is necessary as recompute_grad does
            # not support None values.
            args = (x, memory_antecedent)
        return memeff_attention_base_fn(*args, **kwargs)

    # === Local attention (unmasked) layer ===
    # Reuse same parameters as multihead_attention
    # Don't mask the future
    local_attention_fn = partial(
        multihead_attention_fn,
        block_length=hparams.attention_loc_block_length,
        block_width=hparams.attention_loc_block_width,
        attention_type="local_unmasked",
    )

    # === Local attention (masked) layer ===
    # Reuse same parameters as multihead_attention
    # Only works for self attention. Always mask the future.
    local_attention_masked_fn = partial(
        multihead_attention_fn,
        block_length=hparams.attention_loc_block_length,
        attention_type="local_mask_right",
    )

    # === Masked memory-compressed multihead self attention layer ===
    # Only works for self attention. Always mask the future.
    compressed_attention_masked_fn = register_layer(
        multihead_self_attention_reduced,
        default_kwargs=dict(
            factor=hparams.attention_red_factor,
            nonlinearity=hparams.attention_red_nonlinearity,
            reduction_type=hparams.attention_red_type,
            multihead_params=dict(
                total_key_depth=total_key_depth,
                total_value_depth=total_value_depth,
                num_heads=hparams.num_heads,
                dropout_rate=hparams.attention_dropout,
            ),
        ),
    )

    # === Unmasked memory-compressed multihead self attention layer ===
    # Only works for self attention. Never mask the future. Bias never added
    compressed_attention_fn = partial(
        compressed_attention_masked_fn,
        add_mask=False,
    )

    # Feed-forwards layers:

    # === Mixture of expert layer ===
    distributed_moe = register_layer(
        expert_utils.distributed_moe,
        default_args=[
            dp,
            ps_devices,
        ],
        default_kwargs=dict(
            train=is_train,
            input_size=hparams.hidden_size,
            expert_fn=expert_fn,
            num_experts=hparams.moe_num_experts,
            k=hparams.moe_k,
            loss_coef=hparams.moe_loss_coef,
        ),
        use_dp=False,
    )

    # === FC layer ===
    conv_hidden_relu = register_layer(
        common_layers.conv_hidden_relu,
        default_kwargs=dict(
            hidden_size=hparams.filter_size,
            output_size=hparams.hidden_size,
            dropout=hparams.relu_dropout,
        ),
    )

    # === Separable convolution layer ===
    # No mask applied
    sep_conv_relu = partial(
        conv_hidden_relu,
        padding="SAME",
        # Parameters copied from the transformer model, could add hparams
        kernel_size=(3, 1),
        second_kernel_size=(31, 1),
    )

    # === Separable convolution layer (masked version) ===
    # Mask the future
    sep_conv_relu_masked = partial(
        sep_conv_relu,
        padding="LEFT",  # Mask future for decoder
    )

    # Define all available layers
    layers = dict(
        # Attention layers:
        a=multihead_attention_fn,  # Multihead full attention
        loc=local_attention_fn,  # Local attention
        locm=local_attention_masked_fn,  # Local attention (masked)
        red=compressed_attention_fn,  # Memory-compressed attention
        redm=compressed_attention_masked_fn,  # Memory-compressed att (masked)
        mem=memeff_attention_fn,  # Memory efficient
        # Feed-forward layers:
        fc=conv_hidden_relu,  # Fully connected
        sep=sep_conv_relu,  # Separable convolution (unmasked)
        sepm=sep_conv_relu_masked,  # Separable convolution (masked)
        moe=distributed_moe,  # Mixture of expert layer
    )
    return layers
def row_contains_data(fieldnames, row):
    """Return True if at least one of the named fields holds a truthy value.

    Args:
        fieldnames: iterable of keys to look up in ``row``.
        row: mapping (e.g. a ``csv.DictReader`` row); missing keys count
            as empty.

    Returns:
        bool: True if any field's value is truthy, False otherwise.
    """
    # any() short-circuits on the first truthy value, exactly like the
    # original early-return loop.
    return any(row.get(field) for field in fieldnames)
def create_model(name, batch_size, learning_rate=0.0001, wd=0.00001, concat=False, l2_loss=False, penalty=False, coef=0.4, verbosity=0):
    """Instantiate a MODEL from model.py and print its configuration.

    Args:
        name: model name, used to create a specific folder to save/load
            parameters.
        batch_size: batch size.
        learning_rate: learning rate (cross entropy is around 100x bigger
            than l2).
        wd: weight decay factor.
        concat: whether this model includes direct connections.
        l2_loss: whether this model uses l2 loss (cross entropy otherwise).
        penalty: whether to use the edge contrast penalty.
        coef: coefficient for the edge contrast penalty.
        verbosity: level of details to display.

    Returns:
        The constructed model.MODEL instance.
    """
    built_model = model.MODEL(name, batch_size, learning_rate, wd, concat, l2_loss, penalty, coef)
    built_model.display_info(verbosity)
    return built_model
def fs(path: str, mode: Mode = 'rb'):
    """Open *path* either locally or on S3, chosen from the path string.

    Note: an S3FileSystem is instantiated regardless of whether the path
    turns out to be an S3 path (matching the original behavior).
    """
    s3_filesystem = s3fs.S3FileSystem()
    opener = s3_filesystem.open if is_s3_path(path) else open
    return opener(path, mode=mode)
import logging
def create_logger(log_file):
""" Zack's Generic Logger function to create onscreen and file logger
Parameters
----------
log_file: string
`log_file` is the string of the absolute filepathname for writing the
log file too which is a mirror of the onscreen display.
Returns
-------
logger: logging object
Notes
-----
This function is completely generic and can be used in any python code.
The handler.setLevel can be adjusted from logging.INFO to any of the other
options such as DEBUG, ERROR, WARNING in order to restrict what is logged.
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# create console handler and set level to info
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# create error file handler and set level to info
handler = logging.FileHandler(log_file, "w", encoding=None, delay="true")
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger | 791fb5dc202b4a01dbb829fc4a890a3443bdf6d3 | 3,632,493 |
import six
def _check_stop_list(stop):
    """Validate and normalise a stop-words argument.

    Guard-clause form of the scikit-learn helper:
    https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95

    - None passes through unchanged
    - "thai" selects the built-in Thai stop list
    - any other string is rejected
    - anything else is assumed to be a collection and frozen
    """
    if stop is None:
        return None
    if stop == "thai":
        return THAI_STOP_WORDS
    if isinstance(stop, six.string_types):
        raise ValueError("not a built-in stop list: %s" % stop)
    # assume it's a collection
    return frozenset(stop)
from typing import Callable
from typing import Any
def protect_with_lock() -> Callable:
    """Decorator factory that serialises calls to a LockableQueryObject method.

    The decorated object must adhere to the interface of having:
        - A mapping of ids to query_lock objects

    Objects adhering to this interface (LockableQueryObject) are:
        - all the exchanges
        - the Blockchain object

    Each distinct (function name, arguments) signature gets its own lock, so
    identical concurrent calls are serialised while different calls proceed
    independently.
    """
    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def locked_call(wrappingobj: LockableQueryObject, *args: Any, **kwargs: Any) -> Any:
            signature_key = _function_sig_key(func.__name__, *args, **kwargs)
            # Guard the lock map itself while fetching this call's lock.
            with wrappingobj.query_locks_map_lock:
                query_lock = wrappingobj.query_locks_map[signature_key]
            with query_lock:
                return func(wrappingobj, *args, **kwargs)
        return locked_call
    return decorator
from datetime import datetime
def _handle_dates():
"""Collect and return data information."""
currentdate = datetime.date.today()
year = currentdate.year
month = currentdate.month
print(f"[INFO] Current year / month: {year:04d} / {month:02d}")
return currentdate | 54735562fe0366f286eeac33f897a98a8d34374a | 3,632,496 |
def add_to_five(number):
    """Interactively quiz the user on the sum ``5 + number``.

    Prompts via ``input()`` for the answer; on a correct answer a
    "walking" gif is returned, otherwise the user gets up to five retries
    before a consolation gif is returned.

    :param number: int or float to add to 5 to form the quiz question.
    :return: ``gifs.walking()`` on a correct answer, ``gifs.olaf_heart()``
        after five wrong attempts, or ``None`` implicitly if *number* is
        not numeric.
    """
    gifs = FrozenGif()
    acceptable = (int, float)
    type_number = type(number)
    if type_number in acceptable:
        # get answer
        answer = 5 + number
        # NOTE(review): int(input(...)) raises ValueError on non-numeric
        # user input; there is no handling for that here.
        response = int(input("Enter the answer to 5 + {}? ".format(number)))
        type_response = type(response)
        # Retry prompt text, formatted once with the user's *first*
        # response (later wrong answers reuse this same message).
        wrong_response = "Sorry! 5 + {} is not equal to {}. Try again...".format(number, response)
        if (type_response in acceptable) and (response == answer):
            print('\n CORRECT!!! GREAT WORK!!!')
            return gifs.walking()
        else:
            # Give the user up to five more attempts.
            correct = 0
            while correct < 5:
                if (type_response in acceptable) and (response == answer):
                    print('\n CORRECT!!! GREAT WORK!!!')
                    return gifs.walking()
                elif (type_response in acceptable) and (response != answer):
                    response = int(input(wrong_response))
                    correct += 1
                else:
                    # Unreachable in practice: int(input(...)) always
                    # yields an int, so type_response is always acceptable.
                    pass
            print("Sorry, either try again or ask for help.")
            return gifs.olaf_heart()
def mergeticklists(list1, list2, mergeequal=1):
    """helper function to merge tick lists
    - return a merged list of ticks out of list1 and list2
    - CAUTION: original lists have to be ordered
    (the returned list is also ordered)

    :param list1: ordered list of ticks; copied, not modified in place
    :param list2: ordered list of ticks to merge in; its tick objects are
        inserted into the result (not copied)
    :param mergeequal: when truthy, a tick of list2 comparing equal to a
        tick of list1 is folded in via tick.merge() instead of duplicated
    """
    # TODO: improve along the lines of http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305269
    # do not destroy original lists
    list1 = list1[:]
    i = 0  # position in list1 (the growing merged result)
    j = 0  # position in list2 (the list being merged in)
    try:
        while 1: # we keep on going until we reach an index error
            while list2[j] < list1[i]: # insert tick
                list1.insert(i, list2[j])
                i += 1
                j += 1
            if list2[j] == list1[i]: # merge tick
                if mergeequal:
                    list1[i].merge(list2[j])
                j += 1
            i += 1
    except IndexError:
        # One of the lists is exhausted: the IndexError is the loop's only
        # exit.  Append any remaining list2 ticks and return the result.
        if j < len(list2):
            list1 += list2[j:]
        return list1
def test_url(url, size=17):
    """Test whether the given URL is accessible.

    Reads up to *size* bytes from the URL and reports whether the full
    amount arrived.

    Args:
        url: URL (or path) understood by ``gopen``.
        size: number of bytes that must be readable for success.

    Returns:
        bool: True if *size* bytes could be read, False otherwise
        (including on any exception, which is printed).
    """
    try:
        with gopen(url) as stream:
            data = stream.read(size)
            # Idiom fix: collapse if/return True/return False into a
            # single boolean expression.
            return len(data) == size
    except Exception as e:
        # Deliberate best-effort: any failure means "not accessible".
        print(e)
        return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.