content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from typing import Type
def meet_types(s: Type, t: Type) -> ProperType:
    """Return the greatest lower bound of two types."""
    if is_recursive_pair(s, t):
        # This case can trigger an infinite recursion, general support for this will be
        # tricky so we use a trivial meet (like for protocols).
        return trivial_meet(s, t)
    # Resolve aliases/deferred types so the isinstance dispatch below sees
    # concrete proper types.
    s = get_proper_type(s)
    t = get_proper_type(t)
    if isinstance(s, ErasedType):
        # An erased type absorbs the meet.
        return s
    if isinstance(s, AnyType):
        # meet(Any, t) -> the other operand.
        return t
    if isinstance(s, UnionType) and not isinstance(t, UnionType):
        # If exactly one side is a union, make it the visited type so the
        # visitor handles the union decomposition.
        s, t = t, s
    return t.accept(TypeMeetVisitor(s)) | 627920538076f3e15bec341bd32035940f61013c | 3,635,900 |
def delete_context(id):
    """
    Delete the requested context object.
    This WILL delete the backing index. NOTE(review): the original text read
    "but any alias of the same name" -- presumably aliases are NOT deleted;
    confirm against Context.delete().
    :param id: str, the unique ID of the requested Context
    :return: acknowledgement message as JSON
    """
    # get the object
    ctx = Context.get(id)
    # NOTE(review): request.args.get() returns a *string* when the query
    # parameter is present; any non-empty string (including "false") is
    # truthy, so ?delete_index=false still deletes the index. Confirm intent.
    ctx.delete(delete_index=request.args.get('delete_index', True))
    return jsonify(dict(
        acknowleged=True,
        message='context of ID=%s was deleted.' % id
    )) | d14c21a5ba09794c32df331e629d0b3ee9c9f5da | 3,635,901 |
import os
import logging
import sys
def dosplot(filename=None, prefix=None, directory=None, elements=None,
            lm_orbitals=None, atoms=None, subplot=False, shift=True,
            total_only=False, plot_total=True, legend_on=True,
            legend_frame_on=False, legend_cutoff=3., gaussian=None, height=6.,
            width=8., xmin=-6., xmax=6., num_columns=2, colours=None, yscale=1,
            xlabel='Energy (eV)', ylabel='Arb. units',
            style=None, no_base_style=False,
            image_format='pdf', dpi=400, plt=None, fonts=None):
    """A script to plot the density of states from a vasprun.xml file.
    Args:
        filename (:obj:`str`, optional): Path to a vasprun.xml file (can be
            gzipped).
        prefix (:obj:`str`, optional): Prefix for file names.
        directory (:obj:`str`, optional): The directory in which to save files.
        elements (:obj:`dict`, optional): The elements and orbitals to extract
            from the projected density of states. Should be provided as a
            :obj:`dict` with the keys as the element names and corresponding
            values as a :obj:`tuple` of orbitals. For example, the following
            would extract the Bi s, px, py and d orbitals::
                {'Bi': ('s', 'px', 'py', 'd')}
            If an element is included with an empty :obj:`tuple`, all orbitals
            for that species will be extracted. If ``elements`` is not set or
            set to ``None``, all elements for all species will be extracted.
        lm_orbitals (:obj:`dict`, optional): The orbitals to decompose into
            their lm contributions (e.g. p -> px, py, pz). Should be provided
            as a :obj:`dict`, with the elements names as keys and a
            :obj:`tuple` of orbitals as the corresponding values. For example,
            the following would be used to decompose the oxygen p and d
            orbitals::
                {'O': ('p', 'd')}
        atoms (:obj:`dict`, optional): Which atomic sites to use when
            calculating the projected density of states. Should be provided as
            a :obj:`dict`, with the element names as keys and a :obj:`tuple` of
            :obj:`int` specifying the atomic indices as the corresponding
            values. The elemental projected density of states will be summed
            only over the atom indices specified. If an element is included
            with an empty :obj:`tuple`, then all sites for that element will
            be included. The indices are 0 based for each element specified in
            the POSCAR. For example, the following will calculate the density
            of states for the first 4 Sn atoms and all O atoms in the
            structure::
                {'Sn': (1, 2, 3, 4), 'O': (, )}
            If ``atoms`` is not set or set to ``None`` then all atomic sites
            for all elements will be considered.
        subplot (:obj:`bool`, optional): Plot the density of states for each
            element on separate subplots. Defaults to ``False``.
        shift (:obj:`bool`, optional): Shift the energies such that the valence
            band maximum (or Fermi level for metals) is at 0 eV. Defaults to
            ``True``.
        total_only (:obj:`bool`, optional): Only extract the total density of
            states. Defaults to ``False``.
        plot_total (:obj:`bool`, optional): Plot the total density of states.
            Defaults to ``True``.
        legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults
            to ``True``.
        legend_frame_on (:obj:`bool`, optional): Plot a frame around the
            graph legend. Defaults to ``False``.
        legend_cutoff (:obj:`float`, optional): The cut-off (in % of the
            maximum density of states within the plotting range) for an
            elemental orbital to be labelled in the legend. This prevents
            the legend from containing labels for orbitals that have very
            little contribution in the plotting range.
        gaussian (:obj:`float`, optional): Broaden the density of states using
            convolution with a gaussian function. This parameter controls the
            sigma or standard deviation of the gaussian distribution.
        height (:obj:`float`, optional): The height of the plot.
        width (:obj:`float`, optional): The width of the plot.
        xmin (:obj:`float`, optional): The minimum energy on the x-axis.
        xmax (:obj:`float`, optional): The maximum energy on the x-axis.
        num_columns (:obj:`int`, optional): The number of columns in the
            legend.
        colours (:obj:`dict`, optional): Use custom colours for specific
            element and orbital combinations. Specified as a :obj:`dict` of
            :obj:`dict` of the colours. For example::
                {
                    'Sn': {'s': 'r', 'p': 'b'},
                    'O': {'s': '#000000'}
                }
            The colour can be a hex code, series of rgb value, or any other
            format supported by matplotlib.
        xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy)
        ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS)
        yscale (:obj:`float`, optional): Scaling factor for the y-axis.
        style (:obj:`list` or :obj:`str`, optional): (List of) matplotlib style
            specifications, to be composed on top of Sumo base style.
        no_base_style (:obj:`bool`, optional): Prevent use of sumo base style.
            This can make alternative styles behave more predictably.
        image_format (:obj:`str`, optional): The image file format. Can be any
            format supported by matplotlib, including: png, jpg, pdf, and svg.
            Defaults to pdf.
        dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
            the image.
        plt (:obj:`matplotlib.pyplot`, optional): A
            :obj:`matplotlib.pyplot` object to use for plotting.
        fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
            a single font, specified as a :obj:`str`, or several fonts,
            specified as a :obj:`list` of :obj:`str`.
    Returns:
        A matplotlib pyplot object.
    """
    # Fall back to vasprun.xml(.gz) in the working directory when no file
    # was given; abort the whole script if neither exists.
    if not filename:
        if os.path.exists('vasprun.xml'):
            filename = 'vasprun.xml'
        elif os.path.exists('vasprun.xml.gz'):
            filename = 'vasprun.xml.gz'
        else:
            logging.error('ERROR: No vasprun.xml found!')
            sys.exit()
    dos, pdos = load_dos(filename, elements, lm_orbitals, atoms, gaussian,
                         total_only)
    save_files = False if plt else True  # don't save if pyplot object provided
    plotter = SDOSPlotter(dos, pdos)
    plt = plotter.get_plot(subplot=subplot, width=width, height=height,
                           xmin=xmin, xmax=xmax, yscale=yscale,
                           colours=colours, plot_total=plot_total,
                           legend_on=legend_on, num_columns=num_columns,
                           legend_frame_on=legend_frame_on,
                           xlabel=xlabel, ylabel=ylabel,
                           legend_cutoff=legend_cutoff, dpi=dpi, plt=plt,
                           fonts=fonts, style=style,
                           no_base_style=no_base_style)
    if save_files:
        # Build e.g. "<prefix>_dos.pdf", optionally inside `directory`.
        basename = 'dos.{}'.format(image_format)
        filename = '{}_{}'.format(prefix, basename) if prefix else basename
        if directory:
            filename = os.path.join(directory, filename)
        plt.savefig(filename, format=image_format, dpi=dpi,
                    bbox_inches='tight')
        # Also dump the numerical DOS data alongside the image.
        write_files(dos, pdos, prefix=prefix, directory=directory)
    else:
        # Caller supplied a pyplot object: hand the figure back instead of
        # writing anything to disk. (Nothing is returned on the save path.)
        return plt | 91cf886bad801150340507f046e5736bd241e909 | 3,635,902 |
def get_relative_percentage(new, last):
    """Return the relative change from *last* to *new*, in percent.

    :param new: float
        New value
    :param last: float
        Last value
    :return: float
        Relative change in percent, e.g. 5.0 for a 5% increase. Error
        handling (division by zero etc.) is delegated to ``get_ratio``.
    """
    # get_ratio(new, last) is 1.0 for "no change"; shift to 0 and scale to %.
    return 100.0 * (get_ratio(new, last) - 1.0)
def parse_data_name(line):
    """Return the text between the first ``<`` and the last ``>`` in *line*.

    The result is used as an attribute name for a data item line.

    :param line: str, a data-item line such as ``"<name>"``.
    :return: str, the substring between the delimiters.
    :raises ValueError: if ``<`` or ``>`` is missing from *line*.
    """
    start = line.index("<") + 1   # position just after the first '<'
    end = line.rindex(">")        # position of the last '>'
    return line[start:end]
def api_error(api, error):
    """Format an error message for an API call, if an error is present.

    :param api: name/identifier of the API that was called.
    :param error: the error object/message, or None if the call succeeded.
    :return: a formatted message string, or None when there was no error.
    """
    if error is None:
        return None
    return "calling: %s: got %s" % (api, error)
from datetime import datetime
import time
import re
def dailyImage(request,date=None):
    """ returns daily image html page for date

    :param request: Django HttpRequest; honours the ``fmt=json`` query param.
    :param date: optional 'YYYY-MM-DD' string; defaults to today.

    NOTE(review): this function uses ``datetime.datetime.*`` /
    ``datetime.date`` / ``datetime.timedelta``, which only works if the
    module was imported as ``import datetime`` -- the visible
    ``from datetime import datetime`` import would make every call here
    raise AttributeError. Confirm the module-level imports.
    """
    today = datetime.datetime.today()
    # Earliest date for which an image exists (presumably a datetime from a
    # custom manager -- confirm Image.pub_dates semantics).
    earliest = Image.pub_dates.earliest()
    if date is None:
        # '%F' == '%Y-%m-%d' (glibc extension).
        this_date = datetime.datetime.strftime(today,'%F')
    else:
        try:
            this_date = datetime.datetime(*(time.strptime(date, '%Y-%m-%d')[0:6]))
        except:
            # NOTE(review): bare except; also this `msg` is overwritten /
            # unused unless the template reads it via locals() below.
            msg = "invalid date"
            this_date = today
    # if date is in the future reset date to today
    # NOTE(review): on the date is None path `this_date` is a *string* here,
    # so these comparisons would raise TypeError -- verify intended flow.
    if this_date > today:
        this_date = today
        msg = '*** No peeking into the future! ***'
    # if date is too far into the past reset to today
    if this_date < earliest:
        this_date = earliest
        msg = "*** This is the earliest available image ***"
    # get the prev and next dates as strings
    prev = this_date - datetime.timedelta(days=1)
    if prev < earliest:
        prev = None
    else:
        prev = datetime.datetime.strftime(prev,'%F')
    next = this_date + datetime.timedelta(days=1)
    if next > today:
        next = None
    else:
        next = datetime.datetime.strftime(next,'%F')
    image = ImageForDate(this_date, today)
    str_date = datetime.datetime.strftime(this_date,'%F')
    # just before we pass it back, turn it into a date objects (a date with no time)
    date = datetime.date.fromordinal(this_date.toordinal())
    try:
        # First <p>...</p> of the caption, used as the short caption.
        caption_short = re.search("<p>(.*?)</p>", image.caption, re.DOTALL | re.UNICODE).group(1).strip()
    except:
        caption_short = image.caption
    if request.GET.get('fmt') == 'json':
        # partial caption :
        json = {'name':image.name, 'more_info':image.more_info,'title':image.title,'caption':caption_short, 'jpg':image.jpg, 'str_date':str_date }
        # full caption:
        #json = {'name':image.name, 'more_info':image.more_info,'title':image.title,'caption':image.ca$
        return HttpResponse(simplejson.dumps(json), mimetype='application/json')
    if not image:
        raise Http404
    # Template receives all locals (msg, prev, next, image, date, ...).
    return render_to_response('dailyrings.html',locals(), context_instance=RequestContext(request)) | 9562fca76317614830f9940031c42efa6fa99e14 | 3,635,906 |
def get_depolarizing_channel(T, t_gate=10e-9):
    """Get the depolarizing channel for a gate of duration ``t_gate``.

    Args:
        T (float): Decoherence parameter (seconds).
        t_gate (float): Gate time in seconds (default 10 ns).

    Returns:
        The noise model produced by ``depolarize(gamma)`` where
        ``gamma = 1 - exp(-t_gate / T)``.
    """
    assert T > 0
    assert t_gate > 0
    # Probability that decoherence occurred during the gate.
    gamma = 1 - np.exp(-t_gate / T)
    noise_model = depolarize(gamma)
    return noise_model
import numpy
def censor_signal(signal, filtered_signal, fold, left_trim, right_trim_percent):
    """Finds points where residual is greater than the given fold, and trims
    given number of point from the left, and some multiple of that from the right.

    :param signal: 1-D numpy array of raw samples.
    :param filtered_signal: 1-D array of the same length, smoothed signal.
    :param fold: threshold in multiples of the residual's std deviation.
    :param left_trim: number of samples removed before each flagged point.
    :param right_trim_percent: multiplier of ``left_trim`` giving the number
        of samples removed after each flagged point.
    :return: tuple ``(signal, filtered_signal, indexes)`` restricted to the
        accepted samples; ``indexes`` are the surviving original positions.
    """
    ltrim = int(left_trim)
    rtrim = int(left_trim * right_trim_percent)
    indexes = numpy.arange(len(signal))
    residual = signal - filtered_signal
    threshold = float(fold) * numpy.std(residual)
    censored = numpy.absolute(residual) > threshold
    # Start with everything accepted, then knock out windows around peaks.
    accepted = numpy.ones(len(residual), dtype=bool)
    logger.info( "Censoring n dirty peaks: %d" % (len(numpy.where(censored)[0])) )
    for i in numpy.where(censored)[0]:
        # Clamp the trim window to the array bounds and reject it in one
        # slice assignment (equivalent to the original per-sample loop).
        lo = max(i - ltrim, 0)
        hi = min(i + rtrim, len(accepted))
        accepted[lo:hi] = False
    return (signal[accepted], filtered_signal[accepted], indexes[accepted])
def get_choices(state, attribute_name):
    """
    Return the choices (excluding separators) in the
    SelectionCallbackProperty, together with their display labels.

    :param state: object whose *class* declares the SelectionCallbackProperty.
    :param attribute_name: name of that property on ``type(state)``.
    :return: tuple ``(choices, labels)`` -- parallel lists of raw choice
        values and their display strings.
    """
    choices = []
    labels = []
    # The property may carry a custom display function; fall back to str().
    display_func = getattr(type(state), attribute_name).get_display_func(state)
    if display_func is None:
        display_func = str
    for choice in getattr(type(state), attribute_name).get_choices(state):
        # Separators are purely visual and carry no selectable value.
        if not isinstance(choice, ChoiceSeparator):
            choices.append(choice)
            labels.append(display_func(choice))
    return choices, labels | 2716e3d850d62cf9018e9ec68e6db9cfe1fe721b | 3,635,909 |
def compatibility_factors_to_coo(ncf: dict, nreg: dict):
    """
    Assemble nodal compatibility factors into COO sparse-matrix triplets.

    ncf : nodal_compatibility_factors -- maps node index -> square 2-D array
        of factors for the elements registered at that node.
    nreg : maps node index -> global DOF/element indices for that node.

    Returns (data, rows, cols) suitable for scipy.sparse.coo_matrix.

    NOTE(review): the use of ``prange`` suggests this is compiled with
    numba's ``@njit(parallel=True)`` elsewhere -- confirm, since plain
    Python has no ``prange``.
    """
    nN = len(ncf)
    # Per-node block width = number of registered entries at that node.
    widths = np.zeros(nN, dtype=np.int32)
    for iN in prange(nN):
        widths[iN] = len(nreg[iN])
    # Each node contributes a dense width x width block of entries.
    shapes = (widths**2).astype(np.int64)
    N = np.sum(shapes)
    data = np.zeros(N, dtype=np.float64)
    rows = np.zeros(N, dtype=np.int32)
    cols = np.zeros(N, dtype=np.int32)
    c = 0  # write cursor into the flat triplet arrays
    for iN in range(nN):
        # Flatten the node's block row-major into the data slot.
        data[c: c + shapes[iN]] = flatten2dC(ncf[iN])
        nNN = widths[iN]
        for jNN in prange(nNN):
            for kNN in prange(nNN):
                rows[c + jNN*nNN + kNN] = nreg[iN][jNN]
                cols[c + jNN*nNN + kNN] = nreg[iN][kNN]
        c += shapes[iN]
    return data, rows, cols | 92995712ff559c0b44f362c0c2c56b7a6891afeb | 3,635,910 |
from typing import Type
def combine_complex(output_arr_t):
    """
    Returns a transformation that joins two real inputs into complex output
    (1 output, 2 inputs): ``output = real + 1j * imag``.

    :param output_arr_t: array type of the complex output; the two real
        inputs get the matching real dtype and the same shape.
    """
    # Real-valued counterpart of the complex output type.
    input_t = Type(dtypes.real_for(output_arr_t.dtype), shape=output_arr_t.shape)
    # The string below is a kernel template (Mako-style ${...} substitutions,
    # presumably reikna -- confirm) executed on-device, not Python code.
    return Transformation(
        [Parameter('output', Annotation(output_arr_t, 'o')),
        Parameter('real', Annotation(input_t, 'i')),
        Parameter('imag', Annotation(input_t, 'i'))],
        """
        ${output.store_same}(
            COMPLEX_CTR(${output.ctype})(
                ${real.load_same},
                ${imag.load_same}));
        """) | 7bb5b0c2ad840487983afe8e0574bdce4cdf8774 | 3,635,911 |
def quantity_unwrapped(units, multiplier=1.0):
    """
    A decorator for setters to extract the plain device value from the quantity.

    The decorated setter receives ``quantity.value * multiplier`` instead of
    the quantity object, after the quantity's dimensions have been checked
    against ``units``.

    :param units: expected dimensions, passed to ``quantity.assert_dimensions``.
    :param multiplier: scale factor applied to the raw value (default 1.0).
    """
    def decorator(func):
        @wraps(func)
        def setter(self, quantity):
            # Fail fast if the caller passed a quantity in the wrong units.
            quantity.assert_dimensions(units)
            return func(self, quantity.value * multiplier)
        return setter
    return decorator
import torch
def displacement_error(pred_fut_traj, fut_traj, consider_ped=None, mode="sum"):
    """
    Compute ADE (average displacement error).
    Input:
    - pred_fut_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory. [12, person_num, 2]
    - fut_traj: Tensor of shape (seq_len, batch, 2). Ground truth future trajectory.
    - consider_ped: Tensor of shape (batch). Optional per-pedestrian weights.
    - mode: Can be one of "sum", "mean", "raw".
    Output:
    - loss: gives the Euclidean displacement error, reduced according to ``mode``.
    Raises:
    - ValueError: for an unrecognised ``mode`` (previously this silently
      returned None).
    """
    # (seq, batch, 2) -> (batch, seq, 2), then squared coordinate differences.
    sq_diff = (fut_traj.permute(1, 0, 2) - pred_fut_traj.permute(1, 0, 2)) ** 2
    # Euclidean distance per timestep, summed over the sequence -> shape (batch,).
    loss = torch.sqrt(sq_diff.sum(dim=2)).sum(dim=1)
    if consider_ped is not None:
        loss = loss * consider_ped
    if mode == "sum":
        return torch.sum(loss)
    if mode == "mean":
        return torch.mean(loss)
    if mode == "raw":
        return loss
    raise ValueError("mode must be one of 'sum', 'mean' or 'raw', got %r" % (mode,))
import _json
def __json(informations: _Dict[int, ErrorInfo]) -> str:
    """Build a JSON-formatted string from the error-code table."""
    # Map each numeric error code to a plain dict of its fields; ErrorInfo
    # exposes _asdict(), so it is presumably a NamedTuple -- confirm.
    errcodes: _Dict[int, _Dict[str, str]] = dict()
    for code, info in informations.items():
        errcodes[code] = dict(info._asdict())
    # Keep non-ASCII text readable and the output stable (sorted keys).
    return _json.dumps(errcodes, indent=4, sort_keys=True, ensure_ascii=False) | 8f3c31a978d27e1874d1082b7458cabbe0764251 | 3,635,914 |
import random
def get_average_pairwise_distance(df, n=1000):
    """Get average pairwise distance for a group of embeddings. Smaller number means closer together.
    With n=1000, this function is generally precise within 0.01

    Parameters
    ----------
    df: DataFrame
        input data; must have an "embedding" column of vectors
    n: int
        number of random pairs to calculate distances for

    Returns
    -------
    float
        the average pairwise cosine distance between the random pairs in df
    """
    # Monte-Carlo estimate: sample n index pairs with replacement (a pair
    # may contain the same row twice, contributing distance 0).
    candidates = range(len(df))
    distances = []
    for _ in range(n):
        i1, i2 = random.choice(candidates), random.choice(candidates)
        distances.append(cosine(df["embedding"].iloc[i1], df["embedding"].iloc[i2]))
    return np.mean(distances)
def thorpe_scales(S, T, p, lat, lon, axis=-1):
    """
    Thorpe scales simplified for estimating dissipation rates
    Parameters
    ----------
    S : Practical Salinity
    T : Practical Temperature
    P : Pressure Measurements -- NOT A NORMALIZED PRESSURE GRID
    lat : Latitude
    lon : Longitude
    Returns
    -------
    thorpe: Thorpe Scale estimated from vertical displacement RMS
    """
    # Density profile from the CTD data (helper defined elsewhere).
    rho = rhoFromCTD(S, T, p, lat, lon)
    # Indices that would sort density monotonically along `axis`.
    order = np.argsort(rho, axis=axis)
    displacements = []
    # NOTE(review): `p_in` is unused, and `p[order[i]] - p` subtracts the
    # *entire* pressure array from the reordered value, producing one row per
    # sample. The conventional Thorpe displacement is p[order] - p computed
    # once -- confirm this loop is intentional before relying on the result.
    for i, p_in in enumerate(p):
        displacements.append(p[order[i]] - p)
    displacements = np.vstack(displacements)
    # Thorpe scale = RMS of the vertical displacements.
    thorpe = np.sqrt(np.mean(displacements**2))
    return thorpe | 7d8f8d711e081e3fe58b25508a1b62c576eb4062 | 3,635,916 |
from typing import Dict
def v() -> Dict[TState, float]:
    """Initial state values for use in tests.

    Returns a fresh dict on every call so tests may mutate it freely.
    """
    return {"A": 3.0, "B": 1.0, "C": 0.0}
import os
def GetSvnInfo():
    """Returns the project name and the current SVN workspace's root path.

    Parses the output of ``svn info .`` for the repository URL; returns
    ``(None, None)`` when no URL line matches.
    """
    for line in GetCommandOutput('svn info .'):
        m = _SVN_INFO_URL_RE.match(line)
        if not m:
            continue
        project = m.group(1)  # googletest or googlemock
        rel_path = m.group(2)
        # Walk up one '../' per path component to reach the workspace root.
        root = os.path.realpath(rel_path.count('/') * '../')
        return project, root
    return None, None
def iter_listeners(event):
    """Return an iterator for all the listeners for the event provided."""
    # `stack.top` is the current application context (Flask-style context
    # locals); the plugin manager extension owns the event registry.
    ctx = stack.top
    # NOTE(review): .get() returns None if the extension is missing, which
    # would raise AttributeError on the next access -- confirm that the
    # extension is always registered before this is called.
    return ctx.app.extensions.get('plugin_manager')._event_manager.iter(event) | 1774062c69647beb703a5a4a087c362703515eb7 | 3,635,919 |
def _compute_box_size(topology, density: float) -> float:
    """
    Lookup the masses in the `topology`
    and compute a cubic box that matches
    the `density` given the number of molecules
    defined in the topology. Units are nm
    Parameters:
    -----------
    topology: :class:`polyply.src.topology`
    density: float
        target density
    Returns:
    --------
    float
        the edge length of cubix box
    """
    total_mass = 0
    # Sum atom masses over every molecule; prefer a per-node mass, falling
    # back to the mass of the node's atom type from the topology.
    for meta_molecule in topology.molecules:
        molecule = meta_molecule.molecule
        for node in molecule.nodes:
            if 'mass' in molecule.nodes[node]:
                total_mass += molecule.nodes[node]['mass']
            else:
                try:
                    atype = molecule.nodes[node]["atype"]
                    total_mass += topology.atom_types[atype]['mass']
                except KeyError as error:
                    msg = ("Trying to compute system density, but cannot "
                           "find mass of atom {} with type {} in topology.")
                    atom = molecule.nodes[node]["atomname"]
                    raise KeyError(msg.format(atom, atype)) from error
    # amu -> kg and cm3 -> nm3
    # conversion = 1.6605410*10**-27 * 10**27
    # Cube root of (mass / density) gives the edge length of a cubic box.
    box = (total_mass*1.6605410/density)**(1/3.)
    return box | f4ae82e174b2d0673c331a521b2caec91f94af8f | 3,635,920 |
def build_template(ranges, template, build_date, use_proxy=False, redir_target=""):
    """
    Render the configuration template from processed provider ranges.

    Input: output of process_<provider>_ranges(), output of get_template()
    Output: Rendered template string ready to write to disk

    :param ranges: dict with "ranges" and "header_comments" keys.
    :param template: template object exposing ``render(**context)``.
    :param build_date: timestamp string embedded in the output.
    :param use_proxy: whether the rendered config should proxy traffic.
    :param redir_target: redirect target URL, if any.
    """
    context = {
        "ranges": ranges["ranges"],
        "header_comments": ranges["header_comments"],
        "build_date": build_date,
        "use_proxy": use_proxy,
        "redir_target": redir_target,
    }
    return template.render(**context)
def _get_named_value(obj, name):
    """ Utility function that returns the value of the given copasi object
    :param obj: a copasi object, that could be a compartment, species, parameter, reaction
    :param name: the reference name to return
    :return: the numeric value for the reference, or None when the reference
        name is not recognised for the object's type
    """
    # Dispatch on the concrete COPASI type of the object.
    is_metab = isinstance(obj, COPASI.CMetab)
    is_reaction = isinstance(obj, COPASI.CReaction)
    is_model = isinstance(obj, COPASI.CModel)
    is_cparam = isinstance(obj, COPASI.CCopasiParameter)
    value = None
    if is_reaction:
        value = {
            'Flux': obj.getFlux(),
            'ParticleFlux': obj.getParticleFlux(),
        }.get(name, None)
    elif is_metab:
        value = {
            'ParticleNumber': obj.getValue(),
            'ParticleNumberRate': obj.getRate(),
            'InitialParticleNumber': obj.getInitialValue(),
            'InitialConcentration': obj.getInitialConcentration(),
            'Concentration': obj.getConcentration(),
            'Rate': obj.getConcentrationRate(),
        }.get(name, None)
    elif is_model:
        value = {
            'Time': obj.getValue(),
        }.get(name, None)
    elif is_cparam:
        # this will be the case if it is a local parameter
        parent = obj.getObjectParent().getObjectParent()
        assert (isinstance(parent, COPASI.CReaction))
        value = parent.getParameterValue(obj.getObjectName())
    # Fallback for compartments / global quantities (or any case above that
    # yielded NaN): resolve from the generic value/rate accessors.
    if pd.isna(value):
        value = {
            'Time': obj.getValue(),
            'Volume': obj.getValue(),
            'Value': obj.getValue(),
            'Rate': obj.getRate(),
            'InitialValue': obj.getInitialValue(),
            'InitialVolume': obj.getInitialValue(),
            'InitialParticleNumber': obj.getInitialValue(),
        }.get(name, None)
    return value | a69bf15a9ed78da02687172e5e4cc38adbf4415c | 3,635,922 |
def lang_add(cursor, lang, trust):
    """Adds language for db.

    :param cursor: open database cursor used to execute the DDL.
    :param lang: procedural language name (e.g. plpgsql).
    :param trust: if truthy, create the language as TRUSTED.
    :return: True (the statement either succeeded or raised).
    """
    # NOTE(review): the language name is interpolated straight into the SQL.
    # Identifiers cannot be bound as parameters, so `lang` must be validated
    # or properly quoted upstream if it can come from untrusted input.
    if trust:
        query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
    else:
        query = 'CREATE LANGUAGE "%s"' % lang
    # Module-level audit log of executed statements.
    executed_queries.append(query)
    cursor.execute(query)
    return True | c7157d83aab143b4628dc38f64abcfb84cbacbf1 | 3,635,923 |
import textwrap
def _strip_and_dedent(s):
"""For triple-quote strings"""
return textwrap.dedent(s.lstrip('\n').rstrip()) | 8d392daede103cb2a871b94d415c705fa51d7cef | 3,635,924 |
def calculate_stress_by_matrix_rotation(wlsq_strain, U):
    """
    Calculate the intra-granular stresses by applying the sample system stiffnes matrix for a given grain and z-slice
    to the corresponding strains.
    :param wlsq_strain: Strains as a list of numpy arrays, where each list contains a strain component. The order of
        the strains are ["XX", "YY", "ZZ", "YZ", "XZ", "XY"].
    :type wlsq_strain: list[ndarray]
    :param U: The orientation matrix for a given grain and z-slice.
    :type U: ndarray
    :return: The intra-granular stresses in the same format as the provided intragranular strains.
    :rtype: list[ndarray]
    """
    # Get the stiffness matrix as measured in the grain coordinate system
    C = alpha_quartz_stiffness()
    # Rotate the stiffness matrix by the grain orientation matrix
    C = transform_stiffness(U, C)
    # Stack the strain vectors into a matrix, where each row contains the strain components for a certain element in
    # the mesh which the stress will be plotted on. Make an empty matrix for the stress vectors.
    strain_mat = np.column_stack(wlsq_strain)
    stress_mat = np.zeros_like(strain_mat)
    # Exract a row from the strain matrix, multiply the shear strain components by 2 to obtain the engineeering shear
    # strain which is compatible with the Voigt notation.
    # NOTE(review): `strain_vector` is a view into strain_mat, so the *= 2
    # below mutates strain_mat in place; wlsq_strain itself is untouched
    # because np.column_stack copies its inputs.
    for i in range(np.size(strain_mat, 0)):
        strain_vector = strain_mat[i, :]
        strain_vector[3:6] *= 2
        # Apply the stiffness matrix to get the stress vectors and stack the stress vectors in a matrix.
        stress_mat[i, :] = C @ strain_vector
    # Split the stress matrix to give it the same format as wlsq_strains.
    wlsq_stress = np.hsplit(stress_mat, 6)
    for i, arr in enumerate(wlsq_stress):
        wlsq_stress[i] = arr.reshape((-1))
    return wlsq_stress | 88a570dcae97de0ac0ee746af805f2b90a1a7c7c | 3,635,925 |
def fundamentals_dataframe(
    timeframe: Timeframe, stock: str, ld: LazyDictionary
) -> pd.DataFrame:
    """Return a DataFrame of fundamentals (resampled to business days) for the
    current django template render to use.

    NOTE(review): `timeframe` and `stock` are not referenced in the body --
    the data comes entirely from ld["stock_df"]; confirm they are needed
    for the interface only.
    """
    df = ld["stock_df"]
    # print(df)
    df["change_in_percent_cumulative"] = df[
        "change_in_percent"
    ].cumsum()  # nicer to display cumulative
    df = df.drop("change_in_percent", axis=1)
    # Dollar volume expressed in millions.
    df["volume"] = df["last_price"] * df["volume"] / 1000000  # again, express as $(M)
    df["market_cap"] /= 1000 * 1000
    df["number_of_shares"] /= 1000 * 1000
    df["fetch_date"] = pd.to_datetime(df.index, format="%Y-%m-%d")
    # print(df.shape)
    df = df.set_index("fetch_date")
    df = df.resample(
        "B"
    ).asfreq()  # fill gaps in dataframe with business day dates only
    # with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    #    print(df)
    # Re-materialise the index as a column for template consumption.
    df["fetch_date"] = pd.to_datetime(df.index, format="%Y-%m-%d")
    return df | 52883783049c3f0e097f61334b67ec6aa085c8cc | 3,635,926 |
def returns_normally(expr):
    """For use inside `test[]` and its sisters.

    Assert that `expr` runs to completion without raising or signaling.
    Usage::
        test[returns_normally(myfunc())]
    """
    # `test[]` lifts its expression into a lambda, so by the time we are
    # called, `expr` has already been evaluated. Any unexpected signal or
    # exception would have been caught by the test machinery
    # (unpythonic.syntax.testingtools._observe) before reaching this point,
    # so merely getting here means success -- report it unconditionally.
    return True
from datetime import datetime
def verify_presentation_state_content(presentation_state):
    """Helper function to verify that the content in presentation_state is appropriate
    to create a basic text SR object, filling in defaults for missing keys.

    Parameters
    ----------
    presentation_state : dict
        Mutated in place; missing mandatory keys get default values.

    Returns
    -------
    dict or None
        The same dict with defaults filled in, or None if the input was
        not a dict.
    """
    if not isinstance(presentation_state, dict):
        print("instance is expected to be a dict")
        return None
    if "SOPClassUID" not in presentation_state:
        presentation_state["SOPClassUID"] = "1.2.840.10008.5.1.4.1.1.11.2"
    if "SOPInstanceUID" not in presentation_state:
        presentation_state["SOPInstanceUID"] = pydicom.uid.generate_uid()
    if "InstanceNumber" not in presentation_state:
        presentation_state["InstanceNumber"] = "1"
    if "ContentDescription" not in presentation_state:
        presentation_state["ContentDescription"] = ""
    if "ContentCreatorName" not in presentation_state:
        presentation_state["ContentCreatorName"] = ""
    # Bug fix: with `from datetime import datetime`, the original
    # `datetime.datetime.now()` raised AttributeError.
    if "PresentationCreationDate" not in presentation_state:
        presentation_state["PresentationCreationDate"] = datetime.now().strftime("%Y%m%d")
    if "PresentationCreationTime" not in presentation_state:
        presentation_state["PresentationCreationTime"] = datetime.now().strftime("%H%M%S")
    return presentation_state
def getProfile(request):
    """
    Return MY profile for editing.

    :param request: Django request; ``request.user`` selects the profile.
    :return: result of ``UserProfile.ajax()`` -- presumably a
        JSON-serialisable representation; confirm against the model.
    :raises UserProfile.DoesNotExist: if the user has no profile.
    """
    profile = UserProfile.objects.get(
        user=request.user
    )
    return profile.ajax() | fbb6859b3448387e96f698148899d24e1d7fb20d | 3,635,929 |
import re
def finder(input, collection, fuzzy=False, accessor=lambda x: x):
    """
    Filter *collection* down to items whose text matches *input*.

    Args:
        input (str): A partial string which is typically entered by a user.
        collection (iterable): A collection of items which will be filtered
            based on the `input`; ``accessor`` maps each item to the string
            that is searched.
        fuzzy (bool): perform a fuzzy search, matching the characters of
            ``input`` in order but not necessarily adjacently (default=False)
    Returns:
        suggestions (generator): A generator object that produces a list of
            suggestions narrowed down from `collection` using the `input`;
            shorter and earlier matches are yielded first.
    """
    if not isinstance(input, str):
        input = str(input)
    if fuzzy:
        # e.g. "ab" -> "a.*?b": characters in order, lazily separated.
        pattern = ".*?".join(re.escape(ch) for ch in input)
    else:
        # NOTE: the non-fuzzy pattern is used verbatim (not escaped), so
        # regex metacharacters in `input` are interpreted as such.
        pattern = input
    regex = re.compile(pattern, re.IGNORECASE)
    suggestions = []
    for item in collection:
        hit = regex.search(accessor(item))
        if hit is not None:
            # Rank by (match length, match start), then lexically by text.
            suggestions.append((len(hit.group()), hit.start(), accessor(item), item))
    return (entry[-1] for entry in sorted(suggestions))
def calc_rets(returns, weights):
    """
    Calculate continuous return series for futures instruments. These consist
    of weighted underlying instrument returns, who's weights can vary over
    time.
    Parameters
    ----------
    returns: pandas.Series or dict
        A Series of instrument returns with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names. Values
        correspond to one period instrument returns. returns should be
        available for all for all Timestamps and instruments provided in
        weights. If dict is given this should be a dict of pandas.Series in the
        above format, with keys which are a subset of the keys given in weights
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names. If dict is given this should be
        a dict of pandas.DataFrame in the above format, with keys for different
        root generics, e.g. 'CL'
    Returns
    -------
    A pandas.DataFrame of continuous returns for generics. The index is
    pandas.Timestamps and the columns is generic names, corresponding to
    weights.columns
    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
    >>> vals = [1, 1/2, 1/2, 1]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
    >>> irets = price.groupby(level=-1).pct_change()
    >>> util.calc_rets(irets, weights)
    """ # NOQA
    # Normalise both arguments to the dict form (keyed by root generic) so
    # the single-input case goes through the same code path.
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    # Generic (column) names must be globally unique across all roots,
    # since they become the columns of the combined output.
    generic_superset = []
    for root in weights:
        generic_superset.extend(weights[root].columns.tolist())
    if len(set(generic_superset)) != len(generic_superset):
        raise ValueError("Columns for weights must all be unique")
    # Validates that returns exist for every (timestamp, instrument) in
    # weights (helper defined elsewhere in the module).
    _check_indices(returns, weights)
    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            # groupby time
            # skipna=False so a missing instrument return propagates NaN
            # rather than silently dropping that instrument's contribution.
            group_rets = (rets * gnrc_wts).groupby(level=0)
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
            cols.extend(root_wts.columns.tolist())
    rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
    return rets | 2974b02c82038d20982a6bb974912913ece61e89 | 3,635,931 |
def testMaze(n_training_trials, n_navigation_trials):
    """
    Train a single actor/critic pair alternately on two maze environments
    (an open field and a walled maze), each with its own place cells, then
    measure navigation performance on both. Look at
    single_maze_learning_agent.py for more details on the individual steps.

    :param n_training_trials: trials of value learning per environment.
    :param n_navigation_trials: trials used to evaluate navigation.
    :return: tuple (learning_steps_E1, learning_steps_E2,
        navigation_steps_E1, navigation_steps_E2).
    """
    ValueLearning.DBG_LVL = 0
    # Agent/environment geometry parameters.
    move_distance = 0.29
    nx = 6
    ny = 6
    # Slightly more fields than grid cells to over-cover the arena.
    n_fields = round(1.0 * (nx + 3) * (ny+3))
    Hippocampus.N_CELLS_PER_FIELD = 4
    n_cells = Hippocampus.N_CELLS_PER_FIELD * n_fields
    n_alternations = 1
    max_nav_steps = 400
    max_train_steps = 4000
    # First Environment: Has its own place cells and place fields
    env_E1 = Environment.RandomGoalOpenField(nx, ny, move_distance)
    canvas_E1 = Graphics.WallMazeCanvas(env_E1)
    place_fields_E1 = Hippocampus.setupPlaceFields(env_E1, n_fields)
    place_cells_E1 = Hippocampus.assignPlaceCells(n_cells, place_fields_E1)
    # Create empty actors and critics (shared across both environments)
    actor = Agents.Actor(env_E1.getActions(), n_cells)
    critic = Agents.Critic(n_cells)
    # Second Environment: This has a different set (but the same number) of
    # place fields and place cells
    nx = 6
    ny = 6
    # Two wall segments with a gap between them at y == 3.
    lp_wall = Environment.Wall((0,3), (3,3))
    rp_wall = Environment.Wall((4,3), (6,3))
    env_E2 = Environment.MazeWithWalls(nx, ny, [lp_wall, rp_wall], move_distance)
    canvas_E2 = Graphics.WallMazeCanvas(env_E2)
    place_fields_E2 = Hippocampus.setupPlaceFields(env_E2, n_fields)
    place_cells_E2 = Hippocampus.assignPlaceCells(n_cells, place_fields_E2)
    learning_steps_E1 = np.zeros((n_training_trials, 1), dtype=float)
    learning_steps_E2 = np.zeros((n_training_trials, 1), dtype=float)
    # NOTE(review): with n_alternations > 1 only the *last* alternation's
    # step counts survive, since the assignments below overwrite rather
    # than accumulate.
    for alt in range(n_alternations):
        print('Alternation: %d' % alt)
        # First look at the performance of the agent in the task before it is
        # allowed to learn anything. Then allow learning
        print('Learning Environment A')
        (actor, critic, steps_E1) = ValueLearning.learnValueFunction(n_training_trials, env_E1, place_cells_E1, actor, critic, max_train_steps)
        learning_steps_E1 = steps_E1
        print('Learning Environment B')
        (actor, critic, steps_E2) = ValueLearning.learnValueFunction(n_training_trials, env_E2, place_cells_E2, actor, critic, max_train_steps)
        learning_steps_E2 = steps_E2
        # canvas_E1.plotValueFunction(place_cells_E1, critic)
        # canvas_E2.plotValueFunction(place_cells_E2, critic)
        # Plot a histogram of the weights
        # Critic
        # critic_weights = np.reshape(critic.getWeights(), -1)
        # Graphics.histogram(critic_weights)
        """
        # Actor
        actor_weights = np.reshape(actor.getWeights(), -1)
        Graphics.histogram(actor_weights)
        """
    # After alternation, check the behavior on both the tasks
    n_trials = n_navigation_trials
    ValueLearning.DBG_LVL = 0
    print('Navigating Environment A')
    navigation_steps_E1 = ValueLearning.navigate(n_trials, env_E1, place_cells_E1, actor, critic, max_nav_steps)
    print('Navigating Environment B')
    navigation_steps_E2 = ValueLearning.navigate(n_trials, env_E2, place_cells_E2, actor, critic, max_nav_steps)
    return (learning_steps_E1, learning_steps_E2, navigation_steps_E1, navigation_steps_E2) | f35093e5ee7f00aa547ce9a377f7d6d399070d76 | 3,635,932 |
def group_calc(df, single_group_func, group_col, sort_col=None,
               exclude_groups=None):
    """Apply a per-group function to each distinct subset of a tidy dataframe.

    A tidy, time-series dataframe often contains several non-overlapping
    subgroups (states, age groups, ...). This helper runs an operation on
    each subgroup independently and stitches the results back together.

    Args:
        df: A tidy pandas dataframe that can be split into non-overlapping
            subgroups.
        single_group_func: Callable taking (group_dataframe, group_name) and
            returning a dataframe for that single group. Depending on the
            function, additional columns may be added.
        group_col: Column in ``df`` identifying the subgroups, e.g. "State".
        sort_col: Optional column to sort by (together with ``group_col``)
            once the groups are recombined.
        exclude_groups: Groups to leave out of the calculation entirely.

    Returns:
        A new dataframe with ``single_group_func`` applied to every group,
        rows with a missing group dropped, and the index reset.
    """
    excluded = [] if exclude_groups is None else exclude_groups
    # Process each group separately, skipping any excluded ones.
    processed = [
        single_group_func(df[df[group_col] == group], group)
        for group in df[group_col].unique()
        if group not in excluded
    ]
    sort_keys = group_col if sort_col is None else [sort_col, group_col]
    combined = pd.concat(processed).sort_values(sort_keys)
    # Drop rows whose group is NaN before handing the frame back.
    return combined[combined[group_col].notna()].reset_index(drop=True).copy()
def str2int(video_path):
    """
    argparse always yields strings, but webcams are addressed by int
    indices (0, 1, ...). Return an int when the value parses as one,
    otherwise hand back the original path unchanged.
    """
    try:
        webcam_index = int(video_path)
    except ValueError:
        return video_path
    return webcam_index
def describe_inheritance_rule(rule):
    """
    Render a koji inheritance rule (one element of getInheritanceData()'s
    result) as a tuple of strings for a module's stdout_lines array,
    matching the output of koji's taginfo CLI command, e.g.:
       0 .... a-parent-tag
      10 M... another-parent-tag
        maxdepth: 1
     100 .F.. yet-another-parent-tag
        package filter: ^prefix-
    """
    def _is_set(value):
        # koji treats '' and None as "unset" for maxdepth / pkg_filter.
        return value not in ('', None)

    # Flag layout mirrors koji_cli/commands.py anon_handle_taginfo().
    flags = ''.join([
        'M' if _is_set(rule['maxdepth']) else '.',
        'F' if _is_set(rule['pkg_filter']) else '.',
        'I' if rule['intransitive'] else '.',
        'N' if rule['noconfig'] else '.',
    ])
    lines = ["%4d %s %s" % (rule['priority'], flags, rule['name'])]
    if _is_set(rule['maxdepth']):
        lines.append("  maxdepth: %d" % rule['maxdepth'])
    if _is_set(rule['pkg_filter']):
        lines.append("  package filter: %s" % rule['pkg_filter'])
    return tuple(lines)
def plot_cv_indices(cv, X, y, ax, lw=50):
    """Visualize train/test index assignment for every split of a CV object."""
    folds = list(cv.split(X=X, y=y))
    for fold, (train_idx, _test_idx) in enumerate(folds):
        # 1 marks a training sample for this fold, 0 marks a test sample.
        membership = np.zeros(shape=X.shape[0], dtype=np.int32)
        membership[train_idx] = 1
        ax.scatter(range(len(membership)), [fold + .5] * len(membership),
                   c=membership, marker='_', lw=lw, cmap=cmap_cv,
                   vmin=-.2, vmax=1.2)
    # Axis formatting: one row per fold, most recent fold at the bottom.
    n_splits = len(folds)
    ax.set(yticks=np.arange(n_splits) + .5,
           yticklabels=list(range(n_splits)), xlabel='Sample index',
           ylabel="CV iteration",
           ylim=[n_splits + .2, -.2], xlim=[0, 100])
    ax.set_title('{}'.format(type(cv).__name__), fontsize=15)
    return ax
def find_colour(rgb):
    """Map an (r, g, b) triplet to the name of the closest predefined colour.

    Returns "gray" when all three channels lie within 10 of each other
    (pure black and pure white excluded), or an error string when any
    channel is outside 0..255.
    """
    # this cannot normally happen to an image that is processed automatically, since colours
    # are rbg by default, but it can happen if the function is called with invalid values
    if rgb[0] < 0 or rgb[0] > 255 or rgb[1] < 0 or rgb[1] > 255 or rgb[2] < 0 or rgb[2] > 255:
        return "part of the rgb triplet was invalid"
    # dictionary of predefined colours
    colours = {
        (255, 0, 0): "red",
        (255, 100, 100): "red",
        (200, 100, 100): "red",
        (150, 0, 0): "red",
        (150, 50, 50): "red",
        (50, 0, 0): "red",
        (0, 255, 0): "green",
        (100, 255, 100): "green",
        (100, 200, 100): "green",
        (0, 150, 0): "green",
        (50, 150, 50): "green",
        (0, 50, 0): "green",
        (0, 0, 255): "blue",
        (100, 100, 255): "blue",
        (100, 100, 200): "blue",
        (0, 0, 150): "blue",
        (50, 50, 150): "blue",
        (0, 0, 50): "blue",
        (255, 255, 0): "yellow",
        (255, 255, 100): "yellow",
        (200, 200, 100): "yellow",
        (150, 150, 0): "yellow",
        (150, 150, 50): "yellow",
        (50, 50, 0): "yellow",
        (247, 248, 232): "yellow",  # light yellow colour used on most of the map
        (233, 231, 182): "yellow",  # darker yellow used in some places
        (255, 0, 255): "magenta",
        (255, 100, 255): "magenta",
        (200, 100, 200): "magenta",
        (150, 0, 150): "magenta",
        (150, 50, 150): "magenta",
        (50, 0, 50): "magenta",
        (0, 255, 255): "teal",
        (100, 255, 255): "teal",
        (100, 200, 200): "teal",
        (0, 150, 150): "teal",
        (50, 150, 150): "teal",
        (0, 50, 50): "teal",
        (232, 248, 248): "teal",  # light blue-ish colour used for water in some places
        (255, 255, 255): "white",
        (0, 0, 0): "black"
    }
    # Pick the predefined colour with the smallest squared euclidean distance.
    # 30000 was arbitrarily chosen as a "close enough" threshold: a larger
    # distance cannot reasonably be considered closest, even if minimal.
    min_dist = 30000
    nearest_colour = ""
    for colour, name in colours.items():
        dist = (pow(colour[0] - rgb[0], 2)
                + pow(colour[1] - rgb[1], 2)
                + pow(colour[2] - rgb[2], 2))
        if dist < min_dist:
            min_dist = dist
            nearest_colour = name
    # colour is considered gray if the r g b values are all within 10 of each
    # other. Bug fix: the original compared rgb[2] with rgb[1] twice and never
    # checked the red channel against the blue channel.
    differences = [abs(rgb[0] - rgb[1]), abs(rgb[1] - rgb[2]), abs(rgb[0] - rgb[2])]
    is_gray = all(diff <= 10 for diff in differences)
    if is_gray and all(c != 0 and c != 255 for c in (rgb[0], rgb[1], rgb[2])):
        return "gray"
    return nearest_colour
def log_sum_exp(input, dim=None, keepdim=False):
    """Numerically stable LogSumExp.

    Args:
        input (Tensor)
        dim (int): Dimension along which the sum is performed; if None,
            the tensor is flattened and fully reduced.
        keepdim (bool): Whether to retain the reduced dimension.

    Returns:
        Equivalent of log(sum(exp(input), dim=dim, keepdim=keepdim)),
        computed by shifting with the per-slice maximum so the exp never
        overflows: log sum exp(x) = s + log sum exp(x - s), s = max(x).
    """
    if dim is None:
        input = input.view(-1)
        dim = 0
    shift = input.max(dim=dim, keepdim=True)[0]
    stable = (input - shift).exp().sum(dim=dim, keepdim=True).log()
    result = shift + stable
    return result if keepdim else result.squeeze(dim)
import torch
def train_epoch_ch3(net, train_iter, loss, updater):  # @save
    """Train the model for one epoch (defined in Chapter 3).

    Returns the average training loss and the training accuracy over
    the whole epoch.
    """
    # Put the model into training mode when it is an nn.Module.
    if isinstance(net, torch.nn.Module):
        net.train()
    # Accumulates: total loss, total correct predictions, sample count.
    metric = Accumulator(3)
    for X, y in train_iter:
        # Forward pass, then compute gradients and update parameters.
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            # Built-in PyTorch optimizer: zero grads, backprop, step.
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:
            # Custom updater: backprop, then update with the batch size.
            l.mean().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # Average loss and accuracy per sample.
    return metric[0] / metric[2], metric[1] / metric[2]
def get_defined_names_for_position(scope, position=None, start_scope=None):
    """
    Return filtered version of ``scope.get_defined_names()``.

    This function basically does what :meth:`scope.get_defined_names
    <parsing_representation.Scope.get_defined_names>` does.

    - If `position` is given, delete all names defined after `position`.
    - For special objects like instances, `position` is ignored and all
      names are returned.

    :type scope: :class:`parsing_representation.IsScope`
    :param scope: Scope in which names are searched.
    :param position: the position as a line/column tuple, default is infinity.
    """
    names = scope.get_defined_names()
    # Instances have special rules, always return all the possible completions,
    # because class variables are always valid and the `self.` variables, too.
    # NOTE: `and` binds tighter than `or`, so the last two clauses read
    # "(start_scope != scope) and isinstance(start_scope, ...)".
    if (not position or isinstance(scope, (er.Array, er.Instance))
            or start_scope != scope
            and isinstance(start_scope, (pr.Function, er.Execution))):
        return names
    # Otherwise keep only names defined strictly before `position`.
    names_new = []
    for n in names:
        if n.start_pos[0] is not None and n.start_pos < position:
            names_new.append(n)
    return names_new
from typing import List
def intermediate(
    raw_seqs: List[str],
    can_seqs: List[str],
    indices: List[int],
    seq_idx: int,
    subjs: dict,
    ents: dict,
    kb: dict = None,
    sep_char: str = SEP_CHAR,
    join_char: str = JOIN_SUBJ_CHAR,
    canon_start_char: str = CANON_START_CHAR,
    trg: bool = True,
):
    """
    # Input
    For a given batch sequence of tokens delimited into individual sentences by sep_char
    and a seq_idx indexing the seq_idx'th trg (or source) sentence "of interest" within them,
    # Function
    tries to change the labels of all entities in that sentence
    from @time or @poi_type to @meeting_time or @pizza_hut_poi_type
    # Search strategy
    by searching greedily from sentence to sentence for subject contenders
    in the order: descend into history first, then ascend to future:
    seq_idx => seq_idx - 1 => ... => 0 => seq_idx + 1 => seq_idx + 2 => ... => max_batch
    once a subject has been found we greedily take it and label all of our entities with it
    once multiple subjects are found we need to use more heuristics:
    * look up addresses in entity dictionary
    * TODO figure out what to do with weather
    * FIXME why dont I just look at KB?
    Algo:
    1. check which domain we are in using subjs on all matches => check we dont get multiple domain options
    2. Procedure depends on domain:
       * traffic: look up POI in ents dict => EZ
       * weather:
       * calendar: probably only got one contender most of the time anyways
    :param raw_seqs: last src and last target sequence of given batch (src is concatenation of dialogue history, so last src + trg contain everything) (list of strings)
    :param can_seqs: output of canonize_seq on raw_seqs
    :param indices: surjective but non injective mapping of raw tokens to canonicals
    :param matches: matches output of canonize_seq on raw_seqs
    :param seq_idx: which sequence in dialogue history we interested in?
    :param subjs: subj dict to look up which attributes are contenders for subject
    :param ents: kvret_entities_altered.json dict
    :param join_char:
    :param canon_start_char:
    :param trg: bool whether to look at seq_idx'th trg sequence or, if False, at seq_idx'th source seq of batch
    """
    # NOTE(review): work-in-progress code. The kb-lookup branch below hits an
    # unconditional `assert False` and references an undefined `probable_subj`;
    # several locals (rels_vals_of_interest, compare_subjects, candidate_subj,
    # poi_address_list) are computed but never used. `canon_start_char` is
    # accepted but the module-level CANON_START_CHAR is used instead — confirm.
    if not isinstance(subjs, defaultdict):
        assert type(subjs) == dict, type(subjs)
        subjs = defaultdict(lambda: None, subjs)
        # Normalize keys so every subject key carries the canonical start char.
        # NOTE(review): deleting/adding keys while iterating .items() raises
        # RuntimeError on Python 3 — iterate over a snapshot instead; confirm.
        for key, val in subjs.items():
            if not key.startswith(CANON_START_CHAR):
                del subjs[key]
                subjs[CANON_START_CHAR+key] = val
    # t(batch) setup: split the flat token list into per-sentence lists and
    # remember where each sentence starts in the flat raw token stream.
    seqs_raw_separated = [[]]
    seqs_separated_start_indices = [0]
    for i, tok in enumerate(raw_seqs):
        if tok == sep_char:
            seqs_raw_separated += [[]]
            seqs_separated_start_indices += [i+1]
        else:
            seqs_raw_separated[-1] += [tok]
    seqs_separated_start_indices += [len(raw_seqs)]
    global_can_seqs_bin = dict()  # index this to get num of sequence; holds no entry at sep_char positions
    global_offsets = dict()  # start offsets of canonical sequences
    rels_vals_per_seq = dict()  # dict of seq idx : rels_vals dict for all visited sequences
    subject_mapping = dict()  # this should be set at end of while loop; otherwise no subject appeared in entire batch
    subject_dict = None
    # procedure: look at sequences in the order seq_idx[trg], seq_idx[src], seq_idx-1[trg],seq_idx-1[src],...,0[src]; then ascending afterwards
    direction = -1  # start while loop in descending order, then ascend after hitting first src
    cache_trg = trg
    seq_offset = (seq_idx*2)+int(cache_trg)
    seq_offset_cache = seq_offset
    while seq_offset < len(raw_seqs):
        look_at_seq = (seq_offset//2)
        # input((seq_idx, seq_offset, look_at_seq))
        raw_seq = seqs_raw_separated[seq_offset]
        raw_seq_start_idx = seqs_separated_start_indices[seq_offset]
        raw_seq_end_idx = seqs_separated_start_indices[seq_offset+1]-2  # leave out delimiting "@DOT" sep_char
        can_seq = can_seqs[indices[raw_seq_start_idx]:indices[raw_seq_end_idx]+1]  # inclusion-preserving mapping
        local_indices = [idx - indices[raw_seq_start_idx] for idx in indices[raw_seq_start_idx:raw_seq_end_idx+1]]
        assert local_indices[0] == 0, (can_seq, indices[raw_seq_start_idx:raw_seq_end_idx+1], raw_seq_start_idx, raw_seq_end_idx)
        # input((raw_seq, can_seq))
        # start procedure: try to find subject indices in this sequence
        entity_indices_local, domains, domains_vals, rels_vals, subj_indices_local = find_subjs_in_seq(
            raw_seq=raw_seq,
            can_seq=can_seq,
            indices=local_indices,
            subjs=subjs
        )
        # cache vars for all visited sequences:
        global_offsets[seq_offset] = indices[raw_seq_start_idx]
        rels_vals_per_seq[seq_offset] = rels_vals
        for i in range(indices[raw_seq_start_idx], indices[raw_seq_end_idx+1]):
            global_can_seqs_bin[i] = seq_offset
        # cache vars for the sequence of interest (first one)
        if trg == cache_trg and look_at_seq == seq_idx:
            can_seq_of_interest = can_seq
            entity_indices_local_of_interest = entity_indices_local
            rels_vals_of_interest = rels_vals
        # try to look up subject mapping in KB
        # heuristic switch case
        # every case needs to set subject_mapping to dict of entity_idx: subj_idx for all entities in the sent
        # in case of success and break
        if len(domains_vals) == 0:
            # sentence contains no entities
            if seq_offset == seq_offset_cache:
                # break if this is the sequence of interest (could also just return can_seq)
                # return can_seq
                break
        elif domains_vals == {None}:
            # TODO confirm subjs are in proper format
            # case 0: there is 0 subjects: extend search to other sequences in batch
            # input(("extend search ! No subjects found in (seq, then batch): ", can_seq, raw_seqs, subjs, look_at_seq, cache_trg, direction))
            # what order to recurse to other sentences in? probably backward, then forward
            # TODO this method of looking to other sequences in batch as backup is only better if
            # time_f(all_seq)
            # >
            # time_f(curr_seq) + p(no match | trg_seq) * time_f(prev_seq) * p(match|prev_seq) + p(no match | trg_seq) * time_f(prev_seq) * p (no match | prev_seq) * time_f (prevprev_seq) .....
            # depends on constant overhead i think?
            #
            # (heuristic procedure cases 2,3 are greedy in that they assume
            # the correct subject is likely to be in this sentence, and return it
            # instead of extending search to other sentences)
            pass
        elif len(domains_vals) > 2:
            # case 1: there is multiple domains: assert False, whats this
            assert False, ("subjects of different domains found:", domains, can_seq, raw_seq)
        elif len(subj_indices_local) == 1:
            # case 2: there is 1 subject: take it for all attributes and break
            subject_mapping.update({ent: global_offsets[seq_offset]+subj_indices_local[0] for ent in entity_indices_local_of_interest})
            print(f"found exactly one subject {rels_vals[can_seq[subj_indices_local[0]]][subj_indices_local[0]]} for sequence ", can_seq, raw_seq)
            # unit test
            subj_canon = can_seq[subj_indices_local[0]]
            assert len(rels_vals[subj_canon]) == 1, f"more than one originator for {subj_canon} found in {rels_vals[subj_canon]}"
            break  # found subj; have set it and can stop searching
        else:
            assert len(subj_indices_local) > 1, (domains, can_seq)
            print(f"found multiple subject contenders")
            # case 3: there is more subjects: heuristics:
            # traffic: match POI attributes based on entities dict # what about distance, traffic info
            # event: assert False, when does this ever happen?
            # weather: print out a bunch and figure out something based on collocation
            domain = list({v for k, v in domains.items() if v is not None})[0]
            if domain == "calendar":
                assert False, f"found multiple events: {[can_seq[subj] for subj in subj_indices_local]} in {can_seq}"
            elif domain == "weather":
                # TODO run some kind of dependency parse to match attributes with subjects
                print(("\n"*4)+("\n"*4)+"WEATHER DOMAIN OMG WHATWEDO"+"\n"*4)
                input((can_seq, can_seq_of_interest))
            else:
                assert domain == "traffic"
                # traffic attributes: poi, address, poi_type, distance, traffic_info
                # can lookup address
                # simply annotate distance, traffic info ? how long is poi_list?
                # TODO move all of this before while loop
                pois = ents["poi"]
                pois_by_address = {poi_dict["address"]: {"poi": poi_dict["poi"], "type": poi_dict["type"]} for poi_dict in pois}
                poi_address_list = list(pois_by_address)
                # look up poi info for each subject
                compare_subjects = dict()
                for subj in subj_indices_local:
                    subject_mapping[subj] = global_offsets[seq_offset]+subj  # set local subject mapping to its own global subj index
                    can_subj = can_seq[subj]
                    subj_raw_list = rels_vals[can_subj][subj]  # TODO should probably unit test if this is in ents.values()
                    candidate_subj = " ".join(subj_raw_list)
                    compare_subjects[subj] = candidate_subj
                    # TODO do MED match with poi_name_list; could be multiple in case of home_1, home_2 etc
                    # => immediately try to match with attributes
        # first do descending from seq of interest; when hit 0 go back
        if seq_offset == 0:
            seq_offset = seq_idx
            direction *= -1  # start ascending
            if cache_trg == True:  # switch one extra time if we started with target because now we goin from src to src once
                trg = not trg
        seq_offset += direction  # first from src sequence to prev sequence, then afterwards if seq_offset <= 0 and not trg: # hit first source; now continue with entries afterward
        # invert trg (alternate between looking at src and trg)
        trg = not trg
    # TODO FIXME at end of while loop,
    # subject_mapping should be entity: subject dict with
    # entity: index of entity in local can_seq
    # subject: index of subject in global can_seqs
    # (can_seq, rels_vals, etc should be set to the last processed sequence that also returned subject_mapping)
    # assert subject_mapping != {}, (can_seqs, can_seq_of_interest, global_offsets, seq_offset, global_can_seqs_bin)
    subject_prefixes = dict()
    for local_ent, global_subj in subject_mapping.items():
        # FIXME TODO get these variables
        subj_seq = global_can_seqs_bin[global_subj]  # index in can_seqs NOTE probably look at seq but just figure out using sep in beginning
        if subj_seq is None:  # tolerate a missing bin entry and try the next position
            subj_seq = global_can_seqs_bin[global_subj+1]
        subj = global_subj-global_offsets[subj_seq]  # index in its local sequence
        subj_canon = can_seqs[global_subj]  # poi_type
        subj_raw_list = rels_vals_per_seq[subj_seq][subj_canon][subj]  # TODO should probably unit test if this is in ents.values()
        # input((subj_raw_list, rels_vals[subj_canon], subj, subject_mapping, can_seq))
        at_subj_raw_joined_ = CANON_START_CHAR + join_char.join(subj_raw_list) + join_char  # @dish_parking_
        subject_prefixes[local_ent] = at_subj_raw_joined_
    if kb is not None:
        # try to do a lookup directly in the KB
        # NOTE(review): this branch is unfinished — it always fails on the
        # `assert False` below and `probable_subj` is never defined.
        subject_dict = dict()  # subject dict with local entity index: ["dish", "parking"]
        for label_coarse in rels_vals:
            dict_for_label_coarse = rels_vals[label_coarse]
            for instance in dict_for_label_coarse:
                joined_instance = " ".join(dict_for_label_coarse[instance])
                label_without_at = label_coarse if not label_coarse.startswith("@") else label_coarse[1:]
                if label_without_at == "poi_name":
                    label_without_at = "poi"
                if label_without_at == "poi_address":
                    label_without_at = "address"
                if label_without_at == "poi_distance":
                    label_without_at = "distance"
                closest_entry_idx = lowest_med_match(joined_instance, kb.keys())
                probable_intermediate_label = list(kb.keys())[closest_entry_idx]
                probable_intermediate_label_list = kb[probable_intermediate_label]
                assert False, (joined_instance, label_coarse, probable_intermediate_label_list)
                # decide on probable subject
                # TODO
                # find probable subj among intermediate labels
                # cant i just pick one of the labels?
                # why cant i have the subject itself in the list?
                subject_dict[instance] = probable_subj.lower()
        for local_ent, subj_joined in subject_dict.items():
            at_subj_raw_joined_ = CANON_START_CHAR + join_char.join(subj_joined.lower().split()) + join_char
            subject_prefixes[local_ent] = at_subj_raw_joined_
    intermediate_entities = dict()
    for e_i in entity_indices_local_of_interest:
        try:
            subject_prefix = subject_prefixes[e_i]
        except KeyError as KE:
            # XXX removeme
            print(subject_prefixes)
            print(entity_indices_local_of_interest)
            print(KE)
            print(e_i)
            print(can_seq)
            print(can_seq_of_interest)
            assert False, subject_prefixes[e_i]
        can_without_at = can_seq_of_interest[e_i][1:]
        intermediate_label_i = subject_prefix + can_without_at
        intermediate_entities[e_i] = intermediate_label_i
    # NOTE(review): this comprehension recomputes and overwrites the dict the
    # loop above just built — one of the two is redundant.
    intermediate_entities = {i: subject_prefixes[i] + can_seq_of_interest[i][1:]
                             for i in entity_indices_local_of_interest}
    intermediate_canonized = [can if i not in entity_indices_local_of_interest else intermediate_entities[i] for i, can in enumerate(can_seq_of_interest)]
    # input(("canonized ", can_seq_of_interest, " to ", intermediate_canonized))
    return intermediate_canonized
def make_screen_hicolor(screen):
    """Prepare a screen for MainLoop init, switched to 256 colors.

    The screen is configured in place and returned for convenience.
    """
    hicolor_screen = screen
    hicolor_screen.set_terminal_properties(256)
    hicolor_screen.reset_default_terminal_palette()
    return hicolor_screen
def cached_object_arg_test(x):
    """Return the string representation of *x* (a MyTestClass instance)."""
    rendered = str(x)
    return rendered
from functools import reduce
import logging
def calculate_news_id(title: str = "", description: str = "") -> str:
    """
    Calculate an idempotency ID of a piece of news, by taking and summing
    the unicode values of each character (in lower case if applicable) in
    title and description and representing the final value in hex.

    Example:
        assert calculate_news_id(
            title="example title",
            description="example description"
        ) == hex(0xcde)

    :params title: Title of the news headline.
    :params description: Description of the news headline.
    :returns The hexadecimal representation of the idempotency ID of the
        news headline, or None if either argument is not a string.
    """
    # Fixes vs. original: the return annotation was the builtin `hex`
    # function; the code raised and caught its own ValueError just to log;
    # and the `(x or "")` guards were dead code (non-strings never reach
    # the sum). Behavior is unchanged: bad input logs an error and
    # returns None.
    if not isinstance(title, str) or not isinstance(description, str):
        logging.error(
            "ValueError encountered when trying to calculate idempotency id for a news headline. "
            "This indicates that title or description is not a string. "
            "NewsAPI may have changed, causing type errors. \n"
            "Supplied title: %s\n"
            "Supplied description: %s",
            title,
            description,
        )
        return None
    return hex(sum(ord(char) for char in (title + description).lower()))
import configparser
def retrieve_artifact(artifact_id):
    """
    Allows the client side API call to "retrieve" the artifact.

    Returns:
        type: str
        String representing JSON object which contains the result of
        the "artifact retrieve {uuid}" call if it succeeded; otherwise
        the exception raised by the call.
    """
    config = configparser.ConfigParser()
    config.set("DEFAULT", "url", "http://127.0.0.1:8008")
    try:
        return artifact_cli.api_do_retrieve_artifact(artifact_id, config)
    except Exception as e:
        # The exception object itself is handed back to the caller
        # rather than re-raised.
        return e
import ipaddress
def ip_network(addr):
    """Wrapper for ipaddress.ip_network which supports scoped addresses"""
    slash = addr.find('/')
    if slash < 0:
        host, mask = addr, ''
    else:
        host, mask = addr[:slash], addr[slash:]
    return ipaddress.ip_network(_normalize_scoped_ip(host) + mask)
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
    """Play n_games complete games and return the mean total reward.

    If greedy, actions are chosen as argmax over Q-values; otherwise they
    are sampled through the agent's own exploration policy.
    """
    totals = []
    for _ in range(n_games):
        state = env.reset()
        episode_reward = 0
        for _ in range(t_max):
            qvalues = agent.get_qvalues([state])
            if greedy:
                action = qvalues.argmax(axis=-1)[0]
            else:
                action = agent.sample_actions(qvalues)[0]
            state, r, done, _ = env.step(action)
            episode_reward += r
            if done:
                break
        totals.append(episode_reward)
    return np.mean(totals)
def kabsch(query, target, operator=True):
    """Kabsch superposition: RMSD between two point sets after optimal alignment.

    Parameters
    ----------
    query : np.ndarray, ndim=2, shape=[n_atoms, 3]
        The set of query points
    target : np.ndarray, ndim=2, shape=[n_atoms, 3]
        The set of reference points to align to
    operator : bool
        If True, also return the alignment operator (a callable wrapper
        around the rotation and translation matrix). Applying it to the
        query, i.e. ``op(query)``, aligns the query to the target.

    Returns
    -------
    rmsd : float
        The root-mean-square deviation after alignment
    operator : AlignOperator, optional
        Returned only when ``operator`` is True.
    """
    # Input validation: both point sets must be (n_atoms, 3) and congruent.
    if query.ndim != 2:
        raise ValueError('query must be 2d')
    if target.ndim != 2:
        raise ValueError('target must be 2d')
    n_atoms, three = query.shape
    if three != 3:
        raise ValueError('query second dimension must be 3')
    n_atoms, three = target.shape
    if three != 3:
        raise ValueError('target second dimension must be 3')
    if query.shape[0] != target.shape[0]:
        raise ValueError('query and target must have same number of atoms')
    # Center both sets on their centroids.
    centroid_q = np.mean(query, axis=0)
    centroid_t = np.mean(target, axis=0)
    cq = query - centroid_q
    ct = target - centroid_t
    initial_error = np.sum(cq ** 2) + np.sum(ct ** 2)
    covariance = np.dot(cq.T, ct)
    u, s, v = np.linalg.svd(covariance)
    # Guard against mirror-imaging: flip the last row of v when det < 0.
    if np.sign(np.linalg.det(covariance)) == -1:
        v[2] *= -1.0
    rmsd = np.sqrt(np.abs(initial_error - 2.0 * np.sum(s)) / n_atoms)
    if not operator:
        return rmsd
    rotation_matrix = np.dot(v.T, u.T).T
    translation_matrix = centroid_q - np.dot(centroid_t, rotation_matrix)
    return rmsd, AlignOperator(rotation_matrix, translation_matrix)
def GetConfig(user_config):
    """Load and return benchmark config.

    Args:
        user_config: user supplied configuration (flags and config file)

    Returns:
        loaded benchmark configuration
    """
    merged = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
    return merged
def get_postgres_data():
    """
    Read movie and ratings data for all users from the postgres database.

    Parameters: -
    Returns: dataframe with movie IDs, ratings, user IDs;
             number of unique movies in the database
    """
    engine = create_engine(CONN, encoding='latin1', echo=False)
    # Materialize the ratings result set into a dataframe.
    result_proxy = engine.execute(ratings_query)
    ratings = pd.DataFrame(result_proxy.fetchall())
    ratings.columns = ['movieid', 'index', 'userid', 'rating',
                       'demeaned', 'title', 'genre']
    # The surrogate 'index' column is not needed downstream.
    ratings = ratings.drop('index', axis=1)
    movie_count = engine.execute(movie_number_query).fetchall()[0][0]
    return ratings, movie_count
def adriatic_name(p, i, j, a):
    """Build the human-readable name of an Adriatic index from its parameters."""
    # Outer functional form, selected by j.
    form = {1: 'Randic type ',
            2: 'sum ',
            3: 'inverse sum ',
            4: 'misbalance ',
            5: 'inverse misbalance ',
            6: 'min-max ',
            7: 'max-min ',
            8: 'symmetric division '}
    # Inner transformation, selected by the pair (i, a).
    transform = {(1, 0.5): 'lor',
                 (1, 1): 'lo',
                 (1, 2): 'los',
                 (2, -1): 'in',
                 (2, -0.5): 'ir',
                 (2, 0.5): 'ro',
                 (2, 1): '',
                 (2, 2): 's',
                 (3, 0.5): 'ha',
                 (3, 2): 'two'}
    # Base quantity, selected by p: vertex degrees or distances.
    base = {0: 'deg', 1: 'di'}
    return form[j] + transform[(i, a)] + base[p]
def add_certificate(cluster_name, data):
    """Add a certificate to a cluster reference in the Asperathos section.

    Normal response codes: 202
    Error response codes: 400, 401
    """
    result = api.add_certificate(cluster_name, data)
    return u.render(result)
def _make_histogram(values, bins):
    """Build a HistogramProto from an array of values.

    Mirrors the binning logic of
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.c
    """
    flat = values.reshape(-1)
    counts, edges = np.histogram(flat, bins=bins)
    # The proto stores only the upper limit of each bucket, so drop the
    # leftmost edge returned by np.histogram.
    upper_limits = edges[1:]
    return HistogramProto(min=flat.min(),
                          max=flat.max(),
                          num=len(flat),
                          sum=flat.sum(),
                          sum_squares=flat.dot(flat),
                          bucket_limit=upper_limits,
                          bucket=counts)
def generate_spec(nu1, nu2, dist):
    """
    Generate a fake spectrum under the assumptions of the standard
    accretion disk model (luminosity proportional to nu**(1/3)).

    :param nu1: lower frequency bound
    :param nu2: upper frequency bound
    :param dist: distance to the source in cm
        (NOTE(review): currently unused — the returned luminosity is at
        the source; confirm whether a distance scaling was intended)
    :returns: [frequencies, luminosities] as numpy arrays

    Bug fix: np.linspace requires an integer sample count; the original
    passed the float 1e3, which raises TypeError on numpy >= 1.18.
    """
    freq = np.linspace(nu1, nu2, 1000)
    return [freq, freq ** (1.0 / 3.0)]
def dearomatize():
    """
    Dearomatize structure
    ---
    tags:
      - indigo
    parameters:
      - name: json_request
        in: body
        required: true
        schema:
          id: IndigoDearomatizeRequest
          properties:
            struct:
              type: string
              required: true
              examples: c1ccccc1
            output_format:
              type: string
              default: chemical/x-mdl-molfile
              examples: chemical/x-daylight-smiles
              enum:
                - chemical/x-mdl-rxnfile
                - chemical/x-mdl-molfile
                - chemical/x-indigo-ket
                - chemical/x-daylight-smiles
                - chemical/x-chemaxon-cxsmiles
                - chemical/x-cml
                - chemical/x-inchi
                - chemical/x-iupac
                - chemical/x-daylight-smarts
                - chemical/x-inchi-aux
          example:
            struct: c1ccccc1
            output_format: chemical/x-daylight-smiles
    responses:
      200:
        description: Dearomatized chemical structure
        schema:
          $ref: "#/definitions/IndigoResponse"
      400:
        description: 'A problem with supplied client data'
        schema:
          $ref: "#/definitions/ClientError"
      500:
        description: 'A problem on server side'
        schema:
          $ref: "#/definitions/ServerError"
    """
    # NOTE: the docstring above is a flasgger/Swagger spec parsed at runtime.
    # Validate and normalize the incoming request payload.
    data = IndigoRequestSchema().load(get_request_data(request))
    LOG_DATA('[REQUEST] /dearomatize', data['input_format'], data['output_format'], data['struct'], data['options'])
    # Parse the structure in the requested input format.
    md = load_moldata(data['struct'], mime_type=data['input_format'], options=data['options'])
    # Query structures are not supported by the dearomatization step.
    if md.is_query:
        return get_error_response("Structures with query features cannot be dearomatized yet", 400, data['json_output'])
    # Dearomatize in place, then serialize in the requested output format.
    md.struct.dearomatize()
    return get_response(md, data['output_format'], data['json_output'], data['options'])
def isTree(rootFile, pathSplit):
    """
    Return True if the object at (rootFile, pathSplit) inherits from TTree.
    """
    # An empty path refers to the ROOT file itself, which is never a TTree.
    if pathSplit == []:
        return False
    return isTreeKey(getKey(rootFile, pathSplit))
def create_server(host="localhost", remote_port=22, local_port=2222):
    """
    Start a TCP port-forwarding server in a background thread.

    :param str host: the host to open the port forwarding from
    :param int remote_port: the remote port to forward to locally
    :param int local_port: the local port opened for forwarding
    :return ServerSession: the running thread wrapping the server
    """
    # ThreadedTCPServer so multiple connections can be relayed at once.
    forwarder = ThreadedTCPServer((host, int(local_port)), TCPRelay)
    forwarder.rport = int(remote_port)
    forwarder.bufsize = 128
    session = ServerSession(forwarder)
    session.start()
    return session
def output_to_df(record):
    """Convert a list of (line_count, author) records to a DataFrame."""
    column_names = ["line_count_python", "author_name"]
    return pd.DataFrame(record, columns=column_names)
def sanitize_tx_data(
    unspents,
    outputs,
    fee,
    leftover,
    combine=True,
    message=None,
    compressed=True,
    absolute_fee=False,
    min_change=0,
    version='main',
    message_is_hex=False,
):
    """
    sanitize_tx_data()
    fee is in satoshis per byte.

    Normalizes output amounts to satoshis, selects unspents to cover them
    (plus fee), appends any change to ``leftover`` and attaches optional
    OP_RETURN message chunks.  Raises ValueError on empty unspents or on a
    mainnet/testnet address mismatch.
    """
    # NOTE(review): the `compressed` parameter is unused in this body --
    # confirm whether callers still rely on it.
    outputs = outputs.copy()
    # Convert every output amount to satoshis (currency may vary per output).
    for i, output in enumerate(outputs):
        dest, amount, currency = output
        outputs[i] = (dest, currency_to_satoshi_cached(amount, currency))
    if not unspents:
        raise ValueError('Transactions must have at least one unspent.')
    # Temporary storage so all outputs precede messages.
    messages = []
    if message:
        # Messages longer than MESSAGE_LIMIT are split across several
        # zero-value OP_RETURN outputs.
        if message_is_hex:
            message_chunks = chunk_data(message, MESSAGE_LIMIT)
        else:
            message_chunks = chunk_data(message.encode('utf-8'), MESSAGE_LIMIT)
        for message in message_chunks:
            messages.append((message, 0))
    # Include return address in output count.
    # Calculate output size as a list (including return address).
    output_size = [len(address_to_scriptpubkey(o[0])) + 9 for o in outputs]
    output_size.append(len(messages) * (MESSAGE_LIMIT + 9))
    output_size.append(len(address_to_scriptpubkey(leftover)) + 9)
    sum_outputs = sum(out[1] for out in outputs)
    # Use Branch-and-Bound for coin selection:
    unspents[:], remaining = select_coins(
        sum_outputs,
        fee,
        output_size,
        min_change=min_change,
        absolute_fee=absolute_fee,
        consolidate=combine,
        unspents=unspents,
    )
    # Positive remainder means change goes back to the `leftover` address.
    if remaining > 0:
        outputs.append((leftover, remaining))
    # Sanity check: If spending from main-/testnet, then all output addresses must also be for main-/testnet.
    for output in outputs:
        dest, amount = output
        vs = get_version(dest)
        if vs and vs != version:
            raise ValueError('Cannot send to ' + vs + 'net address when spending from a ' + version + 'net address.')
    outputs.extend(messages)
    return unspents, outputs | 778b94b495993daa1f9f1573d85a7dac868473b1 | 3,635,959 |
def create_csrf_disabled_registrationform():
    """Build a registration form whose CSRF protection is turned off."""
    csrf_kwargs = _get_csrf_disabled_param()
    return create_registrationform(**csrf_kwargs)
def calc_a_lzc(ts, norm_factor=None):
    """Compute the Lempel-Ziv complexity of a single time series.

    :param ts: a time series (n x 1)
    :param norm_factor: optional normalization divisor; if None the result
        is returned unnormalized
    :return: the (possibly normalized) Lempel-Ziv complexity
    """
    # Binarize around the median and encode the series as a '0'/'1' string.
    binary_string = "".join(np.char.mod('%i', ts >= np.median(ts)))
    complexity = lempel_ziv_complexity(binary_string)
    if norm_factor:
        complexity /= norm_factor
    return complexity
import json
import os
def run_train_model(args):
    """
    Train the model described by a JSON model specification and persist the
    fitted estimator (pickle) plus a JSON metadata record of the run.

    :param args: argparse-style namespace; fields used here include
        modelspec, modelout, metadataout, inputfile, inputgroup, notuning,
        cvfolds, workers, calcweights and module_logger.
    :return: 0 on success
    """
    logger = args.module_logger
    _ = create_filepath(args.modelout, logger)
    logger.debug('Loading model specification from {}'.format(args.modelspec))
    model_spec = json.load(open(args.modelspec))
    model = load_model(model_spec['module_path'], model_spec['model_name'])
    load_groups = get_valid_hdf5_groups(args.inputfile, args.inputgroup)
    traindata, targets, dtinfo, sminfo, ftinfo = load_ml_dataset(args.inputfile, load_groups, None, args, logger)
    assert traindata.shape[0] > 1, 'No samples (rows) in training data'
    assert traindata.shape[1] > 1, 'No features (columns) in training data'
    # Optional preprocessing step declared in the model spec.
    if 'preprocess' in model_spec and model_spec['preprocess']:
        logger.debug('Preprocessing dataset with method: {}'.format(model_spec['preprocessor']['preprocessor_name']))
        traindata, prepinfo = apply_preprocessor(traindata, model_spec['preprocessor'], 'train')
    else:
        prepinfo = None
    if targets is not None:
        assert targets.size == traindata.shape[0], 'Mismatch num targets {} and num samples {}'.format(targets.size, traindata.shape[0])
    # Metadata collected throughout the run; dumped as JSON at the end.
    run_metadata = {'dataset_info': dtinfo, 'sample_info': sminfo,
                    'feature_info': ftinfo, 'model_info': dict()}
    if prepinfo is not None:
        run_metadata['preprocess_info'] = prepinfo
    logger.debug('Training model')
    if args.notuning:
        # Plain fit with the spec's default hyperparameters.
        params = model_spec['default']
        model = train_nocv(model, params, traindata, targets, sminfo['weights'])
        run_metadata['model_info']['params'] = params
        run_metadata['model_info']['tuned'] = False
    else:
        # Grid-search cross-validation over the spec's parameter grid.
        params = model_spec['cvtune']
        tune_info = train_gridcv(model, params, traindata, targets, args.cvfolds, args.workers, sminfo['weights'])
        model = tune_info.best_estimator_
        run_metadata['model_info']['params'] = tune_info.best_params_
        run_metadata['model_info']['tuned'] = True
        run_metadata['training_info'] = dict()
        run_metadata['training_info']['cv_scores'] = simplify_cv_scores(tune_info.cv_results_)
        run_metadata['training_info']['best_score'] = tune_info.best_score_
        run_metadata['training_info']['best_index'] = int(tune_info.best_index_)
        run_metadata['training_info']['scoring'] = params['scoring']
    run_metadata['model_info']['name'] = model_spec['model_name']
    run_metadata['model_info']['type'] = model_spec['model_type']
    if model_spec['model_type'] == 'classifier':
        # Bug fix: with --notuning, 'training_info' was never created above,
        # so recording the class order raised KeyError for classifiers.
        run_metadata.setdefault('training_info', dict())
        run_metadata['training_info']['class_order'] = list(map(int, model.classes_))
    logger.debug('Training finished')
    if 'store_attributes' in model_spec:
        logger.debug('Storing user requested model attributes')
        attribs = extract_model_attributes(model, model_spec['store_attributes'], logger)
        run_metadata['attribute_info'] = attribs
    if args.calcweights:
        raise NotImplementedError('Currently not functional')
    logger.debug('Saving model and metadata')
    run_metadata['run_info'] = dict()
    run_metadata['run_info']['model_spec'] = os.path.basename(args.modelspec)
    run_metadata['run_info']['model_file'] = os.path.basename(args.modelout)
    run_metadata['run_info']['train_data'] = os.path.basename(args.inputfile)
    run_metadata['run_info']['train_group'] = args.inputgroup
    logger.debug('Writing model file...')
    # NOTE(review): 'pck' is presumably 'import pickle as pck' elsewhere in
    # this module -- confirm; it is not imported in this snippet.
    with open(args.modelout, 'wb') as outfile:
        pck.dump(model, outfile)
    if not args.metadataout:
        # Default the metadata path to the model path with a .json suffix.
        mdout = args.modelout.rsplit('.', 1)[0] + '.json'
    else:
        mdout = args.metadataout
    _ = create_filepath(mdout, logger)
    logger.debug('Writing model metadata...')
    with open(mdout, 'w') as outfile:
        _ = json.dump(run_metadata, outfile, indent=1, sort_keys=True)
    logger.debug('Done')
    return 0
def rbd_command(command_args, pool_name=None):
    """
    Run a rbd CLI operation directly. This is a fallback to allow
    manual execution of arbitrary commands in case the user wants to
    do something that is absent or broken in Calamari proper.
    :param pool_name: Ceph pool name, or None to run without --pool argument
    :param command_args: Command line, excluding the leading 'rbd' part.
    """
    prefix = ["rbd"]
    if pool_name:
        prefix += ["--pool", pool_name]
    args = prefix + command_args
    log.info('rbd_command {0}'.format(str(args)))
    rc, out, err = utils.execCmd(args)
    log.info('rbd_command {0} {1} {2}'.format(str(rc), out, err))
    return {
        'out': out,
        'err': err,
        'status': rc,
    }
import sys
def get_twitter_auth():
    """
    Setup Twitter authentication.
    Return: tweepy.OAuthHandler object
    """
    # NOTE(review): with hard-coded placeholder credentials this try/except
    # is dead code -- KeyError can only arise when the values are read from
    # something like os.environ, which the error message below still
    # assumes. Confirm whether env-var lookup should be restored.
    try:
        consumer_key = "YOUR_KEY"
        consumer_secret = "CONSUMER SECRET"
        access_token = "ACCESS TOKEM"
        access_secret = "ACCESS SECRET"
    except KeyError:
        sys.stderr.write("TWITTER_* environment variables not set\n")
        sys.exit(1)
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    return auth | 00dbd0ca9291387334cc578bc44c0eb37a179879 | 3,635,964 |
from typing import Counter
def aggregate_roles_iteration(roles, parameters=None):
    """
    Single iteration of the roles aggregation algorithm
    Parameters
    --------------
    roles
        Roles
    parameters
        Parameters of the algorithm
    Returns
    --------------
    agg_roles
        (Partially aggregated) roles
    """
    threshold = exec_utils.get_param_value(Parameters.ROLES_THRESHOLD_PARAMETER, parameters, 0.65)
    # Pairwise similarities, stored negated so the ascending sort below puts
    # the most similar pair first (ties broken by the activity-set names).
    sim = []
    for i in range(len(roles)):
        for j in range(i + 1, len(roles)):
            sim.append((i, j, roles[i][0], roles[j][0], -find_role_similarity(roles, i, j, parameters=parameters)))
    sim = sorted(sim, key=lambda x: (x[-1], constants.DEFAULT_VARIANT_SEP.join(x[-3]), constants.DEFAULT_VARIANT_SEP.join(x[-2])))
    found_feasible = False
    if sim:
        # Merge the best pair only when its similarity exceeds the threshold.
        if -sim[0][-1] > threshold:
            set_act1 = roles[sim[0][0]][0]
            set_act2 = roles[sim[0][1]][0]
            set_res1 = roles[sim[0][0]][1]
            set_res2 = roles[sim[0][1]][1]
            total_set_act = sorted(list(set(set_act1).union(set(set_act2))))
            total_set_res = Counter(set_res1 + set_res2)
            # i < j here, so after deleting index i the second role's index
            # shifts down by one.
            del roles[sim[0][0]]
            del roles[sim[0][1] - 1]
            roles.append([total_set_act, total_set_res])
            roles = sorted(roles, key=lambda x: constants.DEFAULT_VARIANT_SEP.join(x[0]))
            found_feasible = True
    return roles, found_feasible | 44ce1310683f798f4cdc04ec5a9725c1d3c0f260 | 3,635,965 |
def convertToOneHot(vector, num_classes=None):
    """
    Converts an input 1-D vector of non-negative integers into an output
    2-D array of one-hot vectors, where an i'th input value
    of j will set a '1' in the i'th row, j'th column of the
    output array.
    Example:
        v = np.array((1, 0, 4))
        one_hot_v = convertToOneHot(v)
        print one_hot_v
        [[0 1 0 0 0]
         [1 0 0 0 0]
         [0 0 0 0 1]]
    """
    assert isinstance(vector, np.ndarray)
    assert len(vector) > 0
    if num_classes is None:
        num_classes = np.max(vector)+1
    else:
        assert num_classes > 0
        # Labels are 0-indexed, so num_classes must STRICTLY exceed the
        # largest label (the original `>=` check admitted
        # num_classes == max(vector), which indexed out of range below).
        assert num_classes > np.max(vector)
    result = np.zeros(shape=(len(vector), num_classes))
    result[np.arange(len(vector)), vector] = 1
    return result.astype(int)
def open_signal(file, sr):
    """
    Open a txt file containing the signal.
    Parameters:
        file: Address where the file is located
        sr: Sampling rate
    Return:
        signal: The numpy-shaped signal
        t: Time vector
    """
    # Columns 1-8 hold the channel data; column 0 is skipped. Lines
    # starting with '%' are treated as comments.
    signal = np.loadtxt(file, comments="%", delimiter=",",
                        usecols=(1, 2, 3, 4, 5, 6, 7, 8))
    n_samples = signal.shape[0]
    duration = n_samples / sr
    t = np.arange(0, duration, 1 / sr)
    return signal, t
def flops_elu(module: nn.ELU, input: Tensor, output: Tensor) -> int:
    """FLOPs estimation for `torch.nn.ELU`"""
    # Per element: compare to 0, exponentiate, subtract 1, scale by alpha,
    # compare to 0 again, and combine both branches -> 6 operations.
    ops_per_element = 6
    return ops_per_element * input.numel()
import copy
def gen_DFSC_MitEx(backend: Backend, **kwargs) -> MitEx:
    """
    Produces a MitEx object that applies DFSC characteriastion to all experiment results.
    :param backend: Backend experiments are run through.
    :type backend: Backend
    :key experiment_mitex: MitEx object observable experiments are run through
    :key characterisation_mitex: MitEX object characteriastion experiments are run through.
    :return: MitEx object for automatic DFSC correction of circuits.
    :rtype: MitEx
    """
    # Shallow-copy caller-provided MitEx objects so wiring them into the
    # task graphs below does not mutate the originals.
    _experiment_mitex = copy.copy(
        kwargs.get(
            "experiment_mitex",
            MitEx(
                backend,
                _label="ExperimentMitex",
                mitres=gen_compiled_MitRes(backend, 0),
            ),
        )
    )
    _characterisation_mitex = copy.copy(
        kwargs.get(
            "characterisation_mitex",
            MitEx(
                backend,
                _label="CharacterisationMitex",
                mitres=gen_compiled_MitRes(backend, 0),
            ),
        )
    )
    _characterisation_taskgraph = TaskGraph().from_TaskGraph(_characterisation_mitex)
    _experiment_taskgraph = TaskGraph().from_TaskGraph(_experiment_mitex)
    # Characterisation branch: collate circuits, run them, then derive the
    # DFSC characterisation from the results.
    _characterisation_taskgraph.add_wire()
    _characterisation_taskgraph.prepend(DFSC_collater_task_gen())
    _characterisation_taskgraph.append(DFSC_characterisation_task_gen())
    # Experiment branch runs in parallel with characterisation; its results
    # are then corrected using the characterisation output.
    _experiment_taskgraph.parallel(
        MitEx(backend).from_TaskGraph(_characterisation_taskgraph)
    )
    _experiment_taskgraph.prepend(DFSC_circuit_task_gen())
    _experiment_taskgraph.append(
        DFSC_correction_task_gen(kwargs.get("DFSC_threshold", 0.01))
    )
    return MitEx(backend).from_TaskGraph(_experiment_taskgraph) | ff250e8f8319ba5ceca6b035741d7c896f460268 | 3,635,969 |
import json
def dumps(pif, **kwargs):
    """Serialize a Physical Information Object (or a list of them) to JSON.

    :param pif: object or list of objects to serialize
    :param kwargs: options forwarded to json.dumps()
    """
    return json.dumps(pif, cls=PifEncoder, **kwargs)
def replace_digits_with_zero(data):
    """Follow the paper's implementation
    """
    # Map every numeric token to the literal "0"; tags are copied unchanged.
    transformed = []
    for words, tags in data:
        zeroed_words = ["0" if is_number(w) else w for w in words]
        transformed.append((zeroed_words, list(tags)))
    return transformed
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two integers.

    Signs are ignored; gcd(0, 0) is 0.
    :param number1: a first number
    :param number2: a second number
    :return: greatest common divisor
    """
    a, b = abs(number1), abs(number2)
    # Euclid's algorithm: repeatedly replace the pair by (divisor, remainder)
    # until the remainder reaches zero.
    while b:
        a, b = b, a % b
    return a
import time
def message_counter_down_timer(strMsg="Calling ClointFusion Function in (seconds)",start_value=5):
    """
    Function to show count-down timer. Default is 5 seconds.
    Ex: message_counter_down_timer()

    Returns True when the countdown completes, False if the user cancels
    or closes the window.
    """
    CONTINUE = True
    layout = [[sg.Text(strMsg,justification='c')],[sg.Text('',size=(10, 0),font=('Helvetica', 20),justification='c', key='text')],
    [sg.Image(filename = str(cf_logo_file_path),size=(60,60))],
    [sg.Exit(button_color=('white', 'firebrick4'), key='Cancel')]]
    window = sg.Window('ClointFusion - Countdown Timer', layout, no_titlebar=True, auto_size_buttons=False,keep_on_top=True, grab_anywhere=False, element_justification='c',element_padding=(0, 0),finalize=True,icon=cf_icon_cdt_file_path)
    # Start one above so the first decrement displays `start_value`.
    current_value = start_value + 1
    while True:
        # Short read() timeout keeps the UI responsive; the 1 s sleep below
        # paces the countdown at roughly one tick per second.
        event, _ = window.read(timeout=2)
        current_value = current_value - 1
        time.sleep(1)
        if current_value == 0:
            CONTINUE = True
            break
        if event in (sg.WINDOW_CLOSED , 'Cancel'):
            CONTINUE = False
            print("Action cancelled by user")
            break
        window['text'].update(value=current_value)
    window.close()
    return CONTINUE | 064f995aa27298414f83398facbfbfa449c67dd1 | 3,635,973 |
def get_overlapping_timestamps(timestamps: list, starttime: int, endtime: int):
    """
    Find the timestamps in the provided list of timestamps that fall between starttime/endtime. Return these timestamps
    as a list (of strings). First timestamp in the list is always the nearest to the starttime without going over.

    Parameters
    ----------
    timestamps
        list of timestamps we want to pull from, to get the timestamps between starttime and endtime
        (assumed sorted ascending -- the nearest-without-going-over scan relies on it; TODO confirm)
    starttime
        integer utc timestamp in seconds
    endtime
        integer utc timestamp in seconds

    Returns
    -------
    list
        list of timestamps that are within the starttime/endtime range
    """
    final_timestamps = []
    # we require a starting time stamp that is either less than the given starttime or no greater than
    # the given starttime by 60 seconds
    buffer = 60
    starting_timestamp = None
    for tstmp in timestamps:  # first pass, find the nearest timestamp (to starttime) without going over the starttime
        if tstmp < starttime + buffer:
            # Bug fix: compare against None, not truthiness -- a legitimate
            # timestamp of 0 was previously treated as "not found" and
            # overwritten by later (possibly too-late) candidates.
            if starting_timestamp is None:
                starting_timestamp = tstmp
            elif (tstmp > starting_timestamp) and (tstmp <= starttime):
                starting_timestamp = tstmp
    if starting_timestamp is None:
        # No candidate at or near starttime -> empty result.
        return final_timestamps
    starttime = starting_timestamp
    final_timestamps.append(str(starttime))
    for tstmp in timestamps:  # second pass, append all timestamps that are between the starting timestamp and endtime
        if (tstmp > starttime) and (tstmp <= endtime):
            final_timestamps.append(str(tstmp))
    return final_timestamps
import os
def get_field_h5files(sdir, prefix_dirs="ph"):
    """Return names of field h5 files in a directory
    Parameters
    ----------
    sdir: str
        Path to the search directory
    prefix_dirs: str
        If no matching files are found in sdir, search
        subdirectories whose name starts with this string.
    Returns
    -------
    files: list of str
        Paths to the found h5 files (sorted; empty list if none)
    Notes
    -----
    If DIR does not contain any h5 fields, then returns all h5 fields
    in subdirectories that start with `prefix`.
    This method ignores h5 files of the eps structure, i.e. h5 files
    starting with "eps" are ignored.
    """
    sdir = os.path.realpath(sdir)
    files = os.listdir(sdir)
    ffil = []
    for f in files:
        if f.endswith(".h5") and not f.startswith("eps"):
            ffil.append(os.path.join(sdir, f))
    ffil.sort()
    if len(ffil):
        return ffil
    # No matches directly in sdir: collect from ALL matching subdirectories.
    # (Bug fix: the sort/return previously sat inside the loop, so only the
    # first matching subdirectory was ever returned -- contradicting the
    # docstring -- and None was returned when no subdirectory matched.)
    for df in files:
        if (df.startswith(prefix_dirs) and
                os.path.isdir(os.path.join(sdir, df))):
            df = os.path.join(sdir, df)
            for f in os.listdir(df):
                if f.endswith(".h5") and not f.startswith("eps"):
                    ffil.append(os.path.join(df, f))
    ffil.sort()
    return ffil
def calc_war_battingfactor(oba,mlb,league,parkfactor,batting):
    """
    oba: instance of wOBAWeightSim
    mlb: instance of BattingSim
    league: DataFrame
    parkfactor: DataFrame
    batting: DataFrame
    -----------------------------------------
    returns: DataFrame with column [BattingFactor]

    Combines wOBA weights, weighted park factors and a league adjustment
    into a per-batter batting factor ('bf'), merged with each batter's
    plate-appearance count ('pa').
    """
    # calculate wOBA
    woba = calc_woba_weights(oba,mlb)
    # Calculate Runs/Plate Appearance
    rpa = pd.Series(np.r_[(*mlb['R/(O+E+K+BB+IBB+HBP+I+S+D+T+HR)'],)],index=mlb.index.pandas(),name='RPA')
    # Calculate weighted park factor
    wPF = (1 - (parkfactor / 100)).groupby('team').apply(lambda x: x * rpa).rename('wPF').to_frame()
    # Calculate np_league wOBA
    lw = woba[LINEAR_WEIGHTS]
    np_woba = (league[LINEAR_WEIGHTS].groupby('league').apply(lambda x: x * lw)).sum(axis=1) / league[['O','E','K','S','D','T','HR','BB','SF','HBP']].sum(axis=1)
    # Calculate modified wRC for np-league
    wLG = np_woba.groupby('league').apply(lambda x: -(x/woba['woba_Scale'] - woba['woba'])).rename('wLG').to_frame()
    # calc wRAA
    wRAA = pd.concat([*_calc_wraa(batting,woba['woba'],woba['woba_Scale'],lw)],axis=0).to_frame()
    # calc batting PA (sum of all plate-appearance outcome columns)
    pa = batting[['O','E','K','BB','IBB','HBP','I','S','D','T','HR']].sum(axis=1).rename('pa').to_frame()
    # Merge PA, wRAA, wPF, wLG into one frame
    # (left joins on year/team and year/league attach the adjustments)
    bf = pd.merge(pa,wRAA,how='inner',left_index=True,right_index=True).reset_index()
    bf = pd.merge(bf,wPF,how='left',left_on=['year','team'],right_on=['year','team'])
    bf = pd.merge(bf,wLG,how='left',left_on=['year','league'],right_on=['year','league'])
    bf.set_index(list(batting.index.names),inplace=True)
    # Calculate Batting Factor: per-PA components scaled back by PA
    batfactor = (bf['wRAA']*bf['pa']+bf['wPF']*bf['pa']+bf['wLG']*bf['pa']).rename('bf').to_frame()
    return pd.merge(batfactor,pa,how='inner',left_index=True,right_index=True) | 40f07a7ea1fab67bb982d3292d51784990fa8d2d | 3,635,976 |
def average_implied_variance(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
                             source: str = None, real_time: bool = False) -> Series:
    """
    Historic weighted average implied variance for the underlying assets of an equity index.
    :param asset: asset object loaded from security master
    :param tenor: relative date representation of expiration date e.g. 1m
    :param strike_reference: reference for strike level
    :param relative_strike: strike relative to reference
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: average implied variance curve (empty Series if no data)
    """
    if real_time:
        raise NotImplementedError('realtime average_implied_variance not implemented')
    # Mirror put deltas onto the call-delta axis (e.g. 25-delta put -> 75).
    if strike_reference == EdrDataReference.DELTA_PUT:
        relative_strike = abs(100 - relative_strike)
    # The data service expects relative strikes as fractions, not percents.
    relative_strike = relative_strike / 100
    delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
    strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
    _logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
    mqid = asset.get_marquee_id()
    where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
    q = GsDataApi.build_market_data_query([mqid], QueryType.AVERAGE_IMPLIED_VARIANCE, where=where, source=source,
                                          real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    return Series() if df.empty else df['averageImpliedVariance'] | 705fcaa7d7ca42a69db4fef2469151cbc18ffa90 | 3,635,977 |
def get_forward_backward_walk_union_ops(forward_seed_ops,
                                        backward_seed_ops,
                                        forward_inclusive=True,
                                        backward_inclusive=True,
                                        within_ops=None,
                                        control_inputs=True):
    """Return the union of a forward and a backward graph walk.
    Args:
      forward_seed_ops: an iterable of operations from which the forward
        walk starts. If a list of tensors is given instead, the seed ops
        are the consumers of those tensors.
      backward_seed_ops: an iterable of operations from which the backward
        walk starts. If a list of tensors is given instead, the seed ops
        are the generators of those tensors.
      forward_inclusive: if True, the given forward_seed_ops are part of
        the resulting set.
      backward_inclusive: if True, the given backward_seed_ops are part of
        the resulting set.
      within_ops: restrict both walks to these operations; None means the
        whole graph.
      control_inputs: an object convertible to a control output dictionary
        (see util.convert_to_control_outputs); when convertible, it is used
        while walking the graph forward.
    Returns:
      A Python set of all tf.Operation in the union of the two walks.
    Raises:
      TypeError: if any of the op arguments cannot be converted to a list
        of tf.Operation.
    """
    walked_forward = get_forward_walk_ops(forward_seed_ops,
                                          inclusive=forward_inclusive,
                                          within_ops=within_ops,
                                          control_outputs=control_inputs)
    walked_backward = get_backward_walk_ops(backward_seed_ops,
                                            inclusive=backward_inclusive,
                                            within_ops=within_ops,
                                            control_inputs=control_inputs)
    return walked_forward | walked_backward
def random_crop(*arrays, height, width=None):
    """Random crop.
    Args:
        *arrays: Input arrays that are to be cropped. None values accepted.
            The shape of the first element is used as reference.
        height: Output height.
        width: Output width. Default is same as height.
    Returns:
        Cropped array if `arrays` contains a single array; a list of cropped arrays otherwise
    """
    if len(arrays) <= 0:
        return None
    if width is None:
        width = height
    h, w = arrays[0].shape[:2]
    hh, ww = h - height, w - width
    # Offsets are sampled over the inclusive range [0, hh] / [0, ww] so the
    # crop can actually reach the bottom/right edge. (Bug fix: randint's
    # upper bound is exclusive, so the previous `randint(0, hh)` could never
    # return hh, biasing the crop away from the edges.)
    a = np.random.randint(0, hh + 1) if hh > 0 else 0
    b = np.random.randint(0, ww + 1) if ww > 0 else 0
    slices = (
        slice(a, a + height),
        slice(b, b + width)
    )
    results = [(None if v is None else v[slices]) for v in arrays]
    if len(results) == 1:
        results, = results
    return results
from re import T
# NOTE(review): `re.T` is the re.TEMPLATE flag, yet below T is used as a
# symbolic time variable (hrfs[hrf_no](T)); this import most likely should
# come from a formula/symbolic module instead -- confirm.
def block_amplitudes(name, block_spec, t, hrfs=(glover,),
                     convolution_padding=5.,
                     convolution_dt=0.02,
                     hrf_interval=(0.,30.)):
    """ Design matrix at times `t` for blocks specification `block_spec`
    Create design matrix for linear model from a block specification
    `block_spec`,  evaluating design rows at a sequence of time values `t`.
    `block_spec` may specify amplitude of response for each event, if different
    (see description of `block_spec` parameter below).
    The on-off step function implied by `block_spec` will be convolved with
    each HRF in `hrfs` to form a design matrix shape ``(len(t), len(hrfs))``.
    Parameters
    ----------
    name : str
        Name of condition
    block_spec : np.recarray or array-like
        A recarray having fields ``start, end, amplitude``, or a 2D ndarray /
        array-like with three columns corresponding to start, end, amplitude.
    t : np.ndarray
        An array of np.float values at which to evaluate the design. Common
        examples would be the acquisition times of an fMRI image.
    hrfs : sequence, optional
        A sequence of (symbolic) HRFs that will be convolved with each block.
        Default is ``(glover,)``.
    convolution_padding : float, optional
        A padding for the convolution with the HRF. The intervals
        used for the convolution are the smallest 'start' minus this
        padding to the largest 'end' plus this padding.
    convolution_dt : float, optional
        Time step for high-resolution time course for use in convolving the
        blocks with each HRF.
    hrf_interval: length 2 sequence of floats, optional
        Interval over which the HRF is assumed supported, used in the
        convolution.
    Returns
    -------
    X : np.ndarray
        The design matrix with ``X.shape[0] == t.shape[0]``. The number of
        columns will be ``len(hrfs)``.
    contrasts : dict
        A contrast is generated for each HRF specified in `hrfs`.
    """
    block_spec = np.asarray(block_spec)
    # Recarray input: validate the field layout, then flatten to a plain
    # 2D array so the column slicing below works uniformly.
    if block_spec.dtype.names is not None:
        if block_spec.dtype.names not in (('start', 'end'),
                                          ('start', 'end', 'amplitude')):
            raise ValueError('expecting fields called "start", "end" and '
                             '(optionally) "amplitude"')
        block_spec = np.array(block_spec.tolist())
    block_times = block_spec[:, :2]
    amplitudes = block_spec[:, 2] if block_spec.shape[1] == 3 else None
    # Now construct the design in time space
    convolution_interval = (block_times.min() - convolution_padding,
                            block_times.max() + convolution_padding)
    B = blocks(block_times, amplitudes=amplitudes)
    t_terms = []
    c_t = {}
    n_hrfs = len(hrfs)
    # One design column (and one unit contrast) per HRF.
    for hrf_no in range(n_hrfs):
        t_terms.append(convolve_functions(B, hrfs[hrf_no](T),
                                          convolution_interval,
                                          hrf_interval,
                                          convolution_dt))
        contrast = np.zeros(n_hrfs)
        contrast[hrf_no] = 1
        c_t['{0}_{1:d}'.format(name, hrf_no)] = contrast
    t_formula = Formula(t_terms)
    tval = make_recarray(t, ['t'])
    X_t = t_formula.design(tval, return_float=True)
    return X_t, c_t | 3b8e30b5a08ba016c4ebd286b7735e6b5f7c18f9 | 3,635,980 |
def get_embedding(param_list, meta):
    """
    Get the USE embeddings of the input text. (non-qa USE)
    Param 1 - either string or list of strings
    Return - Embeddings
    """
    payload = {
        'op': 'encode',
        'text': param_list[0],
    }
    response = USE_ENCODER_API.post(payload)
    return response['encoded']
import json
def ticket(request, key):
    """Query endpoint: given a result key, return the associated user's info.

    (Lets a client look up the user's information after obtaining a result
    key.)
    """
    wxuser = ResultTicket.fetch_user(key)
    if not wxuser:
        # Unknown or expired key -> 404 with an empty body.
        return HttpResponse(status=404)
    return HttpResponse(json.dumps(wxuser.serialize())) | ebe7c6511f8a95b2c6cdaaec7c080e8e8eb70c83 | 3,635,982 |
def is_sequence(arg):
    """Check if an object is iterable (you can loop over it) and not a string."""
    # String-like objects are identified by their `strip` method.
    looks_like_string = hasattr(arg, "strip")
    return (not looks_like_string) and hasattr(arg, "__iter__")
def _sig_figs(x):
    """Format *x* to three significant figures in TeX style.

    Single-argument wrapper around ``utils.sigFigs`` (n=3, tex=True) so it
    can be mapped directly over a pandas dataframe via "apply".
    """
    return numutils.sigFigs(x, n=3, tex=True)
from typing import List
from typing import Optional
from typing import Tuple
def stackbar(
    y: np.ndarray,
    type_names: List[str],
    title: str,
    level_names: List[str],
    figsize: Optional[Tuple[int, int]] = None,
    dpi: Optional[int] = 100,
    cmap: Optional[ListedColormap] = cm.tab20,
    plot_legend: Optional[bool] = True,
) -> plt.Subplot:
    """
    Plots a stacked barplot for one (discrete) covariate
    Typical use (only inside stacked_barplot): plot_one_stackbar(data.X, data.var.index, "xyz", data.obs.index)
    Parameters
    ----------
    y
        The count data, collapsed onto the level of interest. i.e. a binary covariate has two rows, one for each group, containing the count
        mean of each cell type
    type_names
        The names of all cell types
    title
        Plot title, usually the covariate's name
    level_names
        names of the covariate's levels
    figsize
        figure size
    dpi
        dpi setting
    cmap
        The color map for the barplot
    plot_legend
        If True, adds a legend
    Returns
    -------
    Returns a plot
    ax
        a plot
    """
    n_bars, n_types = y.shape
    figsize = rcParams["figure.figsize"] if figsize is None else figsize
    fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
    r = np.array(range(n_bars))
    # Per-level totals; each bar is drawn as percentages of its level total.
    sample_sums = np.sum(y, axis=1)
    barwidth = 0.85
    # Running height of each bar so the next cell type stacks on top.
    cum_bars = np.zeros(n_bars)
    for n in range(n_types):
        bars = [i / j * 100 for i, j in zip([y[k][n] for k in range(n_bars)], sample_sums)]
        # `n % cmap.N` cycles colors when there are more types than colors.
        plt.bar(r, bars, bottom=cum_bars, color=cmap(n % cmap.N), width=barwidth, label=type_names[n], linewidth=0)
        cum_bars += bars
    ax.set_title(title)
    if plot_legend:
        ax.legend(loc='upper left', bbox_to_anchor=(1, 1), ncol=1)
    ax.set_xticks(r)
    ax.set_xticklabels(level_names, rotation=45)
    ax.set_ylabel("Proportion")
    return ax | 328de6e4f91d3e2014432e20e476cd8833ed3f4c | 3,635,985 |
def get_mempool_transaction_ids():
    """
    Request the full list of transactions IDs currently in the mempool,
    as an array
    :return list: a list of transaction IDs
    """
    return call_api('mempool/txids')
def zsplits(ds, flds=['density'], npts=21):
    """Return a lineout of a yt dataset, along z, through X=Y=0 but averaged over X,Y within a cube edge length
    Splits region into a series of cubes and takes their averages
    Expects multiple fields; returns dict of field grid vectors.
    Also, outputs YT arrays (keep units)
    """
    # NOTE(review): mutable default `flds=['density']` is shared between
    # calls; it is only read here, but confirm before ever mutating it.
    # Define the centers
    zmin = ds.domain_left_edge[2]
    zmax = ds.domain_right_edge[2]
    dz = (zmax - zmin)/npts
    # Cube centers along z, inset by dz from both domain edges.
    zgv = np.linspace(zmin + dz, zmax - dz, npts)
    fldgv = {}
    for i in range(npts):
        # For each zvalue, define a cube centered on (X, Y, Z) = [0, 0, zcent] and take its average
        cent = ds.arr([ds.domain_center[0].v, ds.domain_center[1].v, zgv[i].v], 'code_length') # Typically, centered at (0, 0, zcent)
        le = ds.arr([ds.domain_left_edge[0].v, ds.domain_left_edge[1].v, (zgv[i] - dz/2.0).v], 'code_length') # Sub-region left edge
        re = ds.arr([ds.domain_right_edge[0].v, ds.domain_right_edge[1].v, (zgv[i] + dz/2.0).v], 'code_length') # Sub-region right edge
        reg = ds.region(cent, le, re) # Sub-region rectangular prism
        for fld in flds:
            myval = reg.mean(fld)
            if i < 1: # First iteration, allocate arrays
                fldgv[fld] = ds.arr(np.zeros([npts]), myval.units)
            fldgv[fld][i] = myval
        reg.clear_data() # Free up memory after each field; probably good practice given high-res 3D datasets
    return zgv, fldgv | 4a3c4d004c5b84f9996c2e7702468a8ca63b1a62 | 3,635,987 |
def get_ma15(ticker):
    """Fetch the 15-day moving average of the daily closing price."""
    daily = pyupbit.get_ohlcv(ticker, interval="day", count=15)
    return daily['close'].rolling(15).mean().iloc[-1]
import os
def bootstrap(config_env=None):
    """Build, configure, and return a WSGI application using default
    settings from the avalon.settings module and optionally from the file
    referenced by the environmental variable.

    :param config_env: Name of an environment variable whose value points
        at an additional configuration file (optional).
    :return: Fully configured WSGI application
    :rtype: flask.Flask
    """
    # Note that we don't attempt to catch any potential exceptions during
    # bootstrap. Instead, we just let them bubble up and blow up whatever
    # context the application is being started in.
    app = _load_application(config_env)
    # Make sure to access the Flask application logger before trying to
    # configure it since Flask will remove all currently installed handlers
    # when initializing it. https://github.com/mitsuhiko/flask/issues/641
    log = app.logger
    avalon.app.factory.configure_logger(log, app.config)
    if config_env is not None:
        log.info(
            "Attempted to load config from var %s (%s)",
            config_env, os.getenv(config_env))
    # Register a Sentry client for log messages at ERROR or higher
    # if the client is installed and configured, otherwise this has
    # no effect.
    avalon.app.factory.configure_sentry_logger(log, app.config)
    # Get a StatsClient instance if installed and update the singleton
    # metrics bridge instance with it. This allows decorators executed
    # before the client is bootstrapped to talk to it once it's ready.
    stats_client = avalon.app.factory.new_stats_client(log, app.config)
    avalon.metrics.bridge.client = stats_client
    log.info("Connecting to database")
    database = avalon.app.factory.new_db_engine(app.config)
    database.connect()
    dao = avalon.app.factory.new_dao(database)
    id_cache = avalon.app.factory.new_id_cache(dao)
    log.info("Building in-memory stores")
    controller = avalon.app.factory.new_controller(dao, id_cache)
    controller.reload()
    # Custom JSON codecs used for all request/response bodies.
    app.json_decoder = avalon.web.response.AvalonJsonDecoder
    app.json_encoder = avalon.web.response.AvalonJsonEncoder
    request_path = app.config['REQUEST_PATH']
    path_resolver = _EndpointPathResolver(request_path)
    app.add_url_rule(path_resolver('version'), view_func=controller.get_version)
    app.add_url_rule(path_resolver('heartbeat'), view_func=controller.get_heartbeat)
    app.add_url_rule(path_resolver('albums'), view_func=controller.get_albums)
    app.add_url_rule(path_resolver('artists'), view_func=controller.get_artists)
    app.add_url_rule(path_resolver('genres'), view_func=controller.get_genres)
    app.add_url_rule(path_resolver('songs'), view_func=controller.get_songs)
    # Catch-all for any unexpected errors that ensures we still render
    # a JSON payload in the same format the client is expecting while
    # also logging the exception.
    app.register_error_handler(Exception, controller.handle_unknown_error)
    log.info(
        "Avalon Music Server %s running with request path %s as %s:%s "
        "using %s MB memory", avalon.__version__, request_path,
        avalon.util.get_current_uname(), avalon.util.get_current_gname(),
        avalon.util.get_mem_usage())
    return app | 2a565183fbda863d55f6813ff7dda07373df14df | 3,635,989 |
def urls(self, key, value):
    """Translate the urls field of a MARC record.

    :param key: the MARC field tag being processed.
    :param value: the raw MARC subfields dict.
    :raises UnexpectedValue: if subfield ``y`` describes anything other
        than an ``ebook`` volume link.
    :raises IgnoreKey: after a volume-specific URL has been stored in the
        migration payload, so the generic ``urls`` key is not also set.
    :returns: the result of the shared base handler for plain record URLs.
    """
    sub_y = clean_val("y", value, str, default="")
    sub_u = clean_val("u", value, str, req=True)
    _migration = self["_migration"]

    volume_info = extract_volume_info(sub_y) if sub_y else None
    if volume_info is None:
        # Plain record-level URL: delegate to the shared base handler.
        return urls_base(self, key, value)

    # URL attached to a specific volume of the record.
    description = volume_info["description"]
    volume_number = volume_info["volume"]
    if description != "ebook":
        # Only e-book volume links are supported by the migration.
        raise UnexpectedValue(subfield="y", message=" unsupported value")
    volume_obj = {
        "url": sub_u,
        "description": description,
    }
    _insert_volume(_migration, volume_number, volume_obj)
    raise IgnoreKey("urls")
def distance_at_t(points, t):
    """
    Determine the sum of all the distances of the Points at time t using the easy-to-calculate Manhattan metric.
    We could use the Euclidean metric but the extra computation is entirely unnecessary.
    Note: the sum runs over all ordered pairs (so each unordered pair is counted
    twice and self-distances contribute zero) — fine for comparing times.
    :param points: the list of Points
    :param t: the time t
    :return: the sum of the distances between all pairs of points for time t
    """
    positions_t = positions_at_t(points, t)
    # Manhattan distance is |dx| + |dy|; the original used p0[1] + p1[1] for
    # the y term, which is not a distance at all.
    return sum(abs(p0[0] - p1[0]) + abs(p0[1] - p1[1])
               for p0 in positions_t for p1 in positions_t)
def fixed_from_persian(p_date):
    """Return fixed date of Astronomical Persian date, p_date."""
    month = standard_month(p_date)
    day = standard_day(p_date)
    year = standard_year(p_date)
    # Year 0 does not exist in this calendar: positive years are shifted by
    # one before counting elapsed tropical years.
    elapsed_years = year - 1 if year > 0 else year
    new_year = persian_new_year_on_or_before(
        PERSIAN_EPOCH + 180 + ifloor(MEAN_TROPICAL_YEAR * elapsed_years))
    # The first seven months have 31 days; the remaining months have 30.
    if month <= 7:
        days_before_month = 31 * (month - 1)
    else:
        days_before_month = 30 * (month - 1) + 6
    return new_year - 1 + days_before_month + day
def fix_stddev_function_name(self, compiler, connection):
    """
    Render SQL using the MSSQL spellings 'STDEV'/'STDEVP' of the
    standard-deviation aggregates.
    """
    # STDDEV_POP maps to STDEVP; every other variant maps to STDEV.
    mssql_name = 'STDEVP' if self.function == 'STDDEV_POP' else 'STDEV'
    return self.as_sql(compiler, connection, function=mssql_name)
import functools
import inspect
def argdispatch(argument=None):
    """Type-dispatch decorator that can dispatch on a named argument.

    Parameters
    ----------
    argument : str, optional
        Symbolic name of the parameter whose runtime type selects the
        implementation.  When ``None`` (the default) dispatch happens on the
        first positional argument, exactly like ``functools.singledispatch``.

    Returns
    -------
    callable
        The dispatch decorator to apply to the generic function.

    Raises
    ------
    ValueError
        If *argument* is not a parameter of the decorated function.
    """
    # Define dispatch argument:
    dispatch_arg_name = argument

    def dispatch_decorator(func):
        """Dispatch closure decorator."""
        # Apply std decorator:
        dispatcher = functools.singledispatch(func)
        # Cache wrapped signature once; reused on every call and registration.
        wrapped_signature = inspect.signature(func)
        # Check argument correctness
        if dispatch_arg_name is not None and \
                dispatch_arg_name not in wrapped_signature.parameters:
            raise ValueError('unknown dispatch argument specified')

        def wrapper(*args, **kwargs):
            """Dispatch function wrapper."""
            if dispatch_arg_name is None:
                # Mimic functools.singledispatch behaviour.
                discriminator = args[0].__class__
            else:
                bound = wrapped_signature.bind(*args, **kwargs)
                # Fill in defaults so an omitted optional dispatch argument
                # dispatches on its default value's type.  (The original
                # raised a mislabelled TypeError here instead.)
                bound.apply_defaults()
                discriminator = bound.arguments[dispatch_arg_name].__class__
            return dispatcher.dispatch(discriminator)(*args, **kwargs)

        def register(cls, reg_func=None):
            """Registration method replacement.

            Enforces that the registered implementation shares the generic
            function's signature, so situations like the following never
            happen:

                @argdispatch('c')
                def test(a, obj, b=None, c=None):
                    pass

                @test.register(int)
                def _(a, obj):        # mismatched signature
                    pass

                test(1, 2)  # ----> would TypeError at call time
            """
            if reg_func is None:
                # Decorator form: @wrapper.register(SomeType).  The original
                # returned None here, breaking this (documented!) usage.
                return lambda f: register(cls, f)
            # Check signature match:
            reg_sig = inspect.signature(reg_func)
            if reg_sig != wrapped_signature:
                raise TypeError('registered method signature mismatch')
            return dispatcher.register(cls, reg_func)

        wrapper.register = register
        functools.update_wrapper(wrapper, func)
        return wrapper
    return dispatch_decorator
def calc_intersections(cost, weights):
    """
    ---------------------------------------------------------------
    |Function unused in this code but remains for debugging issues|
    ---------------------------------------------------------------
    Identify the layer weights whose cost at the zero centroid w_0 exceeds
    their cost at the negative (w_n) or positive (w_p) centroid, i.e. the
    weights the algorithm would NOT assign to w_0 (for a given Lambda).
    Parameters:
    -----------
    weights:
        Full precision weights of the given layer
    cost:
        Per-centroid cost tensors ordered [w_n, w_0, w_p]: distance of every
        weight to each centroid plus the centroid's information content,
        i.e. -log2(probability_centroid_assignment).
    Returns:
    --------
    n_intersection:
        Full precision weights that would be assigned to w_n
    p_intersection:
        Full precision weights that would be assigned to w_p
    """
    zero_cost = cost[1]
    # Boolean masks (as floats) of weights leaving the zero centroid.
    to_negative = (zero_cost > cost[0]).float()
    to_positive = (zero_cost > cost[2]).float()
    return to_negative * weights, to_positive * weights
def yaepblur(stream: Stream, *args, **kwargs) -> FilterableStream:
    """Apply the ffmpeg ``yaepblur`` filter to *stream*.

    https://ffmpeg.org/ffmpeg-filters.html#yaepblur
    """
    # The filter name is taken from this function's own name so the two can
    # never drift apart.
    return filter(stream, yaepblur.__name__, *args, **kwargs)
import transformers
import torch
def prepare_ml_span_inputs(model: transformers.EncoderDecoderModel, tokenizer: transformers.T5TokenizerFast,
                           encoder_input_attention: InputAndAttention, context: Tensor, chunk_size: int, device: int,
                           trim_context: int):
    """Prepare the inputs for calculating the most likely span. Do a forward pass on the encoder using the input and
    expand the context (passage) to a matrix holding all its suffices, to later be used as contextualization for all
    passage suffixes. Prepare the matching nll scores mask, see the "prepare_nlls_infs" method for elaboration.
    Return "encoder_outputs" as the encoder last hidden state duplicated to the chunk size shape. If the total context
    size is not divisible in the context size the returned "encoder_last_hidden_state" can be used later.
    :param model: The encoder-decoder model, used for performing an encoder forward pass once for every
    :param tokenizer: The tokenizer at use.
    :param encoder_input_attention: dataclass holding the input tokens and attention mask.
    :param context: The passage to be used for finding most likely span from.
    :param chunk_size: Go over the context (passage) suffixes in chunks to avoid memory issues.
    :param device: gpu index to be used (only single device supported).
    :param trim_context: Only allow answers of this size to be extracted; larger chunks can be used hence improved
    performace on the expanse of ignoring longer answers.
    :return: a tuple of (expanded_context, nlls_infs, encoder_outputs, encoder_last_hidden_state, attention_mask)
    """
    # A chunk can never hold more rows than the number of suffixes
    # (context.shape[0] - 1 -- presumably the empty suffix is excluded;
    # verify against expand_context).
    chunk_size = min(chunk_size, context.shape[0]-1)
    # NOTE(review): assumes the decoder BOS marker is the FIRST entry in the
    # tokenizer's additional special tokens -- confirm tokenizer setup.
    bos_token_id = tokenizer.additional_special_tokens_ids[0]
    expanded_context = expand_context(context, bos_token_id, device)
    nlls_infs = prepare_nlls_infs(expanded_context, device)
    # Cap candidate spans at trim_context tokens: smaller matrices, but any
    # answer longer than trim_context can no longer be extracted.
    expanded_context = expanded_context[:, :trim_context]
    nlls_infs = nlls_infs[:, :trim_context]
    input_ids = encoder_input_attention.input_ids.view(1, -1)
    attention_mask = encoder_input_attention.attention_mask.view(1, -1)
    encoder_last_hidden_state = None
    # Retry the encoder pass with a progressively shorter input, dropping 10
    # tokens from the front each time a RuntimeError occurs (presumably an
    # OOM or sequence-length failure -- TODO confirm).
    # NOTE(review): attention_mask is NOT trimmed alongside input_ids, so a
    # retried forward pass receives mismatched lengths -- likely a bug.
    while encoder_last_hidden_state is None and input_ids.shape[-1] > 0:
        try:
            encoder_last_hidden_state = model.encoder.forward(input_ids=input_ids,
                                                              attention_mask=attention_mask).last_hidden_state
        except RuntimeError as e:
            print(e)
            print(f'input of size {input_ids.shape} failed to pass encoder, reducing to {input_ids[:, 10:].shape}')
            input_ids = input_ids[:, 10:]
    # Duplicate the single encoder state chunk_size times so every suffix in
    # a chunk shares the same contextualization; the 3-tuple mimics the HF
    # encoder output structure expected downstream.
    encoder_outputs = (None, torch.cat([encoder_last_hidden_state] * chunk_size), None)
    return expanded_context, nlls_infs, encoder_outputs, encoder_last_hidden_state, attention_mask
def toolButton(pixmap='', orientation=0, size=None):
    """ toolbutton function with image
    :param pixmap: location of the image, or a ready QPixmap
    :type pixmap: string or QPixmap
    :param orientation: rotation in degrees clockwise
    :type orientation: int
    :param size: height and width of image in pixels, or a QSize
    :type size: int or QSize
    :return: the button
    :rtype: QToolButton
    """
    btn = QToolButton()
    if isinstance(pixmap, str):
        pixmap = QPixmap(pixmap)
    # Rotation is skipped when _isSVG is set -- presumably SVG icons are
    # rotated elsewhere or rotation would rasterize them; TODO confirm.
    if orientation != 0 and not _isSVG:
        transform = QTransform().rotate(orientation, Qt.ZAxis)
        pixmap = pixmap.transformed(transform, Qt.SmoothTransformation)
    btn.setIcon(QIcon(pixmap))
    btn.setFocusPolicy(Qt.NoFocus)
    btn.setStyleSheet('border: 0px;')
    if size is not None:
        # Accept either a bare pixel count (square icon) or a QSize.
        # isinstance replaces the type(size) == int anti-pattern, and the
        # two previously duplicated branches are merged.
        dims = QSize(size, size) if isinstance(size, int) else size
        btn.setFixedSize(dims)
        btn.setIconSize(dims)
    return btn
def process_hub_timeout(bit):
    """Return the HUB timeout."""
    # Bit '1' selects the long timeout; any other value means the short one.
    return '5 Seconds' if bit == '1' else '2 Seconds'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.