content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def GSSO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2.0.5", **kwargs
) -> Graph:
    """Return the GSSO graph, automatically retrieved (and optionally cached).

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    load_node_types = True
        Whether to load the node types.
    load_edge_weights = True
        Whether to load the edge weights.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    sort_tmp_dir = None
        Temporary directory used while sorting, if preprocessing sorts.
    verbose = 2
        Verbosity level of the retrieval process.
    cache = True
        Whether to cache the retrieved graph on disk.
    cache_path = None
        Path to store graphs
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
    cache_sys_var = "GRAPH_CACHE_DIR"
        Name of the environment variable holding the default cache path.
    version = "2.0.5"
        Version to retrieve
        The available versions are:
        - 2.0.5
    kwargs
        Extra arguments forwarded (as a plain dict) to the retrieval backend.
    """
    # NOTE(review): all arguments are passed *positionally* to
    # AutomaticallyRetrievedGraph, and `kwargs` is passed as a dict (not
    # unpacked) — presumably the constructor expects that; confirm its
    # signature before reordering anything here.
    return AutomaticallyRetrievedGraph(
        "GSSO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
        cache_path, cache_sys_var, kwargs
)() | ac5722afae3bb28321aa9d465873b12852b1f2f6 | 28,600 |
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Union

import numpy as np
def tb(
    data: pd.DataFrame,
    columns: Optional[Union[Sequence[str], pd.Index]] = None,
    subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
    """Compute the **Technology Commitment Questionnaire (TB – Technologiebereitschaft)**.
    It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
    * Technology Acceptance (Technikakzeptanz – ``TechAcc``): [1, 2, 3, 4]
    * Technology Competence Beliefs (Technikkompetenzüberzeugungen – ``TechComp``): [5, 6, 7, 8]
    * Technology Control Beliefs (Technikkontrollüberzeugungen – ``TechControl``): [9, 10, 11, 12]
    .. note::
        This implementation assumes a score range of [1, 5].
        Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
        beforehand.
    .. warning::
        Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
        questionnaire item columns, which typically also start with index 1!
    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
        a complete dataframe if ``columns`` parameter is supplied.
    columns : list of str or :class:`pandas.Index`, optional
        list with column names in correct order.
        This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
        passed as ``data``.
    subscales : dict, optional
        A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
        if only specific subscales should be computed.
    Returns
    -------
    :class:`~pandas.DataFrame`
        TB score
    Raises
    ------
    ValueError
        if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
    :exc:`~biopsykit.utils.exceptions.ValidationError`
        if number of columns does not match
    :exc:`~biopsykit.utils.exceptions.ValueRangeError`
        if values are not within the required score range
    References
    ----------
    Neyer, F. J. J., Felber, J., & Gebhardt, C. (2016). Kurzskala. Technikbereitschaft (TB)[Technology commitment].
    In *ZIS-Zusammenstellung sozialwissenschaftlicher Items und Skalen (ed.)*.
    """
    score_name = "TB"
    # Expected raw item range; validated below before any computation.
    score_range = [1, 5]
    # create copy of data
    data = data.copy()
    if columns is not None:
        # if columns parameter is supplied: slice columns from dataframe
        _assert_has_columns(data, [columns])
        data = data.loc[:, columns]
    if subscales is None:
        # No subscales requested: expect the complete 12-item questionnaire
        # and compute all three standard subscales.
        _assert_num_columns(data, 12)
        subscales = {"TechAcc": [1, 2, 3, 4], "TechComp": [5, 6, 7, 8], "TechControl": [9, 10, 11, 12]}
    _assert_value_range(data, score_range)
    # Reverse scores 5, 6, 7, 8
    # (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
    # by the subscale dict)
    data = _invert_subscales(data, subscales=subscales, idx_dict={"TechComp": [0, 1, 2, 3]}, score_range=score_range)
    # Per-subscale scores, assembled into a dataframe indexed like the input.
    tb_data = _compute_questionnaire_subscales(data, score_name, subscales)
    tb_data = pd.DataFrame(tb_data, index=data.index)
    if len(data.columns) == 12:
        # compute total score if all columns are present
        tb_data[score_name] = tb_data.sum(axis=1)
return tb_data | ac92f6ed7dd484e076b80db32fff6bc9fdd64619 | 28,601 |
def format_date(date: str):
    """
    Convert a date from MM/DD/YYYY (or M/D/YYYY) format to the
    zero-padded YYYY-MM-DD format required by sqlite.

    :param date: The date to modify, e.g. "1/2/2021" or "12/25/2021".
    :return: The ISO-style date string, e.g. "2021-01-02".
    """
    parts = date.split("/")
    # zfill(2) pads single-digit months/days so the result is valid for
    # sqlite date comparisons (the previous version emitted "2021-1-2").
    return "{}-{}-{}".format(parts[2], parts[0].zfill(2), parts[1].zfill(2))
def _doUpgradeApply(sUpgradeDir, asMembers):
    """
    Applies the directories and files from the upgrade into the current
    installation under g_ksValidationKitDir.

    Old files are first renamed (or copied) out of the way with a unique
    '-delete-me-<uuid>' suffix and remembered so they can be restored if a
    later step fails.

    Returns True on success, False on failure (after attempting a rollback).
    May raise exceptions on unexpected errors.
    """
    #
    # Create directories first since that's least intrusive.
    #
    for sMember in asMembers:
        if sMember[-1] == '/':
            sMember = sMember[len('testboxscript/'):];
            if sMember != '':
                sFull = os.path.join(g_ksValidationKitDir, sMember);
                if not os.path.isdir(sFull):
                    os.makedirs(sFull, 0o755);

    #
    # Move the files into place.
    #
    fRc = True;
    asOldFiles = [];
    for sMember in asMembers:
        if sMember[-1] != '/':
            sSrc = os.path.join(sUpgradeDir, sMember);
            sDst = os.path.join(g_ksValidationKitDir, sMember[len('testboxscript/'):]);

            # Move the old file out of the way first.
            sDstRm = None;
            if os.path.exists(sDst):
                testboxcommons.log2('Info: Installing "%s"' % (sDst,));
                sDstRm = '%s-delete-me-%s' % (sDst, uuid.uuid4(),);
                try:
                    os.rename(sDst, sDstRm);
                except Exception as oXcpt:
                    testboxcommons.log('Error: failed to rename (old) "%s" to "%s": %s' % (sDst, sDstRm, oXcpt));
                    try:
                        shutil.copy(sDst, sDstRm);
                    except Exception as oXcpt:
                        testboxcommons.log('Error: failed to copy (old) "%s" to "%s": %s' % (sDst, sDstRm, oXcpt));
                        fRc = False;  # was a silent break that still returned True
                        break;
                    try:
                        os.unlink(sDst);
                    except Exception as oXcpt:
                        testboxcommons.log('Error: failed to unlink (old) "%s": %s' % (sDst, oXcpt));
                        fRc = False;
                        break;
                # Remember the displaced file so rollback can restore it.
                # (Previously nothing was ever appended here, making the
                # rollback loop below a no-op.)
                asOldFiles.append(sDstRm);

            # Move/copy the new one into place.
            testboxcommons.log2('Info: Installing "%s"' % (sDst,));
            try:
                os.rename(sSrc, sDst);
            except Exception as oXcpt:
                testboxcommons.log('Warning: failed to rename (new) "%s" to "%s": %s' % (sSrc, sDst, oXcpt));
                try:
                    shutil.copy(sSrc, sDst);
                except Exception as oXcpt:
                    testboxcommons.log('Error: failed to copy (new) "%s" to "%s": %s' % (sSrc, sDst, oXcpt));
                    fRc = False;
                    break;

    #
    # Roll back on failure.
    #
    if fRc is not True:
        testboxcommons.log('Attempting to roll back old files...');
        for sDstRm in asOldFiles:
            sDst = sDstRm[:sDstRm.rfind('-delete-me')];
            testboxcommons.log2('Info: Rolling back "%s" (%s)' % (sDst, os.path.basename(sDstRm)));
            try:
                shutil.move(sDstRm, sDst);
            except Exception as oXcpt:
                testboxcommons.log('Error: failed to rollback "%s" onto "%s": %s' % (sDstRm, sDst, oXcpt));
        return False;
    return True;
def _parse_atat_lattice(lattice_in):
    """Parse an ATAT-style `lat.in` string.

    The result has three groups: (coordinate system) (lattice) (atoms),
    where the atom group is a list of subgroups, each holding a position
    vector and the atom name(s) at that site.
    """
    number = Regex(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?').setParseAction(lambda toks: [float(toks[0])])
    vec = Group(number + number + number)
    angle_triplet = vec
    vec_line = vec + Suppress(LineEnd())
    # Coordinate system: either three full vectors, or lengths + angles.
    coord_sys = Group((vec_line + vec_line + vec_line) | (vec + angle_triplet + Suppress(LineEnd())))
    lattice_vectors = Group(vec + vec + vec)
    atom_entry = Group(vec + Group(OneOrMore(Word(alphas, alphanums + '_'))))
    grammar = coord_sys + lattice_vectors + Group(OneOrMore(atom_entry))
    # parse the input string into the grouped structure described above
    return grammar.parseString(lattice_in)
from typing import Union
def datetime_attribute_timeseries(time_index: Union[pd.DatetimeIndex, TimeSeries],
                                  attribute: str,
                                  one_hot: bool = False) -> TimeSeries:
    """
    Returns a new TimeSeries with index `time_index` and one or more dimensions containing
    (optionally one-hot encoded) pd.DatetimeIndex attribute information derived from the index.

    Parameters
    ----------
    time_index
        Either a `pd.DatetimeIndex` attribute which will serve as the basis of the new column(s), or
        a `TimeSeries` whose time axis will serve this purpose.
    attribute
        An attribute of `pd.DatetimeIndex` - e.g. "month", "weekday", "day", "hour", "minute", "second"
    one_hot
        Boolean value indicating whether to add the specified attribute as a one hot encoding
        (results in more columns).

    Returns
    -------
    TimeSeries
        New datetime attribute TimeSeries instance.
    """
    if isinstance(time_index, TimeSeries):
        time_index = time_index.time_index()
    raise_if_not(hasattr(pd.DatetimeIndex, attribute), '"attribute" needs to be an attribute '
                 'of pd.DatetimeIndex', logger)
    # Number of distinct values per supported attribute (used as 1..N columns).
    # NOTE(review): pandas 'hour' and 'weekday' are 0-based, but columns are
    # forced to the range 1..N below — confirm that dropping the 0 column is
    # intended for those attributes.
    num_values_dict = {
        'month': 12,
        'day': 31,
        'weekday': 7,
        'hour': 24,
        'quarter': 4
    }
    values = getattr(time_index, attribute)
    if one_hot:
        # Fixed message: previously read "...not supportedwith one-hot..."
        raise_if_not(attribute in num_values_dict, "Given datetime attribute not supported "
                     "with one-hot encoding.", logger)
        values_df = pd.get_dummies(values)
        # fill missing columns (in case not all values appear in time_index)
        for i in range(1, num_values_dict[attribute] + 1):
            if i not in values_df.columns:
                values_df[i] = 0
        # enforce a deterministic 1..N column order
        values_df = values_df[range(1, num_values_dict[attribute] + 1)]
    else:
        values_df = pd.DataFrame(values)
    values_df.index = time_index
    if one_hot:
        values_df.columns = [attribute + '_' + str(column_name) for column_name in values_df.columns]
    return TimeSeries(values_df)
def hexscale_from_cmap(cmap, N):
    """
    Sample a colormap at N evenly spaced points and pack each color as 0xRRGGBB.

    Parameters
    ----------
    cmap : function
        Maps a scalar in [0, 1] to an rgb(a) tuple with components in [0, 1],
        e.g. a pyplot colormap such as plt.cm.viridis.
    N : int
        Number of sample points (must be >= 2).

    Returns
    -------
    list of ints, each encoding one sampled color as 0xRRGGBB.
    """
    positions = np.arange(N) / (N - 1)
    scale = []
    for color in map(cmap, positions):
        red = round(255 * color[0])
        green = round(255 * color[1])
        blue = round(255 * color[2])
        scale.append(red * 65536 + green * 256 + blue)
    return scale
def getFirebaseData(userID):
    """
    Fetch the user's alarm data from the Firebase 'pillboxes' collection.

    Args:
        userID (string): the id of the user
    Returns:
        The 'alarms' entry of the user's Firestore document.
    """
    cred = credentials.Certificate("serviceAccountKey.json")
    app = firebase_admin.initialize_app(cred)
    db = firestore.client()
    document = db.collection('pillboxes').document(userID)
    user_info = document.get().to_dict()
    alarms = user_info['alarms']
    # Tear down the temporary app so repeated calls can re-initialize.
    firebase_admin.delete_app(app)
    return alarms
def gen_tracer(code, f_globals):
    """ Generate a trace function from a code object.
    Parameters
    ----------
    code : CodeType
        The code object created by the Enaml compiler.
    f_globals : dict
        The global scope for the returned function.
    Returns
    -------
    result : FunctionType
        A new function with optimized local variable access
        and instrumentation for invoking a code tracer.
    """
    bc_code = bc.Bytecode.from_code(code)
    # Rewrite variable access in the bytecode for fast local lookups.
    optimize_locals(bc_code)
    # Instrument the bytecode so operations are reported to a code tracer.
    bc_code = inject_tracing(bc_code)
    # Clear the NEWLOCALS flag (XOR with the masked bit only turns it off).
    # NOTE(review): presumably so the function does not create a fresh locals
    # namespace and instead uses the scope Enaml supplies — confirm.
    bc_code.flags ^= (bc_code.flags & bc.CompilerFlags.NEWLOCALS)
    # Prepend a hidden first argument through which the tracer is passed in.
    bc_code.argnames = ['_[tracer]'] + bc_code.argnames
    bc_code.argcount += 1
    new_code = bc_code.to_code()
return FunctionType(new_code, f_globals) | abb9e043ad12f4ced014883b6aee230095b63a18 | 28,608 |
def MAD(AnalogSignal):
    """Median absolute deviation (MAD) of an AnalogSignal.

    Parameters
    ----------
    AnalogSignal :
        Object exposing a ``magnitude`` array and a ``units`` attribute
        (e.g. a ``neo.AnalogSignal``).

    Returns
    -------
    The MAD of the signal's samples, carrying the signal's units.
    """
    X = AnalogSignal.magnitude
    # The deprecated scipy aliases sp.median/sp.absolute were removed from
    # modern scipy; numpy provides the same functions.
    mad = np.median(np.absolute(X - np.median(X))) * AnalogSignal.units
    return mad
def angular_frequency(vacuum_wavelength):
    r"""Angular frequency :math:`\omega = 2\pi c / \lambda`.

    The docstring is a raw string: ``\omega``/``\pi``/``\lambda`` are invalid
    escape sequences in a normal string literal.

    Args:
        vacuum_wavelength (float): Vacuum wavelength in length unit
    Returns:
        Angular frequency in the units of c=1 (time units=length units). This is at the same time the vacuum wavenumber.
    """
    return 2 * np.pi / vacuum_wavelength
def web_urls():
    """Builds and returns the web_urls for web.py.

    Returns:
        A flat tuple of (url-pattern, handler) pairs, as web.py expects.
    """
    # The literal is already a tuple; the previous tuple(urls) call was a
    # redundant identity conversion.
    return (
        '/export/?', RestHandler,
        '/export/bdbag/?', ExportBag,
        '/export/bdbag/([^/]+)', ExportRetrieve,
        '/export/bdbag/([^/]+)/(.+)', ExportRetrieve,
        '/export/file/?', ExportFiles,
        '/export/file/([^/]+)', ExportRetrieve,
        '/export/file/([^/]+)/(.+)', ExportRetrieve,
    )
import torch
def box_cxcywh_norm_to_cxcywh(box: TensorOrArray, height: int, width: int) -> TensorOrArray:
    """Convert boxes from normalized (cx, cy, w, h) to absolute (cx, cy, w, h).

    (cx, cy) is the box center; (w, h) its width and height.  Normalized
    values lie in `[0.0, 1.0]`, e.g. `x_norm = absolute_x / image_width` and
    `height_norm = absolute_height / image_height`.

    Args:
        box (TensorOrArray[*, 4]):
            Boxes in normalized (cx, cy, w, h) format.
        height (int):
            Image height in pixels.
        width (int):
            Image width in pixels.

    Returns:
        box (TensorOrArray[*, 4]):
            Boxes in absolute (cx, cy, w, h) format.

    Raises:
        ValueError: if `box` is neither a Tensor nor an ndarray.
    """
    box = upcast(box)
    cx_frac, cy_frac, w_frac, h_frac, *_ = box.T
    # Scale the fractional coordinates by the image dimensions.
    abs_cx = cx_frac * width
    abs_cy = cy_frac * height
    abs_w = w_frac * width
    abs_h = h_frac * height
    if isinstance(box, Tensor):
        return torch.stack((abs_cx, abs_cy, abs_w, abs_h), -1)
    if isinstance(box, np.ndarray):
        return np.stack((abs_cx, abs_cy, abs_w, abs_h), -1)
    raise ValueError(f"box must be a `Tensor` or `np.ndarray`.")
def lyrics_from_url(url):
    """Return a tuple (song name, author, lyrics) for the given lyrics URL."""
    source = identify_url(url)
    # Per-source (lyrics extractor, info extractor) pairs.
    extractors = {
        'letras': (lyrics_from_letrasmus, info_from_letrasmus),
        'vagalume': (lyrics_from_vagalume, info_from_vagalume)
    }
    html = html_from_url(url)
    if source not in extractors:
        raise Exception("Unknow url's source.")
    lyrics_extractor, info_extractor = extractors[source]
    info = info_extractor(html)
    return (info[0], info[1], lyrics_extractor(html))
import json
def _get_pycons():
    """Load the PyCon data file and return its entries as PyCon objects."""
    with open(pycons_file, "r", encoding="utf-8") as f:
        raw_entries = json.load(f)
    pycons = []
    for entry in raw_entries:
        pycons.append(
            PyCon(
                entry["name"],
                entry["city"],
                entry["country"],
                parse(entry["start_date"]),
                parse(entry["end_date"]),
                entry["url"],
            )
        )
    return pycons
def f_function(chromosome):
    """Fitness function: evaluate f(x) = 15*x - x^2 on the chromosome's first integer."""
    x0 = chromosome.convert_to_integer()[0]
    # To search for the minimum instead, use: ((15 * x0 - x0 * x0) * -1) + 1000
    return x0 * (15 - x0)
def _GetRevsAroundRev(data_series, revision):
    """Gets a list of revisions from before to after a given revision.

    Args:
        data_series: A list of (revision, value).
        revision: A revision number.

    Returns:
        A list of revisions around `revision`: up to
        `_MAX_SEGMENT_SIZE_AROUND_ANOMALY` entries on each side, or just
        `[revision]` when that limit is falsy.  If `revision` is not found,
        the segment is taken from the start of the series.
    """
    if not _MAX_SEGMENT_SIZE_AROUND_ANOMALY:
        return [revision]
    # Locate the revision in the series (Python-2 `xrange` replaced with
    # `enumerate` for Python 3 compatibility; behavior is unchanged).
    middle_index = 0
    for index, point in enumerate(data_series):
        if point[0] == revision:
            middle_index = index
            break
    start_index = max(0, middle_index - _MAX_SEGMENT_SIZE_AROUND_ANOMALY)
    end_index = middle_index + _MAX_SEGMENT_SIZE_AROUND_ANOMALY + 1
    series_around_rev = data_series[start_index:end_index]
    return [s[0] for s in series_around_rev]
def get_output_length():
    """Return the length of the convnet output (last dim of the final layer's output shape)."""
    final_layer = conv_base.layers[-1]
    return final_layer.output_shape[-1]
def rules(command, working_directory=None, root=True, **kargs):
    """
    Main entry point for build_rules.py.
    When ``makeprojects``, ``cleanme``, or ``buildme`` is executed, they will
    call this function to perform the actions required for build customization.
    The parameter ``working_directory`` is required, and if it has no default
    parameter, this function will only be called with the folder that this
    file resides in. If there is a default parameter of ``None``, it will be
    called with any folder that it is invoked on. If the default parameter is a
    directory, this function will only be called if that directory is desired.
    The optional parameter of ``root``` alerts the tool if subsequent processing
    of other ``build_rules.py`` files are needed or if set to have a default
    parameter of ``True``, processing will end once the calls to this
    ``rules()`` function are completed.
    Commands are 'build', 'clean', 'prebuild', 'postbuild', 'project',
    'configurations'
    Note: only 'clean' is handled below; any other command falls through and
    implicitly returns None.
    Arg:
        command: Command to execute.
        working_directory: Directory for this function to operate on.
        root: If True, stop execution upon completion of this function
        kargs: Extra arguments specific to each command.
    Return:
        Zero on no error or no action.
    """
    if command == 'clean':
        # Remove build output and IDE metadata directories.
        burger.clean_directories(
            working_directory, ('bin', 'temp', 'obj', 'Properties', '.vs'))
        # Remove generated key and per-user settings files.
        burger.clean_files(working_directory, ('Key.snk', '*.user', '*.suo'))
return 0 | 9456822c0956fa847e19917b735d1a6680d0961a | 28,618 |
def get_py_func_body(line_numbers, file_name, annot):
    """ Function to get method/function body from files
        @parameters
        file_name: Path to the file
        line_numbers: function/method line numbers
        annot: Annotation condition (Ex: @Test), or None for no filtering
        @return
        This function returns python function/method names and definitions in the given file"""
    func_name = []
    func_body = []
    # Use a context manager so the file handle is closed promptly (the
    # original `open(...)` inside the comprehension was never closed).
    with open(file_name, encoding='utf-8', errors='ignore') as source_file:  # pragma: no mutate
        line_data = [line.rstrip() for line in source_file]
    data, data_func_name = process_py_methods(file_name, line_numbers, line_data)
    if annot is not None:
        data_func_name, data = get_py_annot_methods(file_name, data_func_name, data, annot)
    # Plain truthiness replaces the old `len(...).__trunc__() != 0` check.
    if data_func_name:
        func_name, func_body = process_py_func_body(data, data_func_name)
    return func_name, func_body
from typing import Any
from typing import Union
def extract_optional_annotation(annotation: Any) -> Any:
    """
    Return the inner annotation when *annotation* is Optional[X]
    (i.e. Union[X, None]); otherwise return None.
    """
    origin = getattr(annotation, "__origin__", None)
    args = getattr(annotation, "__args__", None)
    is_optional = (
        origin is Union
        and args is not None
        and len(args) == 2
        and args[-1] is type(None)
    )
    return extract_inner_annotation(annotation) if is_optional else None
import itertools
import json
def combine_pred_and_truth(prediction, truth_file):
    """
    Combine the predicted labels and the ground truth labels for testing purposes.

    :param prediction: The prediction labels (one entry per instance; only
        entry[0] is used).
    :param truth_file: Path to the ground truth file, one JSON object per line
        with a 'tags' key.
    :return: A list of [predicted_label, truth_labels] pairs, truncated to the
        shorter of the two inputs.
    """
    prediction_and_truth = []
    # `with` closes the handle (previously leaked); iterating the file
    # directly avoids loading all lines; `zip` replaces the removed
    # Python-2-only `itertools.izip` with identical truncation semantics.
    with open(truth_file, 'r') as f:
        for pred_labels, truth_line in zip(prediction, f):
            instance = json.loads(truth_line.strip('\r\n'))
            truth_labels = instance['tags']
            prediction_and_truth.append([pred_labels[0], truth_labels])
    return prediction_and_truth
def calc_cogs_time_series(days, cogs_annual):
    """
    Build a daily Cost of Goods Sold series over `days` days.

    Notes
    -----
    Can adjust for days/weekly/monthly/annually in the future - ASSUMED:
    CONSUMABLES PURCHASED MONTHLY: a monthly amount is booked on every
    DAYS_IN_MONTH-th day (day 0, DAYS_IN_MONTH, 2*DAYS_IN_MONTH, ...),
    with 0 on all other days.
    """
    return [
        cogs_annual / YEARLY_TO_MONTHLY_31 if day % DAYS_IN_MONTH == 0 else 0
        for day in range(days)
    ]
import os
def make_regridder_L2L(
        llres_in, llres_out, weightsdir='.', reuse_weights=False,
        in_extent=[-180, 180, -90, 90],
        out_extent=[-180, 180, -90, 90]):
    """
    Create an xESMF regridder between two lat/lon grids
    Args:
        llres_in: str
            Resolution of input grid in format 'latxlon', e.g. '4x5'
        llres_out: str
            Resolution of output grid in format 'latxlon', e.g. '4x5'
    Keyword Args (optional):
        weightsdir: str
            Directory in which to create xESMF regridder NetCDF files
            Default value: '.'
        reuse_weights: bool
            Set this flag to True to reuse existing xESMF regridder NetCDF files
            Default value: False
        in_extent: list[float, float, float, float]
            Describes minimum and maximum latitude and longitude of input grid
            in the format [minlon, maxlon, minlat, maxlat]
            Default value: [-180, 180, -90, 90]
        out_extent: list[float, float, float, float]
            Desired minimum and maximum latitude and longitude of output grid
            in the format [minlon, maxlon, minlat, maxlat]
            Default value: [-180, 180, -90, 90]
    Returns:
        regridder: xESMF regridder
            regridder object between the two specified grids
    """
    # Build the input and output grid descriptions.
    # NOTE(review): the two make_grid_LL calls pass different argument counts
    # (in_extent + out_extent vs. out_extent only) — confirm against that
    # function's signature that this is intended.
    llgrid_in = make_grid_LL(llres_in, in_extent, out_extent)
    llgrid_out = make_grid_LL(llres_out, out_extent)
    # Global grids get a short weight-file name; regional grids encode the
    # extents in the file name so different regions don't collide.
    if in_extent == [-180, 180, -90,
                     90] and out_extent == [-180, 180, -90, 90]:
        weightsfile = os.path.join(
            weightsdir, 'conservative_{}_{}.nc'.format(
                llres_in, llres_out))
    else:
        in_extent_str = str(in_extent).replace(
            '[', '').replace(
            ']', '').replace(
            ', ', 'x')
        out_extent_str = str(out_extent).replace(
            '[', '').replace(
            ']', '').replace(
            ', ', 'x')
        weightsfile = os.path.join(
            weightsdir, 'conservative_{}_{}_{}_{}.nc'.format(
                llres_in, llres_out, in_extent_str, out_extent_str))
    if not os.path.isfile(weightsfile) and reuse_weights:
        #prevent error with more recent versions of xesmf
        reuse_weights=False
    # NOTE(review): the except branch retries the identical call — presumably
    # a workaround for a transient xESMF failure while the weight file is
    # first created; confirm before simplifying.
    try:
        regridder = xe.Regridder(
            llgrid_in,
            llgrid_out,
            method='conservative',
            filename=weightsfile,
            reuse_weights=reuse_weights)
    except BaseException:
        regridder = xe.Regridder(
            llgrid_in,
            llgrid_out,
            method='conservative',
            filename=weightsfile,
            reuse_weights=reuse_weights)
return regridder | 164efa1493845e004d67f86f6e364ec77bcc1409 | 28,623 |
def stations():
    """ Return all station names as a JSON list. """
    results = session.query(station.name).all()
    # Flatten the list of 1-tuples returned by the query.
    names = list(np.ravel(results))
    return jsonify(names)
def test_run_sht_rudeadyet_default(tmpdir) -> int:
    """
    Purpose:
        Verify that a default sht-rudeadyet run completes.
    Args:
        tmpdir: pytest-provided temporary directory
    Returns:
        (Int): 0 if the run passed, -1 otherwise
    """
    attack_name = "sht_rudeadyet"
    config_path = "configs/mw_locust-sht_rudeadyet.json"
    return magicwand_run(tmpdir, attack_name, config_path)
def _clean_markdown_cells(ntbk):
"""Clean up cell text of an nbformat NotebookNode."""
# Remove '#' from the end of markdown headers
for cell in ntbk.cells:
if cell.cell_type == "markdown":
cell_lines = cell.source.split('\n')
for ii, line in enumerate(cell_lines):
if line.startswith('#'):
cell_lines[ii] = line.rstrip('#').rstrip()
cell.source = '\n'.join(cell_lines)
return ntbk | 8b34ff6713a323340ea27f6d8f498a215ca9d98a | 28,626 |
import asyncio
def get_thread_wrapper(target, name):
    """Returns a target thread that prints unexpected exceptions to the logging.

    Args:
        target: Func or coroutine to wrap.
        name(str): Task name.
    Returns:
        The wrapped callable.  If `target` returns an awaitable, the wrapper
        drives it to completion on a fresh per-thread event loop.
    """
    @wraps(target)
    def wrapper(*args, **kwargs):
        try:
            result = target(*args, **kwargs)
        except Exception:
            # Log with traceback so failures inside worker threads are visible.
            logger.warning(f"Unexpected exception in Tamarco thread {name}", exc_info=True)
            raise
        else:
            if is_awaitable(result):
                # Coroutine target: create a new event loop for this thread,
                # make it the thread's current loop, and run the coroutine to
                # completion, with observe_exceptions logging any failure.
                thread_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(thread_loop)
                coro = result
                result = thread_loop.run_until_complete(observe_exceptions(coro, name))
            return result
return wrapper | 75ddf5ca81825769e51fd8ed4f850ec9db18a31e | 28,627 |
from sys import path
def scale_pairs(arr, scalgo="deseq"):
    """Scales observed paired columns of read-overlap counts.

    - arr(``path``) input array regions x (markX in A, markX in B, markY in A, markY in B ...)
    - scalgo(``str``) scaling algorithm

    Returns the path of the written, scaled array file.
    """
    chk_exit(*inp_file(path(arr)))
    with open(arr) as fh:
        names = fh.readline().strip().split("\t")
        raw = np.loadtxt(fh, delimiter="\t")
    scaled = scapair(raw, scalgo)
    ofn = arr.replace(".arr", "_%s.arr" % (scalgo,))
    # Open in text mode: the header is a str and np.savetxt writes text; the
    # previous "wb" mode raised TypeError on the header write under Python 3.
    # The context manager also guarantees the handle is closed.
    with open(ofn, "w") as wh:
        wh.write("\t".join(names) + "\n")
        np.savetxt(wh, scaled, delimiter="\t")
    log("saved: %s" % ofn)
    return ofn
def getStructType(ea):
    """
    Get type information from an ea. Used to get the structure type id.

    Returns the populated opinfo_t when IDA reports operand info for the
    first operand at `ea`, otherwise None.
    """
    flags = idaapi.getFlags(ea)
    ti = idaapi.opinfo_t()
    if idaapi.get_opinfo(ea, 0, flags, ti) is not None:
        return ti
    return None
def confirm_email(token):
    """
    GET endpoint that confirms the new officer user. This endpoint link is normally within
    the confirmation email.
    """
    # Translate the token back into the email it was issued for
    # (None means the token is invalid or forged).
    club_email = flask_exts.email_verifier.confirm_token(token, 'confirm-email')
    if club_email is None:
        raise JsonError(status='error', reason='The confirmation link is invalid.', status_=404)
    potential_user = NewOfficerUser.objects(email=club_email).first()
    if potential_user is None:
        raise JsonError(status='error', reason='The user matching the email does not exist.', status_=404)
    # First, revoke the given email token
    flask_exts.email_verifier.revoke_token(token, 'confirm-email')
    # Already confirmed: skip straight to the login redirect.
    if potential_user.confirmed:
        return redirect(LOGIN_URL + LOGIN_CONFIRMED_EXT)
    confirmed_on = pst_right_now()
    # Reject confirmations arriving after the configured expiry window
    # (measured from registration time).
    if confirmed_on - potential_user.registered_on > CurrentConfig.CONFIRM_EMAIL_EXPIRY:
        raise JsonError(status='error', reason='The account associated with the email has expired. Please request for a new confirmation email by logging in.')
    # Then, set the user and club to 'confirmed' if it's not done already
    potential_user.confirmed = True
    potential_user.confirmed_on = confirmed_on
    potential_user.save()
return redirect(LOGIN_URL + LOGIN_CONFIRMED_EXT) | 2e8feb0607361ec0d53b3b62766a83f131ae75c6 | 28,630 |
def xcrun_field_value_from_output(field: str, output: str) -> str:
    """
    Get value of a given field from xcrun output.

    Scans the output line by line for the first line (after stripping
    whitespace) that begins with "<field>: " and returns the remainder.
    If the field is not found an empty string is returned.
    """
    prefix = f"{field}: "
    for raw_line in output.splitlines():
        stripped = raw_line.strip()
        if stripped.startswith(prefix):
            return stripped[len(prefix):]
    return ''
def get_all_markets_num():
    """
    Return all distinct market pairs on the exchange as "stockId/moneyId"
    strings, e.g. "5/2".
    """
    pairs = {
        str(entry["stockId"]) + "/" + str(entry["moneyId"])
        for entry in res["result"]
    }
    return list(pairs)
def mae(s, o):
    """
    Mean Absolute Error between simulated and observed series.

    input:
        s: simulated
        o: observed
    output:
        mean absolute error, after NaN pairs are removed by filter_nan
    """
    s, o = filter_nan(s, o)
    return np.mean(np.abs(s - o))
def error_mult_gap_qa_atom(
    df_qc, df_qats, target_label, target_charge=0,
    basis_set='aug-cc-pV5Z', use_ts=True,
    max_qats_order=4, ignore_one_row=False,
    considered_lambdas=None, return_qats_vs_qa=False):
    """Computes QATS errors in system multiplicity gaps.
    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        Quantum chemistry dataframe.
    df_qats : :obj:`pandas.DataFrame`, optional
        QATS dataframe.
    target_label : :obj:`str`
        Atoms in the system. For example, ``'f.h'``.
    target_charge : :obj:`int`, optional
        The system charge. Defaults to ``0``.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'aug-cc-pV5Z'``.
    use_ts : :obj:`bool`, optional
        Use a Taylor series approximation to make QATS-n predictions
        (where n is the order). Defaults to ``True``.
    max_qats_order : :obj:`int`, optional
        Maximum order to use for the Taylor series. Defaults to ``4``.
    ignore_one_row : :obj:`bool`, optional
        Used to control errors in ``state_selection`` when there is missing
        data (i.e., just one state). If ``True``, no errors are raised. Defaults
        to ``False``.
    considered_lambdas : :obj:`list`, optional
        Allows specification of lambda values that will be considered. ``None``
        will allow all lambdas to be valid, ``[1, -1]`` would only report
        predictions using references using a lambda of ``1`` or ``-1``.
    return_qats_vs_qa : :obj:`bool`, optional
        Return the difference of QATS-n - QATS predictions; i.e., the error of
        using a Taylor series approximation with repsect to the alchemical
        potential energy surface. Defaults to ``False``.
    Returns
    -------
    :obj:`pandas.DataFrame`
    """
    # Only single atoms are handled by this routine.
    if len(df_qc.iloc[0]['atomic_numbers']) == 2:
        raise ValueError('Dimers are not supported.')
    # Reference multiplicity gap from quantum chemistry, converted to eV.
    qc_prediction = hartree_to_ev(
        mult_gap_qc_atom(
            df_qc, target_label, target_charge=target_charge,
            basis_set=basis_set, ignore_one_row=ignore_one_row
        )
    )
    # QATS-based predictions of the same gap (optionally restricted to the
    # requested lambda values).
    qats_predictions = mult_gap_qa_atom(
        df_qc, df_qats, target_label, target_charge=target_charge,
        basis_set=basis_set, use_ts=use_ts, ignore_one_row=ignore_one_row,
        considered_lambdas=considered_lambdas,
        return_qats_vs_qa=return_qats_vs_qa
    )
    qats_predictions = {key:hartree_to_ev(value) for (key,value) in qats_predictions.items()} # Converts to eV
    if use_ts:
        # One row per Taylor-series order: QATS-0 ... QATS-max_qats_order.
        qats_predictions = pd.DataFrame(
            qats_predictions, index=[f'QATS-{i}' for i in range(max_qats_order+1)]
        ) # Makes dataframe
    else:
        # Single-row dataframe for the direct (non-Taylor) QATS prediction.
        qats_predictions = pd.DataFrame(
            qats_predictions, index=['QATS']
        ) # Makes dataframe
    if return_qats_vs_qa:
        return qats_predictions
    else:
        # Signed error of each prediction relative to the QC reference.
        qats_errors = qats_predictions.transform(lambda x: x - qc_prediction)
return qats_errors | 3facdc35f0994eb21a74cf0b1fb277db2a70a14b | 28,634 |
def frequency_weighting(frequencies, kind="A", **kw):
    """Compute the weighting of a set of frequencies.

    Parameters
    ----------
    frequencies : scalar or np.ndarray [shape=(n,)]
        One or more frequencies (in Hz).
    kind : str
        Which weighting curve to apply, e.g. ``'A'``, ``'B'``, ``'C'``,
        ``'D'``, ``'Z'``.  Case-insensitive.
    **kw
        Additional keyword arguments (e.g. ``min_db``) forwarded to the
        underlying weighting function.

    Returns
    -------
    weighting : scalar or np.ndarray [shape=(n,)]
        ``weighting[i]`` is the weighting of ``frequencies[i]``.

    See Also
    --------
    perceptual_weighting
    multi_frequency_weighting
    A_weighting
    B_weighting
    C_weighting
    D_weighting
    """
    lookup = kind.upper() if isinstance(kind, str) else kind
    weighting_fn = WEIGHTING_FUNCTIONS[lookup]
    return weighting_fn(frequencies, **kw)
async def materialize_classpath(request: MaterializedClasspathRequest) -> MaterializedClasspath:
    """Resolve, fetch, and merge various classpath types to a single `Digest` and metadata."""
    # Resolve each set of artifact requirements into a Coursier lockfile.
    artifact_requirements_lockfiles = await MultiGet(
        Get(CoursierResolvedLockfile, ArtifactRequirements, artifact_requirements)
        for artifact_requirements in request.artifact_requirements
    )
    # Fetch classpath entries for both the explicitly supplied lockfiles and
    # the lockfiles just resolved from the artifact requirements.
    lockfile_and_requirements_classpath_entries = await MultiGet(
        Get(
            ResolvedClasspathEntries,
            CoursierResolvedLockfile,
            lockfile,
        )
        for lockfile in (*request.lockfiles, *artifact_requirements_lockfiles)
    )
    # Merge every entry's digest into one snapshot.
    merged_snapshot = await Get(
        Snapshot,
        MergeDigests(
            classpath_entry.digest
            for classpath_entries in lockfile_and_requirements_classpath_entries
            for classpath_entry in classpath_entries
        ),
    )
    # Optionally nest the merged files under the requested prefix directory.
    if request.prefix is not None:
        merged_snapshot = await Get(Snapshot, AddPrefix(merged_snapshot.digest, request.prefix))
return MaterializedClasspath(content=merged_snapshot) | 367a0a49acfb16e3d7c0c0f1034ef946e75928a8 | 28,636 |
def Real_Entropy(timeseries):
    """ Calculates an approximation of the time-correlated (real) entropy.

    Input:
        timeseries: list of strings or numbers,
            e.g. ['1', '2', '3'] or [1, 2, 3]
    Output:
        approximation of Real Entropy (time-correlated entropy), e.g. 1.09

    The estimate is  ln(n) * n / sum(L_i)  where L_i is the length of the
    shortest subsequence starting at position i that has not appeared
    earlier in the sequence (Lempel-Ziv style estimator).
    """
    def _shortest_new_subsequence(symbols, i):
        """Length of the shortest subsequence starting at index i that has
        not appeared before index i (capped at the end of the sequence).

        Matching is done on the concatenated string representation, which
        preserves the original implementation's behavior (including for
        multi-character symbols)."""
        history = "".join(symbols[:i])
        length = 1
        while "".join(symbols[i:i + length]) in history and i + length <= len(symbols) - 1:
            length += 1
        return length

    symbols = [str(x) for x in timeseries]
    # Position 0 always contributes length 1 (nothing precedes it).
    # The pointless identity map(lambda length: length, ...) of the original
    # is gone; the values are used directly.
    shortest_substring_lengths = [1] + [
        _shortest_new_subsequence(symbols, i) for i in range(1, len(symbols))
    ]
    return np.log(len(symbols)) * len(symbols) / np.sum(shortest_substring_lengths)
def hom(X, mode):
    """
    Convert transformation X (translation, rotation or rigid motion)
    to homogeneous form.

    Input:
        X: tf.float32 tensor, which can be either
            [B, 3] float32, 3D translation vectors
            [B, 3, 3] float32, rotation matrices
            [B, 3, 4] float32, rigid motion matrices
        mode: one of 'T', 'R' or 'P' denoting the options above,
            'T' is for translation
            'R' is for rotation
            'P' is for rigid motion
    Output:
        H: [B, 4, 4] float32, the transformation in homogeneous form
    Raises:
        ValueError: if `mode` is not one of 'T', 'R' or 'P'.
    """
    # Broadcastable pad supplying the homogeneous bottom-right 1.
    hom_pad = tf.constant([[[0., 0., 0., 0.],
                            [0., 0., 0., 0.],
                            [0., 0., 0., 0.],
                            [0., 0., 0., 1.]]])
    if mode == 'T':
        # Translation vectors become the last column of the 4x4 matrix.
        X = X[:, :, tf.newaxis]
        padding = [[0, 0], [0, 1], [3, 0]]
    elif mode == 'R':
        padding = [[0, 0], [0, 1], [0, 1]]
    elif mode == 'P':
        padding = [[0, 0], [0, 1], [0, 0]]
    else:
        # Previously an unknown mode crashed with UnboundLocalError on
        # `padding`; fail with a clear message instead.
        raise ValueError("mode must be one of 'T', 'R' or 'P', got %r" % (mode,))
    H = tf.pad(X, padding) + hom_pad
    return H
import re
def parse_sl(comments: str):
    """Parse order comments for a stop-loss (SL) price.

    Looks for an annotation like ``SL @ 12.50`` or ``SL@9.5`` and returns
    the price as a string, or None when no stop-loss is found.
    """
    # "SL", optional single space, "@", optional single space.
    sl_at = r"(SL\s?@\s?)"
    # Up to 3 integer digits, a dot, 1-2 decimals, followed by either
    # whitespace/end-of-string or a closing parenthesis.
    sl_price = r"([0-9]{0,3}\.[0-9]{1,2}((?!\S)|(?=[)])))"
    match = re.search(sl_at + sl_price, comments)
    # Group 2 is the numeric price captured by sl_price; the original also
    # called match.groups() and discarded the result (dead code, removed).
    return match.group(2) if match else None
def nhpp_thinning(rate_fn, tmax, delta, lbound=None):
    """Simulate a nonhomogeneous Poisson process with intensity `rate_fn`
    over (0, tmax] using the thinning algorithm of Lewis and Shedler (1978).

    rate_fn: a function `f(t)` of one variable `t` that returns a finite
        non-negative value for `t` in the time range.
    tmax: right bound of time (event times fall in the (0, tmax] interval).
    delta: spacing at which rate_fn is evaluated when deriving the bound.
    lbound: upper bound on the intensity, used as the rate of the
        homogeneous process being thinned. When omitted, the maximum of
        rate_fn over the grid (0, delta, 2*delta, ...) is used.
    """
    grid = np.arange(0, tmax + delta, delta)
    if lbound is None:
        lbound = max(rate_fn(t) for t in grid) * 1.0
    # Draw candidate event times from a homogeneous process of rate lbound,
    # extending in batches until the end of the grid is covered.
    candidates = [0]
    while candidates[-1] < grid[-1]:
        gaps = np.random.exponential(1 / lbound, size=len(grid))
        candidates.extend(candidates[-1] + np.cumsum(gaps))
    times = np.array(candidates[1:])  # drop the leading dummy 0
    times = times[times <= tmax].copy()
    if len(times) == 0:
        return np.empty(0)
    # Accept each candidate with probability rate_fn(t) / lbound.
    thresholds = np.random.uniform(0, lbound, size=len(times))
    intensities = np.array([rate_fn(t) for t in times])
    return times[thresholds <= intensities].copy()
import traceback
import json
def execute_rule_engine(rule_name, body):
    """
    Execute a rule-engine run and wrap the outcome in an HTTP-style response.

    :param rule_name: name of the rule to execute
    :param body: request payload passed through to the rule engine
    :return: response with status 200 on success, 400 on a rule error,
        500 on an unknown result shape or an unhandled exception
    """
    __logger.info("inside execute_rule_engine for " + rule_name)
    __logger.info(json.dumps(body, indent=4, sort_keys=True, default=str))
    try:
        result = rule_engine_service.execute_rule_engine_service(rule_name, body)
        if "code" in result:
            if result["code"] == 0:
                resp = response_util.get_response(200, "Success", result["message"])
            else:
                resp = response_util.get_response(400, "Error", result["message"])
        else:
            resp = response_util.get_response(500, "Error", "Unknown exception")
        return resp
    # Catch Exception rather than a bare `except:` so SystemExit and
    # KeyboardInterrupt still propagate.
    except Exception:
        __logger.error("Unhandled exception while executing rule engine!!!")
        __logger.error(traceback.format_exc())
        resp = response_util.get_response(500, "Error", traceback.format_exc())
        return resp
import subprocess
def get_tags():
    """
    Update tags from the remote, then return the set of all tag names.
    """
    print("Pulling latest tags...")
    subprocess.run(["git", "fetch", "--tags"], cwd=CPY_DIR, check=True)
    result = subprocess.run(
        ["git", "tag"], cwd=CPY_DIR, check=True, text=True, capture_output=True
    )
    # splitlines() avoids the trailing empty entry that split("\n") produced,
    # and discard() keeps this safe even when the output has no empty string
    # (set.remove("") would raise KeyError).
    tags = set(result.stdout.splitlines())
    tags.discard("")
    return tags
def get():
    """
    Create and return an instance of the FileSelectionContext
    subclass which is appropriate to the currently active application.
    """
    # Console windows and Emacs get a null context; everything else defaults.
    null_window_classes = (u"ConsoleWindowClass", u"Emacs")
    window_class = ContextUtils.getForegroundClassNameUnicode()
    if window_class in null_window_classes:
        return NullFileSelectionContext()
    return DefaultFileSelectionContext()
def calc_accuracy(y_true, y_predict, display=True):
    """Analysis the score with sklearn.metrics.

    This module includes score functions, performance metrics
    and pairwise metrics and distance computations.

    Parameters
    ==========
    y_true: numpy.array
    y_predict: numpy.array
    display: Boolean
        When True, print one table per metric group.

    Return
    ======
    result: Dict{group_name: {metric_name: value}}

    Examples
    ========
    >>> result = calc_accuracy(y_true, y_predict, display=True)
    """
    # Metric groups keyed by group name; the original looked these lists up
    # via locals()[name], which is fragile and hard to read.
    metric_groups = {
        "score": ["explained_variance_score", "r2_score"],
        "error": ["max_error", "mean_absolute_error", "mean_squared_error",
                  "mean_squared_log_error", "median_absolute_error"],
    }
    result = dict()
    ignore = []
    for name, items in metric_groups.items():
        result[name] = dict()
        for item in items:
            try:
                result[name][item] = getattr(metrics, item)(y_true, y_predict)
            except Exception as e:
                # A metric can fail (e.g. log error on negative values);
                # record it so display skips it.
                print(color(("↓ %s has been removed since `%s`." % \
                    (item.capitalize(), e))))
                ignore.append(item)
    if display:
        for name, items in metric_groups.items():
            tabu = PrettyTable(["Name of %s" % color(name), "Value"])
            for item in items:
                if item in ignore:
                    continue
                numerical = "%.3e" % result[name][item]
                tabu.add_row([color(item, "青"), numerical])
            print(tabu)
    return result
def HSV_to_HSL(hsv):
    """Converts HSV color space to HSL, going through RGB as an intermediate."""
    return RGB_to_HSL(HSV_to_RGB(hsv))
def prosodic_meter_query(
    collection,
):
    """
    Function for returning all Prosodic Meters that contain the queried collection of
    :obj:`fragment.GreekFoot` objects.

    :param collection: an iterable collection of :obj:`fragment.GreekFoot` objects.
    """
    # Materialize once: the original iterated `collection` inside the meter
    # loop, so a generator argument was exhausted after the first meter.
    wanted = set(collection)
    res = []
    for meter in get_all_prosodic_meters():
        # Single subset test instead of rebuilding set(meter.components)
        # once per queried element.
        if wanted.issubset(meter.components):
            res.append(meter)
    return res
def get_model(loss=keras.losses.MeanSquaredError(), optimizer=keras.optimizers.Adam(), metrics=[keras.metrics.MeanSquaredError()]):
    """
    Loads and compiles the 3D U-Net model.

    :param loss: keras loss instance used for training
    :param optimizer: keras optimizer instance
    :param metrics: list of keras metric instances

    NOTE(review): the defaults are instantiated once at import time and
    shared between calls; pass fresh objects when that matters.
    """
    model = unet3D_model()
    # `metrics` is already a list; the original passed metrics=[metrics],
    # producing a nested [[...]] metrics argument.
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    return model
def mps_kph(m_per_s):
    """Convert speed from m/s to km/hr.

    :kbd:`m_per_s` may be either a scalar number or a
    :py:class:`numpy.ndarray` object,
    and the return value will be of the same type.

    :arg m_per_s: Speed in m/s to convert.

    :returns: Speed in km/hr.
    """
    # Scale by the module-level m/s -> km/hr conversion factor.
    converted = m_per_s * M_PER_S__KM_PER_HR
    return converted
import torch
def evaluate(model: nn.Module, loss_func: nn.Module, loader: iter, logger: Logger, device: str = None):
    """ Evaluate the parameters of the model by computing the loss on the data. """
    # Default to whichever device the model's parameters live on.
    if device is None:
        device = next(model.parameters()).device
    model.eval()
    all_logits = []
    all_targets = []
    with torch.no_grad():
        for x_m, x_a, y in loader:
            x_m = x_m.to(device)
            x_a = x_a.to(device)
            y = y.to(device)
            logits, _ = model(x_m, x_a)
            loss = loss_func(logits, y)
            if logger is not None:
                logger.log_step(loss.item())
            all_logits.append(logits.cpu().numpy())
            all_targets.append(y.cpu().numpy())
    predictions = np.concatenate(all_logits, axis=0)
    targets = np.concatenate(all_targets, axis=0)
    # With a logger, return its accumulated per-step losses; otherwise only
    # the loss of the final batch is available.
    if logger is not None:
        return predictions, targets, logger.losses
    return predictions, targets, loss.item()
def glm_likelihood_bernoulli(parms, X, Y, lamb=1, l_p=1, neg=True, log=True):
    """Penalized likelihood for a logistic regression (Bernoulli) model.

    The penalty is ``lamb * ||betas||_{l_p}``; the intercept (first
    coefficient) is excluded from the penalty.

    Parameters
    ----------
    parms : numpy array (numeric)
        The coefficients (including intercept, which is first)
    X : numpy array (numeric)
        The independent variables (or feature matrix), where the first column
        is a dummy column of 1's (for the intercept).
    Y : numpy array or pandas dataframe
        The response value (should be 0 or 1, but could be float as well if
        you're willing to deal with those consequences).
    lamb : int, optional
        The size of the penalty (lambda). Note this is the inverse of the
        common sklearn parameter C (i.e. C=1/lambda). The default is 1.
    l_p : int, optional
        The mathematical norm to be applied to the coefficients.
        The default is 1, representing an L1 penalty.
    neg : bool, optional
        Return negative likelihood. The default is True.
    log : bool, optional
        Return log-likelihood. The default is True.

    Returns
    -------
    float
        The likelihood.

    Examples
    --------
    >>> import numpy as np
    >>> from tsdst.distributions import glm_likelihood_bernoulli
    >>> params = np.concatenate((np.array([3]), np.array([2, 4, 5])))
    >>> np.random.seed(123)
    >>> X = np.random.normal(size=(100, 3))
    >>> X = np.hstack((np.repeat(1, 100).reshape(-1, 1), X))
    >>> Y = np.round(np.random.uniform(low=0, high=1, size=100))
    >>> glm_likelihood_bernoulli(params, X, Y, lamb=1, l_p=1)
    386.6152600787893
    """
    betas = parms[1:]  # intercept excluded from the penalty
    linear_pred = X.dot(parms)
    probs = 1.0 / (1.0 + np.exp(-linear_pred))
    # Bernoulli log-likelihood sum(y*log(p) + (1-y)*log(1-p)) minus penalty.
    # (Equivalent form: Y*mu - log(1 + exp(mu)).)
    penalty = lamb * norm(betas, l_p)
    loglike = np.sum(xlogy(Y, probs) + xlogy(1.0 - Y, 1.0 - probs)) - penalty
    if not log:
        loglike = np.exp(loglike)
    return -loglike if neg else loglike
import json
def to_tvm(graph, shape_dict, layout, mode='tensorflow'):
    """Convert a frontend graph to a TVM relay graph.

    :param graph: frontend model/graph object for the chosen framework
    :param shape_dict: mapping of input names to shapes (tensorflow only)
    :param layout: data layout passed to the tensorflow frontend
    :param mode: one of FRAME_SUPPORTED ('tensorflow', 'keras', 'mxnet',
        otherwise treated as onnx)
    :return: (tvm_graph, params) where tvm_graph is the parsed graph JSON
        with attribute strings converted to numbers
    :raises ValueError: if `mode` is not a supported framework
    """
    # Explicit raise instead of `assert`, which is stripped under `python -O`.
    if mode not in FRAME_SUPPORTED:
        raise ValueError("unsupported frontend mode: %r" % (mode,))
    if mode == 'tensorflow':
        mod, params = tvm.relay.frontend.from_tensorflow(graph, layout=layout, shape=shape_dict)
    elif mode == 'keras':
        mod, params = tvm.relay.frontend.from_keras(graph)
    elif mode == 'mxnet':
        mod, params = tvm.relay.frontend.from_mxnet(graph)
    else:
        mod, params = tvm.relay.frontend.from_onnx(graph)
    mod = tvm.relay.transform.InferType()(mod)
    target = 'llvm'
    target_host = 'llvm'
    with tvm.relay.build_config(opt_level=0):
        tvm_graph_json, lib, params = tvm.relay.build(mod, target=target, target_host=target_host, params=params)
    tvm_graph = json.loads(tvm_graph_json)
    _attrstr_to_number(tvm_graph)
    return tvm_graph, params
def getAxes():
    """ Get each of the axes over which the data is measured.

    Returns (subject, receptor, antigen):
        subject: the first 22 SampleID values
        receptor: unique receptor names (second token of each measurement
            column label), in first-seen order
        antigen: unique antigen names (first token), in first-seen order
    """
    df = load_file("atyeo_covid")
    df = df.filter(regex='SampleID|Ig|Fc|SNA|RCA', axis=1)
    axes = df.filter(regex='Ig|Fc|SNA|RCA', axis=1)
    # Column labels look like "<antigen> <receptor>".
    axes = axes.columns.str.split(" ", expand = True)
    subject = df['SampleID']
    subject = subject[0:22]  # NOTE(review): hard-coded subject count — confirm
    # dict.fromkeys deduplicates while preserving first-seen order,
    # replacing the original O(n^2) membership-test loop.
    antigen = list(dict.fromkeys(row[0] for row in axes))
    receptor = list(dict.fromkeys(row[1] for row in axes))
    return subject, receptor, antigen
import scipy
def stretching_current(ref, cur, dvmin, dvmax, nbtrial, window, t_vec):
    """
    Function to perform the stretching of the waveforms:
    This function compares the Reference waveform to stretched/compressed current waveforms to get the relative seismic velocity variation (and associated error).
    It also computes the correlation coefficient between the Reference waveform and the current waveform.

    INPUTS:
        - ref = Reference waveform (np.ndarray, size N)
        - cur = Current waveform (np.ndarray, size N)
        - dvmin = minimum bound for the velocity variation; example: dvmin=-0.03 for -3% of relative velocity change ('float')
        - dvmax = maximum bound for the velocity variation; example: dvmax=0.03 for 3% of relative velocity change ('float')
        - nbtrial = number of stretching coefficients between dvmin and dvmax, no need to be higher than 50 ('float')
        - window = vector of the indices of the cur and ref windows on which you want to do the measurements (np.ndarray)
        - t_vec = time vector, common to both ref and cur (np.ndarray, size N)

    OUTPUTS:
        - dv = Relative velocity change dv/v (in %)
        - cc = correlation coefficient between the reference waveform and the best stretched/compressed current waveform
        - Eps = [1, nbtrial] array of Epsilon values (Epsilon = -dt/t = dv/v)
    """
    # Candidate stretching factors. The original used the deprecated
    # np.matrix / np.asmatrix API; plain arrays compute the same values.
    eps = np.linspace(dvmin, dvmax, int(nbtrial))
    # Stretched/compressed time axes, one column per epsilon (outer product
    # replaces the (N,1) @ (1,M) matrix multiplication).
    tau = np.outer(t_vec, 1 + eps)
    coeffs = np.zeros(eps.size)
    waveform_ref = ref[window]
    for j in range(eps.size):
        stretched = np.interp(x=np.ravel(t_vec), xp=tau[:, j], fp=cur)
        coeffs[j] = np.corrcoef(waveform_ref, stretched[window])[0, 1]
    imax = np.nanargmax(coeffs)
    # Keep the 4-point refinement stencil inside the coarse grid.
    if imax >= eps.size - 1:
        imax = imax - 1
    if imax <= 2:
        imax = imax + 1
    # Refine around the coarse maximum with a cubic interpolation over the
    # four surrounding samples.
    stencil = np.arange(imax - 2, imax + 2)
    dtfiner = np.linspace(eps[imax - 1], eps[imax + 1], 500)
    func = scipy.interpolate.interp1d(eps[stencil], coeffs[stencil], kind='cubic')
    finer = func(dtfiner)
    cc = np.max(finer)  # maximum correlation coefficient of the refined analysis
    dv = 100 * dtfiner[np.argmax(finer)]  # final dv/v measurement, in %
    # Returned with shape (1, nbtrial) for compatibility with the previous
    # np.matrix-based return value.
    return dv, cc, eps[np.newaxis, :]
def convert_to_int(var):
    """
    Try to convert a value to an int.

    :param var: value to convert (string, number, etc.)
    :returns: the int value, or None if conversion fails
    """
    try:
        return int(var)
    # Also catch TypeError so non-numeric types (e.g. None, lists) return
    # None as documented instead of raising.
    except (ValueError, TypeError):
        return None
def eval_input_fn(filepath, example_parser, batch_size):
    """
    Input function for the model's eval phase.

    Args:
        filepath (str): path to the training/validation TFRecord file
        example_parser (function): function that parses a (batched) example
        batch_size (int): number of samples per batch

    Returns:
        dataset
    """
    # Batch first, then parse batched records in parallel, prefetching one
    # batch ahead of the consumer.
    return (
        tf.data.TFRecordDataset(filepath)
        .batch(batch_size)
        .map(example_parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        .prefetch(1)
    )
def ring_forming_scission_ts_zmatrix(rxn, ts_geo):
    """ z-matrix for a ring-forming scission transition state geometry

    :param rxn: a Reaction object
    :param ts_geo: a transition state geometry
    :returns: (zma, zma_keys, dummy_key_dct) — the z-matrix, the geometry
        keys in z-matrix order, and the dummy-atom key mapping
    """
    rxn = rxn.copy()  # avoid mutating the caller's Reaction object
    # 1. Get keys to linear or near-linear atoms
    lin_idxs = list(automol.geom.linear_atoms(ts_geo))
    # 2. Add dummy atoms over the linear atoms
    rcts_gra = ts.reactants_graph(rxn.forward_ts_graph)
    geo, dummy_key_dct = automol.geom.insert_dummies_on_linear_atoms(
        ts_geo, lin_idxs=lin_idxs, gra=rcts_gra)
    # 3. Add dummy atoms to the Reaction object as well
    rxn = add_dummy_atoms(rxn, dummy_key_dct)
    # 4. Generate a z-matrix for the geometry
    rng_keys, = ts.forming_rings_atom_keys(rxn.forward_ts_graph)
    att_key, tra_key, _ = ring_forming_scission_atom_keys(rxn)
    # First, cycle the transferring atom to the front of the ring keys and, if
    # needed, reverse the ring so that the attacking atom is last
    # (transferring atom, ..., atom, attacking atom)
    rng_keys = automol.graph.cycle_ring_atom_key_to_front(
        rng_keys, tra_key, end_key=att_key)
    # Now, cycle the second-to-last key to the front so that the ring order is:
    # (atom, attacking atom, transferring atom, ....)
    rng_keys = automol.graph.cycle_ring_atom_key_to_front(
        rng_keys, rng_keys[-2])
    vma, zma_keys = automol.graph.vmat.vmatrix(rxn.forward_ts_graph)
    # Build the z-matrix from the dummy-augmented geometry, restricted and
    # reordered to the v-matrix key order.
    zma_geo = automol.geom.from_subset(geo, zma_keys)
    zma = automol.zmat.from_geometry(vma, zma_geo)
    return zma, zma_keys, dummy_key_dct
def trip(u, v):
    """
    Return the scalar triple product z . (u x v) of 2-D vectors u and v.

    Dotting with the z axis reduces this to the z component of u cross v.
    The product is:
        positive if v is to the left of u, that is,
            the shortest right hand rotation from u to v is ccw
        negative if v is to the right of u, that is,
            the shortest right hand rotation from u to v is cw
        zero if v is colinear with u
    """
    ux, uy = u[0], u[1]
    vx, vy = v[0], v[1]
    return ux * vy - uy * vx
def one_cpc(request, hmc_session):  # noqa: F811
    """
    Fixture representing a single, arbitrary CPC managed by the HMC.

    Returns a `zhmcclient.Cpc` object, with full properties.
    """
    client = zhmcclient.Client(hmc_session)
    managed_cpcs = client.cpcs.list()
    assert len(managed_cpcs) >= 1
    # Pick the first managed CPC and fetch all of its properties.
    first_cpc = managed_cpcs[0]
    first_cpc.pull_full_properties()
    return first_cpc
import time
def yield_with_display(future_or_iterable, every, timeout=None):
    """ Yields for a future and displays status every x seconds

    :param future_or_iterable: A future to yield on, or a list of futures
    :param every: The number of seconds between status updates
    :param timeout: The total number of seconds to wait for the future(s),
        otherwise raises TimeoutError
    :return: the future's result, or the list of results when a list of
        futures was passed in
    """
    start_time = time.time()
    last_status = start_time
    is_single = not isinstance(future_or_iterable, list)
    futures = [future_or_iterable] if is_single else future_or_iterable
    # Poll until every future completes; the original built a throwaway
    # list comprehension as the loop condition on each poll.
    while any(not future.done() for future in futures):
        current_time = time.time()
        # Timeout reached
        if timeout and current_time - start_time > timeout:
            raise TimeoutError('Waited %d seconds for future.' % timeout)
        # Displaying status
        if current_time - last_status > every:
            last_status = current_time
            LOGGER.info('Still waiting for future(s). %s/%s', int(current_time - start_time), timeout or '---')
        # Sleeping
        yield gen.sleep(0.1)
    # Futures are done; re-raise the first stored exception, if any.
    for future in futures:
        exception = future.exception()
        if exception is not None:
            raise exception
    # Returning results
    results = [future.result() for future in futures]
    return results[0] if is_single else results
def query(cmd, db, cgi='http://www.ncbi.nlm.nih.gov/sites/entrez',
          **keywds):
    """query(cmd, db, cgi='http://www.ncbi.nlm.nih.gov/sites/entrez',
    **keywds) -> handle

    Query Entrez and return a handle to the results, consisting of
    a web page in HTML format.
    See the online documentation for an explanation of the parameters:
    http://www.ncbi.nlm.nih.gov/books/bv.fcgi?rid=helplinks.chapter.linkshelp

    Raises an IOError exception if there's a network error.
    """
    # Extra keyword arguments may override cmd/db, matching the original
    # dict.update() order.
    variables = {'cmd': cmd, 'db': db, **keywds}
    return _open(cgi, variables)
def Q2B(uchar):
    """Convert a single full-width character to its half-width form.

    The ideographic space (U+3000) maps to an ASCII space; other full-width
    forms are shifted down by 0xFEE0. Characters whose shifted code point
    falls outside the printable ASCII range are returned unchanged.
    """
    code = ord(uchar)
    if code == 0x3000:
        return chr(0x0020)
    code -= 0xfee0
    if 0x0020 <= code <= 0x7e:
        return chr(code)
    return uchar
from datetime import datetime
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(b' today ') == parsedate(
    ...     datetime.date.today().strftime('%b %d').encode('ascii'))
    True
    >>> parsedate(b'yesterday ') == parsedate(
    ...     (datetime.date.today() - datetime.timedelta(days=1)
    ...     ).strftime('%b %d').encode('ascii'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate(b'now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    >>> parsedate(b'2000 UTC', formats=extendeddateformats)
    (946684800, 0)
    """
    if bias is None:
        bias = {}
    # Empty input parses as the epoch, UTC.
    if not date:
        return 0, 0
    # Already-parsed (unixtime, offset) tuples pass through unchanged.
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    # Relative keywords (localized via _()) resolve against today's date.
    if date == b'now' or date == _(b'now'):
        return makedate()
    if date == b'today' or date == _(b'today'):
        date = datetime.date.today().strftime('%b %d')
        date = encoding.strtolocal(date)
    elif date == b'yesterday' or date == _(b'yesterday'):
        date = (datetime.date.today() - datetime.timedelta(days=1)).strftime(
            r'%b %d'
        )
        date = encoding.strtolocal(date)
    # Fast path: the internal b'<unixtime> <offset>' representation.
    try:
        when, offset = map(int, date.split(b' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in (b"d", b"mb", b"yY", b"HI", b"M", b"S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0:1] in b"HMS":
                    b = b"00"
                else:
                    # year, month, and day start from 1
                    b = b"1"
            # this piece is for matching the generic end to today's date
            n = datestr(now, b"%" + part[0:1])
            defaults[part] = (b, n)
        # Try each accepted format in order; the for/else raises when none
        # of them could parse the input.
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise error.ParseError(
                _(b'invalid date: %r') % pycompat.bytestr(date)
            )
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7FFFFFFF:
        raise error.ParseError(_(b'date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise error.ParseError(_(b'impossible time zone offset: %d') % offset)
    return when, offset
import glob
def prepare_lv2_data(change_price=False):
    """
    Read level-1 model predictions and assemble the level-2 (stacking)
    training/test matrices.

    :param change_price: unused flag kept for interface compatibility
    :return: (X_train, X_test, y_train), one column per level-1 model
    """
    # The file-level import is `import glob`, so the module must be called
    # as glob.glob(...) — the original called the module object directly.
    train_files = glob.glob('./models/level1_model_files/train/*')
    test_files = glob.glob('./models/level1_model_files/test/*')
    num_feat = len(train_files)
    nrow = pd.read_csv(train_files[0]).shape[0]
    X_train = np.zeros((nrow, num_feat))
    # NOTE(review): test-set row count is hard-coded; confirm it matches the
    # level-1 test prediction files.
    n_test_rows = 7662
    X_test = np.zeros((n_test_rows, num_feat))
    for i, path in enumerate(train_files):
        X_train[:, i] = pd.read_csv(path).drop(['index', 'reponse'], axis=1).values.reshape(-1)
    # Match each test file to its train file by model name so that train and
    # test columns stay aligned.
    for i, train_path in enumerate(train_files):
        model_name = train_path.split('{')[0].split('/')[-1]
        for test_path in test_files:
            if model_name in test_path:
                print(model_name)
                X_test[:, i] = pd.read_csv(test_path).price_doc.values
    y_train = pd.read_csv(train_files[0]).reponse.values
    return X_train, X_test, y_train
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    config = Configurator(settings=settings)
    # Pull in the clld plugin stack.
    for plugin in ('clldmpg', 'clld_glottologfamily_plugin', 'clld_phylogeny_plugin'):
        config.include(plugin)
    config.register_datatable('familys', datatables.Families)
    registry = config.registry
    registry.registerUtility(GrambankCtxFactoryQuery(), ICtxFactoryQuery)
    registry.registerUtility(GrambankMapMarker(), IMapMarker)
    registry.registerUtility(link_attrs, ILinkAttrs)
    return config.make_wsgi_app()
import numpy
def dwwc(graph, metapath, damping=0.5, dense_threshold=0, dtype=numpy.float64, dwwc_method=None):
    """
    Compute the degree-weighted walk count (DWWC) in which nodes can be
    repeated within a path.

    Parameters
    ----------
    graph : hetio.hetnet.Graph
    metapath : hetio.hetnet.MetaPath
    damping : float
    dense_threshold : float (0 <= dense_threshold <= 1)
        sets the density threshold at which a sparse matrix will be
        converted to a dense automatically.
    dtype : dtype object
    dwwc_method : function
        dwwc method to use for computing DWWCs. If set to None, use
        module-level default (default_dwwc_method).
    """
    # The docstring promised a fallback for None, but the original never
    # applied it, so dwwc_method=None crashed with a TypeError.
    if dwwc_method is None:
        dwwc_method = default_dwwc_method
    return dwwc_method(
        graph=graph,
        metapath=metapath,
        damping=damping,
        dense_threshold=dense_threshold,
        dtype=dtype,
    )
def fba_and_min_enzyme(cobra_model, coefficients_forward, coefficients_reverse):
    """
    Perform FBA, then minimize total enzyme content at the FBA optimum.

    :param cobra_model: cobra model to optimize (changes are reverted when
        the model context exits, so the caller's model is left unmodified)
    :param coefficients_forward: enzyme cost coefficients for forward fluxes
    :param coefficients_reverse: enzyme cost coefficients for reverse fluxes
    :return: solution of the enzyme-minimization problem
    """
    with cobra_model as model:
        model.optimize()
        cobra.util.fix_objective_as_constraint(model)
        set_enzymatic_objective(model, coefficients_forward, coefficients_reverse)
        # Solve inside the context: the original called optimize() after the
        # `with` block, by which point cobra had already reverted the
        # enzymatic objective and fixed-objective constraint, so a plain FBA
        # was re-run instead of the enzyme minimization.
        sol = model.optimize()
    return sol
def logi_led_shutdown():
    """ shutdowns the SDK for the thread. """
    # Without a loaded DLL there is nothing to shut down.
    if not led_dll:
        return False
    return bool(led_dll.LogiLedShutdown())
import os
def is_ci() -> bool:
    """Return whether running in CI (the ``CI`` env var is set and non-empty)."""
    return bool(os.environ.get("CI"))
from typing import Dict
def get_blank_adjustments_for_strat(transitions: list) -> Dict[str, dict]:
    """
    Provide a blank set of flow adjustments to be populated by the update_adjustments_for_strat function below.

    Args:
        transitions: All the transition flows we will be modifying through the clinical stratification process

    Returns:
        Dictionary of dictionaries of dictionaries of blank dictionaries to be populated later
    """
    # One empty dict per (age group, clinical stratum, transition) triple.
    return {
        agegroup: {
            clinical_stratum: {transition: {} for transition in transitions}
            for clinical_stratum in CLINICAL_STRATA
        }
        for agegroup in COVID_BASE_AGEGROUPS
    }
def get_instance(module, name, config):
    """
    Instantiate the class named by config[name]['type'] from `module`,
    passing config[name]['args'] as keyword arguments when present.
    """
    spec = config[name]
    cls = getattr(module, spec['type'])
    kwargs = spec.get('args')
    # Absent or falsy args means a plain no-argument construction.
    return cls(**kwargs) if kwargs else cls()
def separable_hnn(num_points, input_h_s=None, input_model=None,
                  save_path='temp_save_path', train=True, epoch_save=100):
    """
    Separable Hamiltonian network: reuse a provided network/model pair, or
    build a fresh one, then optionally train it.

    :return: (h_s, model) — the separable HNN and its wrapping DENNet
    """
    if input_h_s:
        h_s = input_h_s
        model = input_model
    else:
        # Fresh MLP: input layer to width 20, four hidden Tanh layers of
        # width 20, and a scalar output.
        layers = [nn.Linear(3 * num_points, 20), nn.Tanh()]
        for _ in range(4):
            layers.extend([nn.Linear(20, 20), nn.Tanh()])
        layers.append(nn.Linear(20, 1))
        h_s = HNN1DWaveSeparable(nn.Sequential(*layers)).to(device)
        model = DENNet(h_s, case='1DWave').to(device)
    if train:
        # NOTE(review): `num_boundary` is read from module scope — confirm.
        learn_sep = Learner(model, num_boundary=num_boundary, save_path=save_path,
                            epoch_save=epoch_save)
        logger = TensorBoardLogger('separable_logs')
        trainer_sep = pl.Trainer(min_epochs=701, max_epochs=701, logger=logger, gpus=1)
        trainer_sep.fit(learn_sep)
    return h_s, model
def get_n_largest(n, lst, to_compare=lambda x: x):
    """
    Return the n largest elements of lst in descending order of to_compare.

    :param n: number of elements to return
    :param lst: input sequence (an empty list now returns [] instead of
        raising IndexError)
    :param to_compare: key function used for comparison (default: identity)

    The original seeded its result with ``[lst[0]] * n``, so whenever lst[0]
    was among the largest values it was reported multiple times, e.g.
    get_n_largest(2, [5, 1, 2]) returned [5, 5] instead of [5, 2].
    A stable descending sort fixes that.
    """
    return sorted(lst, key=to_compare, reverse=True)[:n]
from natsort import natsorted
import collections
from typing import Optional
from typing import Union
def base_scatter(
    x: Optional[Union[np.ndarray, list]],
    y: Optional[Union[np.ndarray, list]],
    hue: Optional[Union[np.ndarray, list]] = None,
    ax=None,
    title: str = None,
    x_label: str = None,
    y_label: str = None,
    color_bar: bool = False,
    bad_color: str = "lightgrey",
    dot_size: int = None,
    palette: Optional[Union[str, list]] = 'stereo',
    invert_y: bool = True,
    legend_ncol=2,
    show_legend=True,
    show_ticks=False,
    vmin=None,
    vmax=None,
    SegmentedColormap = None,
):  # scatter plot of the spatial distribution of the expression matrix after clustering
    """
    scatter plotter

    :param x: x position values
    :param y: y position values
    :param hue: per-dot values used for coloring, e.g. ['1', '3', '1', '2']
    :param ax: matplotlib Axes object; a new 7x7 figure is created when omitted
    :param title: figure title
    :param x_label: x label
    :param y_label: y label
    :param color_bar: show a color bar instead of a legend; `hue` must be an
        int array or list when color_bar is True
    :param bad_color: color used for masked/bad values in the colormap
    :param dot_size: marker size; defaults to 120000 / number of dots
    :param palette: customized colors (palette name or list)
    :param invert_y: whether to invert the y-axis
    :param legend_ncol: number of legend columns
    :param show_legend: whether to keep the legend
    :param show_ticks: whether to keep axis ticks; when False the aspect is
        fixed and ticks are removed
    :param vmin: lower bound of the color scale (color_bar mode)
    :param vmax: upper bound of the color scale (color_bar mode)
    :param SegmentedColormap: unused — NOTE(review): confirm before removing

    :return: matplotlib Axes object
    """
    if not ax:
        _, ax = plt.subplots(figsize=(7, 7))
    # Default marker size scales inversely with the number of dots
    # (assumes `hue` is provided — TODO confirm against callers).
    dot_size = 120000 / len(hue) if dot_size is None else dot_size
    # Continuous values: draw with a colormap and add a color bar.
    if color_bar:
        colors = conf.linear_colors(palette)
        cmap = ListedColormap(colors)
        cmap.set_bad(bad_color)
        sns.scatterplot(x=x, y=y, hue=hue, ax=ax, palette=cmap, size=hue, sizes=(dot_size, dot_size), vmin=vmin,
                        vmax=vmax)
        if vmin is None and vmax is None:
            # No explicit scale bounds: normalize to the data range.
            norm = plt.Normalize(hue.min(), hue.max())
            sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
            sm.set_array([])
            ax.figure.colorbar(sm)
        ax.legend_.remove()
    else:
        # Categorical values: one color per natural-sorted category, shown
        # in a legend to the right of the axes.
        g = natsorted(set(hue))
        colors = conf.get_colors(palette)
        color_dict = collections.OrderedDict(dict([(g[i], colors[i]) for i in range(len(g))]))
        sns.scatterplot(x=x, y=y, hue=hue, hue_order=g,
                        palette=color_dict, size=hue, sizes=(dot_size, dot_size), ax=ax)
        handles, labels = ax.get_legend_handles_labels()
        ax.legend_.remove()
        ax.legend(handles, labels, ncol=legend_ncol, bbox_to_anchor=(1.02, 1),
                  loc='upper left', borderaxespad=0, frameon=False)
        # Make legend markers fully opaque and a fixed readable size.
        for lh in ax.legend_.legendHandles:
            lh.set_alpha(1)
            lh._sizes = [40]
    if invert_y:
        ax.invert_yaxis()
    if not show_legend:
        ax.legend_.remove()
    if not show_ticks:
        ax.set_aspect('equal', adjustable='datalim')
    ax.set_title(title, fontsize=18, fontweight='bold')
    ax.set_ylabel(y_label, fontsize=15)  # set y-axis label
    ax.set_xlabel(x_label, fontsize=15)  # set x-axis label
    if not show_ticks:
        ax.set_yticks([])
        ax.set_xticks([])
    return ax
def lambda_sum_largest_canon(expr, real_args, imag_args, real2imag):
    """Canonicalize nuclear norm with Hermitian matrix input.

    :param expr: the atom being canonicalized
    :param real_args: real parts of the arguments
    :param imag_args: imaginary parts of the arguments (None entries when
        the input is purely real)
    :param real2imag: mapping used by hermitian_canon for the real embedding
    :returns: (real, imag) canonicalized expressions
    """
    real, imag = hermitian_canon(expr, real_args, imag_args, real2imag)
    # The real embedding repeats each eigenvalue twice, so sum over twice as
    # many of the largest eigenvalues...
    real.k *= 2
    # ...and divide by two to compensate when the input was truly complex.
    if imag_args[0] is not None:
        real /= 2
    return real, imag
def dp_palindrome_length(dp, S, i, j):
    """
    Length of the longest palindromic subsequence of S[i..j] (inclusive),
    computed recursively with memoization.

    `dp` is a dict cache mapping (i, j) index pairs to previously computed
    lengths; pass {} on the first call.
    """
    if i == j:
        return 1
    cached = dp.get((i, j))
    if cached is not None:
        return cached
    if S[i] == S[j]:
        # Matching endpoints contribute 2 around the inner subsequence.
        result = 2 if i + 1 == j else 2 + dp_palindrome_length(dp, S, i + 1, j - 1)
    else:
        # Otherwise drop one endpoint and take the better of the two options.
        result = max(
            dp_palindrome_length(dp, S, i + 1, j),
            dp_palindrome_length(dp, S, i, j - 1),
        )
    dp[(i, j)] = result
    return result
def ellip_enclose(points, color, inc=1, lw=2, nst=2):
    """
    Build the covariance ellipse around a set of 2-D points, returned as a
    translucent filled patch plus an edge-only patch.

    Based on:
    https://github.com/joferkington/oost_paper_code/blob/master/error_ellipse.py
    """
    def sorted_eigs(cov):
        """Eigen-decomposition with eigenvalues in descending order."""
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]

    xs = points[:, 0]
    ys = points[:, 1]
    vals, vecs = sorted_eigs(np.cov(xs, ys))
    # Orientation of the principal axis, in degrees.
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    width, height = 2 * nst * np.sqrt(vals)
    center = np.mean(points, 0)
    shape = dict(width=inc * width, height=inc * height, angle=theta)
    ell = patches.Ellipse(center, facecolor=color, alpha=0.2, lw=0, **shape)
    edge = patches.Ellipse(center, facecolor='none', edgecolor=color, lw=lw, **shape)
    return ell, edge
def catalog_dictionary_per_observation(cats, obs_nums, targets, defaults):
    """Translate a dictionary of catalogs from a case of either:
    1. Separate catalogs for each target name
    2. Separate catalogs for each target name and instrument
    into a dictionary of catalogs for each instrument and observation
    Parameters
    ----------
    cats : dict
        Dictionary of catalogs. Can be:
        Same catalogs for all instruments within each observation
        catalogs = {'my_targ_1': {'point_source': 'ptsrc1.cat',
                                  'galaxy': 'galaxy1.cat',
                                  'extended': 'ex1.cat'},
                    'my_targ_2': {'point_source': 'ptsrc2.cat',
                                  'galaxy': 'galaxy2.cat',
                                  'extended': 'ex2.cat'}}
        Different catalogs for each instrument in each observation
        catalogs = {'my_targ_1': {'nircam': {'point_source': 'ptsrc1.cat',
                                             'galaxy': 'galaxy1.cat',
                                             'extended': 'ex1.cat'},
                                  'niriss': {'pointsource': 'ptsrc_nis.cat',
                                             'galaxy': 'galaxy_nis.cat'}},
                    'my_targ_2': {'nircam': {'point_source': 'ptsrc2.cat',
                                             'galaxy': 'galaxy2.cat',
                                             'extended': 'ex2.cat'}}}
    obs_nums : numpy.ndarray
        1D array of observation ID numbers
    targets : numpy.ndarray
        1d array of target names, with a 1:1 correspondence to obs_nums
    defaults : dict
        Dictionary of default catalog values
    Returns
    -------
    obs_cats : dict
        Dictionary of catalogs per observation, with keys that match
        those in the defaults
        obs_cats = {'001': {'nircam': {'PointsourceCatalog': 'ptsrc1.cat',
                                       'GalaxyCatalog': 'galaxy1.cat',
                                       'ExtendedCatalog': 'ex1.cat'},
                            'niriss': {'PointsourceCatalog': 'ptsrc_nis.cat',
                                       'GalaxyCatalog': 'galaxy_nis.cat'},
                            }
                    '002': {'nircam': {'PointsourceCatalog': 'ptsrc2.cat',
                                       'GalaxyCatalog': 'galaxy2.cat',
                                       'ExtendedCatalog': 'ex2.cat'},
                            'niriss': {'PointsourceCatalog': 'ptsrc_nis2.cat',
                                       'GalaxyCatalog': 'galaxy_nis2.cat'}
                            }
                    }
    """
    # Set up the output dictionary. Populate with keys for all observations
    # and default catalog values to cover any entries in obs_cats that are
    # not present in the input.
    obs_cats = {}
    for number in obs_nums:
        obs_cats[number] = {'nircam': {}, 'niriss': {}, 'fgs': {}, 'miri':{}, 'nirspec': {}}
        for cat_type in POSSIBLE_CATS:
            # Imaging instruments get the user-supplied defaults; MIRI and
            # NIRSpec are filled with the string 'None' placeholders.
            obs_cats[number]['nircam'][CAT_TYPE_MAPPING[cat_type]] = defaults[CAT_TYPE_MAPPING[cat_type]]
            obs_cats[number]['niriss'][CAT_TYPE_MAPPING[cat_type]] = defaults[CAT_TYPE_MAPPING[cat_type]]
            obs_cats[number]['fgs'][CAT_TYPE_MAPPING[cat_type]] = defaults[CAT_TYPE_MAPPING[cat_type]]
            obs_cats[number]['miri'][CAT_TYPE_MAPPING[cat_type]] = 'None'
            obs_cats[number]['nirspec'][CAT_TYPE_MAPPING[cat_type]] = 'None'
    # Loop over the keys in the top level of the input dictionary
    for key1 in cats:
        # Find the observation numbers that use this target
        match = np.array(targets) == key1
        # Check to see if the second level of the input dictionary is
        # a dictionary of catalogs, or a dictionary of instruments
        keys2 = cats[key1].keys()
        keys_present = [True if poss in keys2 else False for poss in POSSIBLE_CATS]
        if any(keys_present):
            # Dictionary contains catalog names, so we use the same catalogs
            # for all instruments
            # Loop over the observation numbers that use this target and
            # populate the entries for each with the catalog names. In
            # this case the catalog names are the same for all instruments
            for obs_number in obs_nums[match]:
                for key2 in keys2:
                    obs_cats[obs_number]['nircam'][CAT_TYPE_MAPPING[key2]] = cats[key1][key2]
                    obs_cats[obs_number]['niriss'][CAT_TYPE_MAPPING[key2]] = cats[key1][key2]
                    obs_cats[obs_number]['fgs'][CAT_TYPE_MAPPING[key2]] = cats[key1][key2]
        else:
            # Dictionary contains instrument names
            # Loop over observation numbers that use this target and
            # populate the different catalogs for each instrument
            for obs_number in obs_nums[match]:
                for instrument in keys2:
                    ctypes = cats[key1][instrument].keys()
                    for ctype in ctypes:
                        obs_cats[obs_number][instrument][CAT_TYPE_MAPPING[ctype]] = cats[key1][instrument][ctype]
    return obs_cats
def sierpinkspi(p1, p2, p3, degree, draw, image, colors):
    """
    Recursively draw a Sierpinski triangle.

    Paints the triangle (p1, p2, p3) with the color for this recursion
    depth, then recurses into the three corner sub-triangles until
    ``degree`` reaches 0.

    Parameters: ``p1``/``p2``/``p3`` are (x, y) corner points, ``degree``
    the remaining recursion depth, ``draw`` the drawing context used for
    ``polygon``, ``image`` the image being drawn on, and ``colors`` a
    sequence indexed by ``degree``.

    Returns ``image`` so callers can chain on the result.
    """
    colour = colors
    draw.polygon(((p1[0], p1[1]), (p2[0], p2[1]), (p3[0], p3[1])), fill=colour[degree])
    if degree > 0:
        sierpinkspi(p1, mid(p1, p2), mid(p1, p3), degree-1, draw, image, colors)
        sierpinkspi(p2, mid(p1, p2), mid(p2, p3), degree-1, draw, image, colors)
        sierpinkspi(p3, mid(p1, p3), mid(p2, p3), degree-1, draw, image, colors)
    # BUG FIX: previously ``image`` was returned only in the base case, so a
    # top-level call with degree > 0 returned None. Return it unconditionally.
    return image
def generate_depth_map(camera, Xw, shape):
    """Render a point cloud as a depth image.

    Parameters
    ----------
    camera: Camera
        Camera object with appropriately set extrinsics wrt world.
    Xw: np.ndarray (N x 3)
        3D point cloud (x, y, z) in the world coordinate.
    shape: np.ndarray (H, W)
        Output depth image shape.

    Returns
    -------
    depth: np.array
        Rendered depth image.
    """
    assert len(shape) == 2, 'Shape needs to be 2-tuple.'
    height, width = shape
    # Transform points from the world (W) into the camera (C) frame.
    Xc = camera.p_cw * Xw
    # Project with intrinsics only, since the points are already in the
    # camera frame; round down to integer pixel coordinates.
    uv = Camera(K=camera.K).project(Xc).astype(int)
    z_c = Xc[:, 2]
    # Keep only projections that land inside the image with positive depth.
    in_view = np.logical_and.reduce(
        [(uv >= 0).all(axis=1), uv[:, 0] < width, uv[:, 1] < height, z_c > 0])
    uv, z_c = uv[in_view], z_c[in_view]
    # Splat the per-point depths into an empty image.
    depth = np.zeros((height, width), dtype=np.float32)
    depth[uv[:, 1], uv[:, 0]] = z_c
    return depth
def zeros(shape, dtype, allocator=drv.mem_alloc):
    """
    Return a GPU array of the given shape and dtype filled with zeros.

    Parameters
    ----------
    shape : tuple
        Array shape.
    dtype : data-type
        Data type for the array.
    allocator : callable
        Returns an object that represents the memory allocated for
        the requested array.

    Returns
    -------
    out : pycuda.gpuarray.GPUArray
        Array of zeros with the given shape and dtype.

    Notes
    -----
    This function exists to work around the following numpy bug that
    prevents pycuda.gpuarray.zeros() from working properly with
    complex types in pycuda 2011.1.2:
    http://projects.scipy.org/numpy/ticket/1898
    """
    result = gpuarray.GPUArray(shape, dtype, allocator)
    result.fill(0)
    return result
def create_own_child_column(X):
    """
    Replace the 'relationship' column with a boolean 'own_child' column
    that is True where the relationship equals 'own-child'.

    Returns a new DataFrame; the input is left unmodified.
    """
    flagged = X.assign(own_child=X['relationship'].eq('own-child'))
    return flagged.drop(columns=['relationship'])
def find_next_gate(wires, op_list):
    """Given a list of operations, finds the next operation that acts on at least one of
    the same set of wires, if present.
    Args:
        wires (Wires): A set of wires acted on by a quantum operation.
        op_list (list[Operation]): A list of operations that are implemented after the
            operation that acts on ``wires``.
    Returns:
        int or None: The index, in ``op_list``, of the earliest gate that uses one or more
        of the same wires, or ``None`` if no such gate is present.
    """
    # First index whose operation shares at least one wire, else None.
    return next(
        (idx for idx, candidate in enumerate(op_list)
         if len(Wires.shared_wires([wires, candidate.wires])) > 0),
        None,
    )
def FDilatedConv1d(xC, xP, nnModule):
    """1D DILATED CAUSAL CONVOLUTION.

    Applies ``nnModule.convC`` (current) to ``xC`` and ``nnModule.convP``
    (previous) to ``xP`` via the functional conv1d interface and returns
    the sum of the two responses.
    """
    current = F.conv1d(xC, nnModule.convC.weight, nnModule.convC.bias)
    previous = F.conv1d(xP, nnModule.convP.weight, nnModule.convP.bias)
    return current + previous
def load_spans(file):
    """
    Load predicted spans from a TSV file.

    Each line holds ``article_id<TAB>span_begin<TAB>span_end``. Returns a
    pair of parallel lists: article ids and (begin, end) intervals.
    """
    article_ids = []
    intervals = []
    with open(file, 'r', encoding='utf-8') as handle:
        for line in handle:
            art_id, begin, end = (int(field) for field in line.rstrip().split('\t'))
            article_ids.append(art_id)
            intervals.append((begin, end))
    return article_ids, intervals
def is_stupid_header_row(row):
    """returns true if we believe row is what the EPN-TAP people used
    as section separators in the columns table.
    That is: the text is red:-)
    """
    try:
        perhaps_p = row.contents[0].contents[0]
        perhaps_span = perhaps_p.contents[0]
        if perhaps_span.get("style")=='color: rgb(255,0,0);':
            return True
    # BUG FIX: rows with empty ``contents`` raised an uncaught IndexError;
    # any row that lacks the expected nesting is simply not a header row.
    except (AttributeError, KeyError, IndexError):
        pass  # Fall through to False
    return False
def get_cli_parser() -> ArgumentParser:
    """Build and return the ArgumentParser for the Alice/Bob key-share CLI."""
    parser = ArgumentParser(description="CLI options for Alice and Bob key share")
    # The two exchange primes share the same shape, only flag/help differ.
    for flag, text in (('-p', 'Prime p for information exchange'),
                       ('-g', 'Prime g for information exchange')):
        parser.add_argument(flag, help=text, type=int)
    parser.add_argument('--bits',
                        help="The number of bits for the private encryption key",
                        type=int, default=512)
    return parser
import re
def _get_http_and_https_proxy_ip(creds):
"""
Get the http and https proxy ip.
Args:
creds (dict): Credential information according to the dut inventory
"""
return (re.findall(r'[0-9]+(?:\.[0-9]+){3}', creds.get('proxy_env', {}).get('http_proxy', ''))[0],
re.findall(r'[0-9]+(?:\.[0-9]+){3}', creds.get('proxy_env', {}).get('https_proxy', ''))[0]) | b18d89718456830bdb186b3b1e120f4ae7c673c7 | 28,687 |
import os
def multiple_files_multiple_tracks():
    """Return the path to a CUE file that maps multiple tracks to each source file."""
    return os.path.join("tests", "files", "Non-ISO_extended-ASCII_text_with_CRLF.cue")
def geometric_expval(p):
    """
    Expected value (mean) of the geometric distribution with success
    probability ``p``, i.e. ``1 / p``.
    """
    return 1.0 / p
def make_ticc_dataset(
        clusters=(0, 1, 0), n_dim=3, w_size=5, break_points=None,
        n_samples=200, n_dim_lat=0, sparsity_inv_matrix=0.5, T=9,
        rand_seed=None, **kwargs):
    """Generate data as the TICC method.

    Library implementation of `generate_synthetic_data.py`, original can be
    found at https://github.com/davidhallac/TICC

    Samples are drawn sequentially: each new sample is drawn from the
    Gaussian conditioned on the previous (up to ``w_size - 1``) samples of
    its cluster's window covariance.
    """
    # Each of len(clusters) segments contributes n_samples points; they are
    # later split into T equal time periods, so the total must divide evenly.
    if (len(clusters) * n_samples) % T != 0:
        raise ValueError(
            'n_clusters * n_samples should be a multiple of n_times '
            'to avoid having samples in the same time period in different '
            'clusters')
    # Per-sample cluster label (segment layout) and time-period label.
    id_cluster = np.repeat(np.asarray(list(clusters)), n_samples)
    y = np.repeat(np.arange(T), len(clusters) * n_samples // T)
    cluster_mean = np.zeros(n_dim)
    cluster_mean_stack = np.zeros(n_dim * w_size)
    clusters = np.unique(list(clusters))
    # Generate two inverse matrices
    precisions = {}
    covs = {}
    for i, cluster in enumerate(clusters):
        # One block-Toeplitz precision matrix per distinct cluster
        # (note: seeded by the cluster index, not by rand_seed).
        precisions[cluster] = make_ticc(
            rand_seed=i, num_blocks=w_size, n_dim_obs=n_dim,
            n_dim_lat=n_dim_lat, sparsity_inv_matrix=sparsity_inv_matrix,
            **kwargs)
        covs[cluster] = linalg.pinvh(precisions[cluster])
    # Data matrix
    X = np.empty((id_cluster.size, n_dim))
    precs = []
    n = n_dim
    for i, label in enumerate(id_cluster):
        # for num in range(old_break_pt, break_pt):
        if i == 0:
            # conditional covariance and mean
            cov_tom = covs[label][:n_dim, :n_dim]
            mean = cluster_mean_stack[n_dim * (w_size - 1):]
        elif i < w_size:
            # Condition on all i previous samples (window not yet full).
            cov = covs[label][:(i + 1) * n, :(i + 1) * n]
            Sig11, Sig22, Sig21, Sig12 = _block_matrix(cov, i * n, i * n)
            Sig21Theta11 = Sig21.dot(linalg.pinvh(Sig11))
            cov_tom = Sig22 - Sig21Theta11.dot(Sig12)  # sigma2|1
            mean = cluster_mean + Sig21Theta11.dot(
                X[:i].flatten() - cluster_mean_stack[:i * n_dim])
        else:
            # Condition on the last (w_size - 1) samples of the window.
            cov = covs[label][:w_size * n, :w_size * n]
            Sig11, Sig22, Sig21, Sig12 = _block_matrix(
                cov, (w_size - 1) * n, (w_size - 1) * n)
            Sig21Theta11 = Sig21.dot(linalg.pinvh(Sig11))
            cov_tom = Sig22 - Sig21Theta11.dot(Sig12)  # sigma2|1
            mean = cluster_mean + Sig21Theta11.dot(
                X[i - w_size + 1:i].flatten() -
                cluster_mean_stack[:(w_size - 1) * n_dim])
        X[i] = np.random.multivariate_normal(mean, cov_tom)
        precs.append(linalg.pinvh(cov_tom))
    id_cluster_group = []
    for c in np.unique(y):
        idx = np.where(y == c)[0]
        # check samples at same time belong to a single cluster
        assert np.unique(id_cluster[idx]).size == 1
        id_cluster_group.append(id_cluster[idx][0])
    data = Bunch(
        X=X, y=y, id_cluster=id_cluster, covs=covs, precs=precs,
        id_cluster_group=np.asarray(id_cluster_group))
    return data
def tag_tuple(tag):
    """Decompose a tag into its (tagClass, tagNumber, tagLVT, tagData) fields
    for debugging."""
    return tag.tagClass, tag.tagNumber, tag.tagLVT, tag.tagData
from dustmaps import sfd
from dustmaps import planck
def get_dustmap(sourcemap, useweb=False):
    """Get the dustmap (from the dustmaps package) of the given source.

    Parameters
    ----------
    sourcemap: [string]
        origin of the MW extinction information.
        currently implemented: planck, sfd
    useweb: [bool] -optional-
        shall this query from the web
        = only implemented for sfd, ignored otherwise =

    Returns
    -------
    dustmaps.Dustmap
    """
    name = sourcemap.lower()
    if name == "sfd":
        return sfd.SFDWebQuery() if useweb else sfd.SFDQuery()
    if name == "planck":
        return planck.PlanckQuery()
    raise NotImplementedError(f"Only Planck and SFD maps implemented. {sourcemap} given.")
def make_retro_pulse(x, y, z, zenith, azimuth):
    """Build a retro-type I3CLSimFlasherPulse emitted from position
    (x, y, z) with direction (zenith, azimuth), at time 0 with 10000
    unbiased photons.
    """
    pulse = I3CLSimFlasherPulse()
    pulse.type = I3CLSimFlasherPulse.FlasherPulseType.retro
    pulse.pos = I3Position(x, y, z)
    pulse.dir = I3Direction(zenith, azimuth)
    pulse.time = 0.0
    pulse.numberOfPhotonsNoBias = 10000.
    # The following values don't make a difference to the result but must
    # still be populated.
    pulse.pulseWidth = 1.0 * I3Units.ns
    pulse.angularEmissionSigmaPolar = 360.0 * I3Units.deg
    pulse.angularEmissionSigmaAzimuthal = 360.0 * I3Units.deg
    return pulse
def shiftLeft(col, numBits):
    """Shift the given value numBits left.

    >>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
    [Row(r=42)]
    """
    jvm_functions = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_functions.shiftLeft(_to_java_column(col), numBits))
def lerp(x0: float, x1: float, p: float) -> float:
    """
    Linearly interpolate between *x0* and *x1*: p=0 yields x0, p=1 yields
    x1, and values in between blend proportionally.
    """
    start_weight = 1 - p
    return start_weight * x0 + p * x1
def discard_events(library, session, event_type, mechanism):
    """Discard queued event occurrences for the given event types and
    mechanisms in a session.

    Thin wrapper over the viDiscardEvents function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param event_type: Logical event identifier.
    :param mechanism: Specifies event handling mechanisms to be disabled.
                      (Constants.QUEUE, .Handler, .SUSPEND_HNDLR, .ALL_MECH)
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    return library.viDiscardEvents(session, event_type, mechanism)
def getSpeed(spindle=0):
    """Gets the interpreter's speed setting for the specified spindle.

    Args:
        spindle (int, optional) : The number of the spindle to get the speed
            of. If ``spindle`` is not specified spindle 0 is assumed.
            NOTE(review): currently unused — the setting at STAT.settings[2]
            is returned regardless of this value; confirm against STAT layout.

    Returns:
        float: The interpreter speed setting, with any override applied if
        override enabled. Falls back to ``abs(DEFAULT_SPEED)`` when the
        raw setting is zero.
    """
    speed = STAT.settings[2]
    return speed if speed != 0 else abs(DEFAULT_SPEED)
def appointment() -> any:
    """
    Defines route to appointment booking page.

    GET renders the booking form (15-minute slots from 08:00 to 22:45,
    bookable up to 14 days ahead). POST validates the submitted form,
    stores the person, tries to book the appointment, flashes the outcome
    and redirects (homepage on success/duplicate, back to the form on a
    TypeError from booking).

    :return: String of HTML template for appointment booking page or homepage if booking was successful.
    """
    if request.method == 'POST':
        user_input = request.form.to_dict()
        try:
            # Project-level form validation; raises ValueError with a
            # user-readable message on bad input.
            request_is_valid(request=user_input)
        except ValueError as e:
            flash(f"{e}", category='error')
            # Re-render the form, preserving what the user already typed.
            return render_template('appointment.html',
                                   user_input=user_input,
                                   slots=[dt.time(hour=h, minute=m) for h in range(8, 23) for m in [0, 15, 30, 45]],
                                   today=dt.date.today(),
                                   max_days=dt.date.today() + dt.timedelta(days=14))
        # Persist the person before attempting to book the slot.
        add_person(person=user_input)
        try:
            # True if the slot was free and is now booked, False if it was
            # already taken.
            app_added = add_appointment(email=user_input['email1'],
                                        appointment_day=user_input['appointment_day'],
                                        appointment_time=user_input['appointment_time'])
        except TypeError as e:
            flash(f"{e}", category='error')
            return redirect(url_for('views.appointment'))
        if app_added:
            # Only email a confirmation for newly created bookings.
            send_booking_confirmation(email=user_input['email1'],
                                      first_name=user_input['first_name'],
                                      appointment_day=user_input['appointment_day'],
                                      appointment_time=user_input['appointment_time'])
            flash('Appointment booked successfully! Please check your inbox for the booking confirmation.',
                  category='success')
        else:
            flash('Appointment is already booked! Please check your inbox for the booking confirmation.',
                  category='error')
        return redirect(url_for('views.home'))
    # GET: render an empty booking form.
    return render_template('appointment.html',
                           slots=[dt.time(hour=h, minute=m) for h in range(8, 23) for m in [0, 15, 30, 45]],
                           today=dt.date.today(),
                           max_days=dt.date.today()+dt.timedelta(days=14))
import os
def getfile(basedir, manifest_value, user_argument):
    """Get name for a file that is referenced in a workflow manifest.

    If the user argument is given it overrides the respective value in the
    manifest. For user arguments we first assume that the path references a
    file on disk, either as absolute path or as a path relative to the
    current working directory. If no file exists at the specified location
    an attempt is made to read the file relative to the base directory. For
    manifest values, they are always assumed to be relative to the base
    directory.

    Parameters
    ----------
    basedir: string
    manifest_value: string
        Relative path to the file in the base directory.
    user_argument: string
        User provided value that overrides the manifest value. This value
        can be None.

    Returns
    -------
    string
    """
    if user_argument is None:
        # No override: manifest entries are always relative to basedir.
        return os.path.join(basedir, manifest_value)
    if os.path.isfile(user_argument):
        # The user value names an existing file (absolute or CWD-relative).
        return user_argument
    # Otherwise interpret the user value relative to the base directory.
    return os.path.join(basedir, user_argument)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.