content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def white_noise(template, rms_uKarcmin_T, rms_uKarcmin_pol=None):
    """Generate a white noise realisation matching the template pixellisation.

    Parameters
    ----------
    template: ``so_map`` template
        the template for the white noise generalisation
    rms_uKarcmin_T: float
        the white noise temperature rms in uK.arcmin
    rms_uKarcmin_pol: float
        the white noise polarisation rms in uK.arcmin;
        if None it is set to sqrt(2) * rms_uKarcmin_T
    """
    noise = template.copy()
    rad_to_arcmin = 60 * 180 / np.pi
    # Pixel area in arcmin^2: a scalar for HEALPIX, a per-pixel map for CAR.
    if noise.pixel == "HEALPIX":
        pixArea = hp.pixelfunc.nside2pixarea(noise.nside) * rad_to_arcmin ** 2
    if noise.pixel == "CAR":
        pixArea = noise.data.pixsizemap() * rad_to_arcmin ** 2

    if noise.ncomp == 1:
        if noise.pixel == "HEALPIX":
            npix = len(noise.data)
            noise.data = np.random.randn(npix) * rms_uKarcmin_T / np.sqrt(pixArea)
        if noise.pixel == "CAR":
            ny, nx = noise.data.shape
            noise.data = np.random.randn(ny, nx) * rms_uKarcmin_T / np.sqrt(pixArea)
    if noise.ncomp == 3:
        if rms_uKarcmin_pol is None:
            rms_uKarcmin_pol = rms_uKarcmin_T * np.sqrt(2)
        # One rms per Stokes component: T, Q, U.  Drawing component-by-component
        # keeps the RNG consumption order identical to independent draws.
        rms_per_comp = (rms_uKarcmin_T, rms_uKarcmin_pol, rms_uKarcmin_pol)
        if noise.pixel == "HEALPIX":
            npix = len(noise.data[0])
            for comp, rms in enumerate(rms_per_comp):
                noise.data[comp] = np.random.randn(npix) * rms / np.sqrt(pixArea)
        if noise.pixel == "CAR":
            ny, nx = noise.data[0].shape
            for comp, rms in enumerate(rms_per_comp):
                noise.data[comp] = np.random.randn(ny, nx) * rms / np.sqrt(pixArea)
    return noise
def run_state_machine(state, rate, domain=None):
    """Convenience method for running a state machine.

    Equivalent to ``RunState(state, rate, domain).run()``; ``domain`` is
    forwarded unchanged and defaults to None.
    """
    RunState(state, rate, domain).run()
def index():
    """Process a request to the site root by rendering the index template."""
    return render_template('index.html')
def aromatic_bonds(mol: IndigoObject) -> dict:
    """Get whether bonds in a molecule are aromatic or not.

    Args:
        IndigoObject: molecule object
    Returns:
        dict: key - feature name, value - torch.tensor of booleans
    """
    # Bond order 4 is the value compared against for each bond here,
    # presumably Indigo's encoding for an aromatic bond — TODO confirm.
    flags = [bond.bondOrder() == 4 for bond in mol.iterateBonds()]
    # The list is duplicated (flags * 2) before tensor conversion, doubling
    # the entries exactly as the original implementation did.
    return {"is_aromatic": torch.tensor(flags * 2).unsqueeze(1).float()}
def dt_bestrefs_na_undefined_single_ctx_na_matters():
    """
    >>> old_state = test_config.setup()

    Instantiate a BestrefsScript with default settings and dummy parameters, simulating pipeline bestrefs defaults:

    >>> script = BestrefsScript(argv="crds.bestrefs --load-pickles data/bestrefs.special.json --na-differences-matter --new-context hst_0315.pmap")
    >>> script.complex_init()
    CRDS - INFO - Loading file 'data/bestrefs.special.json'
    CRDS - INFO - Loaded 1 datasets from file 'data/bestrefs.special.json' completely replacing existing headers.
    CRDS - INFO - No comparison context or source comparison requested.
    True

    Skip ahead past bestrefs (faked) into special values results evaluation.

    First a positive result: OK, lowercase-original-result, uppercase-final-result-for-update

    >>> script.handle_na_and_not_found("New:", BESTREFS, "LA9K03C3Q", "COS", "FLATFILE")
    (True, 'X5V1944HL_FLAT.FITS')

    Second a formal N/A result: OK, bestrefs string, final update value

    >>> script.handle_na_and_not_found("New:", BESTREFS, "LA9K03C3Q", "COS", "BADTTAB")
    (True, 'N/A')

    An explicit response of UNDEFINED, by default converted to N/A for update, considered OK

    >>> script.handle_na_and_not_found("New:", BESTREFS, "LA9K03C3Q", "COS", "BRFTAB")
    (True, 'N/A')

    An implicit response of UNDEFINED, i.e. OMIT, also converted to N/A for update

    >>> script.handle_na_and_not_found("New:", BESTREFS, "LA9K03C3Q", "COS", "XTRACTAB")
    (True, 'N/A')

    An explicit error response from CRDS, not OK

    >>> script.handle_na_and_not_found("New:", BESTREFS, "LA9K03C3Q", "COS", "TRACETAB")
    CRDS - ERROR - instrument='COS' type='TRACETAB' data='LA9K03C3Q' :: New: Bestref FAILED: some other error
    (False, 'NOT FOUND SOME OTHER ERROR')

    >>> test_config.cleanup(old_state)
    """
def get_unitroot(df: pd.DataFrame, fuller_reg: str, kpss_reg: str) -> pd.DataFrame:
    """Calculate test statistics for unit roots.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame of target variable
    fuller_reg : str
        Type of regression of ADF test
    kpss_reg : str
        Type of regression for KPSS test

    Returns
    -------
    pd.DataFrame
        Dataframe with results of ADF test and KPSS test
    """
    # Augmented Dickey-Fuller test: unit root in a univariate process in the
    # presence of serial correlation.
    try:
        adf_res = adfuller(df, regression=fuller_reg)
    except MissingDataError:
        # adfuller rejects NaNs; drop them and retry once.
        df = df.dropna(axis=0)
        adf_res = adfuller(df, regression=fuller_reg)
    row_labels = ["Test Statistic", "P-Value", "NLags", "Nobs", "ICBest"]
    adf_vals = [adf_res[0], adf_res[1], adf_res[2], adf_res[3], adf_res[5]]
    data = pd.DataFrame(data=adf_vals, index=row_labels, columns=["ADF"])

    # Kwiatkowski-Phillips-Schmidt-Shin test for level or trend stationarity.
    # kpss emits an InterpolationWarning when the statistic is outside the
    # range of its p-value look-up table (the actual p-value is then greater
    # than the returned one); suppress that warning.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        kpss_res = kpss(df, regression=kpss_reg, nlags="auto")
    data["KPSS"] = [kpss_res[0], kpss_res[1], kpss_res[2], "", ""]
    return data
def visit_desc(translator: LaTeXTranslator, node: addnodes.desc) -> None:
    """
    Visit an :class:`addnodes.desc` node and add a custom table of contents label for the item, if required.

    :param translator: the active LaTeX translator receiving output.
    :param node: the description node being visited.
    """
    if node["domain"] == "py":
        # Keep a Python object description together with the following lines
        # by requesting vertical space before it.
        translator.body.append(r"\needspace{5\baselineskip}")
    if "sphinxcontrib.toctree_plus" in translator.config.extensions:
        # 3rd party: delegate to toctree_plus when it is enabled, so its
        # custom ToC handling applies.
        from sphinxcontrib import toctree_plus  # nodep

        toctree_plus.visit_desc(translator, node)
    else:
        # Fall back to Sphinx's stock behaviour.
        LaTeXTranslator.visit_desc(translator, node)
def lat_avg(data, lat_wgt):
    """Average data over the latitude axis using the given weights.

    Inputs:
    data - n dimensional spatial data. The last 2 dimensions are assumed
        to be lat and lon respectively
    lat_wgt - 1-d weights by latitude"""
    if len(lat_wgt) == 1:
        # A single weight applies uniformly to every grid point.
        weights = np.broadcast_to(lat_wgt, data.shape)
    elif len(lat_wgt.shape) > 1:
        raise ValueError("lat_wgt must be 1 dimensional latitude weights")
    else:
        # Expand (nlat,) -> (nlat, nlon), then up to the full data shape.
        lat_lon = np.broadcast_to(lat_wgt.reshape(len(lat_wgt), 1), data.shape[-2:])
        weights = np.broadcast_to(lat_lon, data.shape)
    return (data * weights).mean(axis=-2)
def create_line(line_coefficients, height=5, step=0.5, vis=False):
    """
    Args:
        line_coefficients: A dictionary containing cylindrical coefficients:
            (r, x0, y0, z0, a, b, c
            r not used: kept so the same group of coefficients can generate
            both a cylinder and a line, the line being the central axis of
            the cylinder
            x0, y0, z0: the starting center of the cylinder
            a, b, c: the axis coefficients of the cylinder)
        height: length of the line
        step: density of the line point cloud
        vis: whether to visualize the line
    Returns:
        numpy form of the line point cloud: n x 3
    @Author: Carlos_Lee 202111
    """
    x0 = line_coefficients['x0']
    y0 = line_coefficients['y0']
    z0 = line_coefficients['z0']
    a = line_coefficients['a']
    b = line_coefficients['b']
    c = line_coefficients['c']
    # Distances along the axis at which points are sampled.
    v = np.arange(0, height, step)
    # Normalise the direction vector once instead of three times per point
    # (the original recomputed the norm inside the loop for x, y and z).
    norm = np.power(a * a + b * b + c * c, 0.5)
    direction = np.array([a, b, c], dtype=float) / norm
    # Vectorised: origin + t * unit_direction for every sample distance t.
    npy = np.array([x0, y0, z0], dtype=float) + v[:, None] * direction
    if vis:
        coordinate_ = o3d.geometry.TriangleMesh.create_coordinate_frame(size=height / 2., origin=[0.0, 0.0, 0.0])
        pcd_ = o3d.geometry.PointCloud()
        pcd_.points = o3d.utility.Vector3dVector(npy)
        o3d.visualization.draw_geometries([coordinate_, pcd_], window_name="generate line",
                                          width=960, height=900, left=960, top=100)
    return npy
def get_result_summaries_query(start, end, sort, state, tags):
    """Returns TaskResultSummary.query() with these filters.

    Arguments:
      start: Earliest creation date of retrieved tasks.
      end: Most recent creation date of retrieved tasks, normally None.
      sort: Order to use. Must default to 'created_ts' to use the default. Cannot
          be used along start and end.
      state: One of State enum value as str. Use 'all' to get all tasks.
      tags: List of search for one or multiple task tags.
    """
    # Disable the in-process local cache. This is important, as there can be up to
    # a thousand entities loaded in memory, and this is a pure memory leak, as
    # there's no chance this specific instance will need these again, therefore
    # this leads to 'Exceeded soft memory limit' AppEngine errors.
    q = TaskResultSummary.query(
        default_options=ndb.QueryOptions(use_cache=False))
    # Filter by one or more tags.
    if tags:
        # Add TaskResultSummary indexes if desired.
        if sort != 'created_ts':
            raise ValueError(
                'Add needed indexes for sort:%s and tags if desired' % sort)
        for tag in tags:
            # Each tag must be 'key:value'; reject empty parts or parts with
            # surrounding whitespace.
            parts = tag.split(':', 1)
            if len(parts) != 2 or any(i.strip() != i or not i for i in parts):
                raise ValueError('Invalid tags')
            # The value side may contain several alternatives separated by
            # OR_DIM_SEP; they are ORed via a single IN filter on the key.
            values = parts[1].split(OR_DIM_SEP)
            separated_tags = ['%s:%s' % (parts[0], v) for v in values]
            q = q.filter(TaskResultSummary.tags.IN(separated_tags))
    return filter_query(TaskResultSummary, q, start, end, sort, state)
def approx_jacobian(tform, image, delta=0.01):
    """Approximate the image pixel gradient wrt tform via central differences.

    (This has been so helpful while troubleshooting jacobians,
    let's keep it around for unit testing.)

    Parameters
    ----------
    tform : TForm
        current transform, to be applied to image and its gradient
    image : ndarray(h, v)
        untransformed image
    delta : real or ndarray(nparams)
        stepsize

    Returns
    -------
    jacobian : ndarray(h * v, nparams)
        transformation parameter derivatives at each image pixel.
        out-of-bounds points will be populated with 0's
    """
    nparams = len(tform.paramv)
    if not isinstance(delta, np.ndarray):
        # Scalar step: use the same step for every parameter.
        delta = np.ones(nparams) * delta
    npixels = np.prod(tform.output_shape)
    jac = np.empty((npixels, nparams))
    for k in range(nparams):
        # Central difference: f(p + d) - f(p - d), accumulated with signs.
        diff_img = np.zeros(tform.output_shape)
        for sign in (-1, 1):
            stepped = tform.paramv.copy()
            stepped[k] += delta[k] * sign
            diff_img += tform.clone(stepped).imtransform(image) * sign
        jac[:, k] = (diff_img / (2 * delta[k])).flatten()
    # Out-of-bounds pixels come back NaN; report them as 0.
    return np.nan_to_num(jac)
def assert_equal(
    actual: statsmodels.tools.tools.Bunch, desired: statsmodels.tools.tools.Bunch
):
    """
    usage.statsmodels: 8
    """
    # Auto-generated API-usage stub: records the signature only, no body.
    ...
def built_in_demo():
    """Using built-in sequence functions (doctest demo of lazy iteration).

    >>> bcd = ['b', 'c', 'd']
    >>> [x.upper() for x in bcd]
    ['B', 'C', 'D']
    >>> caps = map(lambda x: x.upper(), bcd)
    >>> next(caps)
    'B'
    >>> next(caps)
    'C'
    >>> s = range(3, 7)
    >>> doubled = map(double, s)
    >>> next(doubled)
    *** 3 => 6 ***
    6
    >>> next(doubled)
    *** 4 => 8 ***
    8
    >>> list(doubled)
    *** 5 => 10 ***
    *** 6 => 12 ***
    [10, 12]
    >>> f = lambda x: x < 10
    >>> a = filter(f, map(double, reversed(s)))
    >>> list(a)
    *** 6 => 12 ***
    *** 5 => 10 ***
    *** 4 => 8 ***
    *** 3 => 6 ***
    [8, 6]
    >>> t = [1, 2, 3, 2, 1]
    >>> reversed(t) == t
    False
    >>> list(reversed(t)) == t
    True
    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> items = zip(d.keys(), d.values())  # Call next(items)
    """
def create_collection(collection_id: str) -> Collection:
    """Creates a STAC Collection for Landsat Collection 2 Level-1 or Level-2
    data.

    Args:
        collection_id (str): ID of the STAC Collection. Must be one of
            "landsat-c2-l1" or "landsat-c2-l2".
    Returns:
        Collection: The created STAC Collection.
    Raises:
        ValueError: if ``collection_id`` is not in ``COLLECTION_IDS``.
    """
    if collection_id not in COLLECTION_IDS:
        raise ValueError(f"Invalid collection id: {collection_id}")
    # All static collection metadata comes from a pre-built fragment.
    fragment = CollectionFragments(collection_id).collection()
    collection = Collection(id=collection_id,
                            title=fragment["title"],
                            description=fragment["description"],
                            license=fragment["license"],
                            keywords=fragment["keywords"],
                            providers=fragment["providers"],
                            extent=fragment["extent"],
                            summaries=fragment["summaries"])
    collection.add_links(fragment["links"])
    # Declare the per-item asset definitions from the fragment.
    item_assets = ItemAssetsExtension(collection)
    item_assets.item_assets = fragment["item_assets"]
    # Register every STAC extension this collection advertises.
    ItemAssetsExtension.add_to(collection)
    ViewExtension.add_to(collection)
    ScientificExtension.add_to(collection)
    RasterExtension.add_to(collection)
    EOExtension.add_to(collection)
    return collection
def rician_noise(image, sigma, rng=None):
    """
    Add Rician distributed noise to the input image.

    Parameters
    ----------
    image : array-like, shape ``(dim_x, dim_y, dim_z)`` or ``(dim_x, dim_y,
        dim_z, K)``
    sigma : double
        Standard deviation of the two underlying Gaussian components.
    rng : numpy.random.RandomState, optional
        Random number generator. A fresh RandomState is created when None
        (previously a None rng crashed with AttributeError).

    Returns
    -------
    ndarray
        The noisy image, same shape as ``image``.
    """
    if rng is None:
        rng = np.random.RandomState()
    n1 = rng.normal(loc=0, scale=sigma, size=image.shape)
    n2 = rng.normal(loc=0, scale=sigma, size=image.shape)
    # Magnitude of a complex Gaussian perturbation -> Rician distribution.
    return np.sqrt((image + n1)**2 + n2**2)
def compose_pinned_post(post):
    """
    1. Verify that this is the pinned post
    2. Obtain the results json from the results rig
    3. Compose the HTML for the compact graphic

    Returns the post dict with 'cached_contents' / 'cached_headline' set,
    backed by the 'pinned' MongoDB collection.
    """
    pinned_post = post
    # Get the pinned-posts collection from MongoDB.
    client = MongoClient(app_config.MONGODB_URL)
    database = client['liveblog']
    collection = database.pinned
    try:
        post['pinned']
    except KeyError:
        # NOTE(review): only logs — processing continues even when the post
        # is not actually the pinned one. Confirm this is intentional.
        logger.error("First post should always be the pinned post")
    # Cache pinned post contents
    if post['published mode'] != 'yes':
        # Unpublished: serve (and seed) the cached copy keyed by slug.
        result = collection.find_one({'_id': post['slug']})
        if not result:
            logger.debug('did not find pinned post %s' % post['slug'])
            collection.insert({
                '_id': post['slug'],
                'cached_contents': post['contents'],
                'cached_headline': post['headline'],
            })
            post['cached_contents'] = post['contents']
            post['cached_headline'] = post['headline']
        else:
            logger.debug('found pinned post %s' % post['slug'])
            post['cached_contents'] = result['cached_contents']
            post['cached_headline'] = result['cached_headline']
            logger.debug('returning cached headline %s' % (
                post['cached_headline']))
    else:
        # Published: refresh the MongoDB cache from the live post.
        post['cached_contents'] = post['contents']
        post['cached_headline'] = post['headline']
        logger.debug("update cached headline to %s" % post['headline'])
        collection.update({'_id': post['slug']},
                          {'cached_contents': post['contents'],
                           'cached_headline': post['headline']})
    return pinned_post
def panLeft(self):
    """
    Pan the active view left by pushing an undoable navigation command
    onto the view's undo stack.
    """
    qDebug("panLeft()")
    gview = self.activeView()  # View*
    # Guard gview before dereferencing it: the original called
    # gview.getUndoStack() first and only then checked gview, which raises
    # AttributeError when there is no active view.
    if not gview:
        return
    stack = gview.getUndoStack()  # QUndoStack*
    if stack:
        cmd = UndoableNavCommand("PanLeft", gview, None)
        stack.push(cmd)
def compute_composition_df(seq_df):
    """
    Compute the composition matrix for all proteins.

    Args:
        seq_df: df, dataframe with sequences
    Returns:
        df, with the composition of the proteins
    """
    # Per-sequence amino-acid composition dicts, expanded into columns
    # (* 1.0 forces float dtype).
    compositions = seq_df["sequence"].apply(parser.amino_acid_composition).values
    comp_df = pd.DataFrame(list(compositions)) * 1.0
    # Guarantee a column for every standard amino acid, even those that never
    # occurred in the protein fasta file.
    for amino_acid in parser.std_amino_acids:
        if amino_acid not in comp_df.columns:
            comp_df[amino_acid] = 0
    comp_df = comp_df.fillna(0.0)
    comp_df.index = seq_df.index
    return comp_df
def make_gradient_squared(
    grid: CylindricalSymGrid, central: bool = True
) -> OperatorType:
    """make a discretized gradient squared operator for a cylindrical grid

    {DESCR_CYLINDRICAL_GRID}

    Args:
        grid (:class:`~pde.grids.cylindrical.CylindricalSymGrid`):
            The grid for which the operator is created
        central (bool):
            Whether a central difference approximation is used for the gradient
            operator. If this is False, the squared gradient is calculated as
            the mean of the squared values of the forward and backward
            derivatives.

    Returns:
        A function that can be applied to an array of values
    """
    # use parallel processing for large enough arrays
    dim_r, dim_z = grid.shape
    parallel = dim_r * dim_z >= config["numba.parallel_threshold"]
    if central:
        # use central differences; 0.25 = (1/2)^2 from the central-difference
        # stencil, squared
        scale_r, scale_z = 0.25 / grid.discretization ** 2

        @jit(parallel=parallel)
        def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:
            """apply gradient operator to array `arr`"""
            # indices 1..dim assume one ghost cell on each side of both axes
            for i in nb.prange(1, dim_r + 1):  # iterate radial points
                for j in range(1, dim_z + 1):  # iterate axial points
                    term_r = (arr[i + 1, j] - arr[i - 1, j]) ** 2
                    term_z = (arr[i, j + 1] - arr[i, j - 1]) ** 2
                    out[i, j] = term_r * scale_r + term_z * scale_z
    else:
        # use forward and backward differences; 0.5 averages the two squared
        # one-sided derivatives
        scale_r, scale_z = 0.5 / grid.discretization ** 2

        @jit(parallel=parallel)
        def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:
            """apply gradient operator to array `arr`"""
            for i in nb.prange(1, dim_r + 1):  # iterate radial points
                for j in range(1, dim_z + 1):  # iterate axial points
                    arr_z_l, arr_c, arr_z_h = arr[i, j - 1], arr[i, j], arr[i, j + 1]
                    term_r = (arr[i + 1, j] - arr_c) ** 2 + (arr_c - arr[i - 1, j]) ** 2
                    term_z = (arr_z_h - arr_c) ** 2 + (arr_c - arr_z_l) ** 2
                    out[i, j] = term_r * scale_r + term_z * scale_z
    return gradient_squared
def test_frame_attr_getattr():
    """
    When accessing frame attributes like equinox, the value should come
    from self.frame when that object has the relevant attribute, otherwise
    from self.
    """
    # ICRS has no equinox/obstime attributes, so the raw values stick.
    sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
    assert sc.equinox == 'J1999'  # Just the raw value (not validated)
    assert sc.obstime == 'J2001'
    # FK4 owns both attributes, so the frame validates them into Time objects.
    sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
    assert sc.equinox == Time('J1999')  # Coming from the self.frame object
    assert sc.obstime == Time('J2001')
    # With no obstime given, FK4 defaults obstime to the equinox.
    sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999')
    assert sc.equinox == Time('J1999')
    assert sc.obstime == Time('J1999')
def state(git_root):
    """Hash the observable state of a .git directory.

    The state is derived from verbose ``git fsck`` output plus the names and
    values of all refs; a missing directory hashes to 0.
    """
    if not git_root.is_dir():
        return 0
    fsck_cmd = "git fsck --full -v".split()
    _, out, err = util.captured_run(*fsck_cmd, cwd=git_root)
    refs = "".join(ref.name + ref.value for ref in collect_refs(git_root))
    return hash(out + err + refs)
def search_handler(data_type_name, search_key=None, search_value=None):
    """
    Purpose: Adapt PathError and QueryError to appropriate Django error types.
    Input Parameters:
        data_type_name - One of the searchable types 'PasswordData' or 'GroupData'.
        search_key - Name of searchable field for type specified. Optional, default = None.
        search_value - Value of defined field to match from data, default = None.
    Return: list of matching results (non-empty).
    Exceptions: Http404 on QueryError or when no results match,
        ImproperlyConfigured on PathError
    """
    result_list = []
    try:
        # Dispatch to the configured search backend.
        search_type = settings.PWDSVC_SEARCH
        if search_type == 'DataBaseSearch':
            db_search = DataBaseSearch(DATAMGR)
            result_list = db_search.search(data_type_name, search_key, search_value)
        else:
            result_list = DATAMGR.search(data_type_name, search_key, search_value)
    # 'except X as e' replaces the Python-2-only 'except X, e' form, which is
    # a syntax error on Python 3; 'as' works on Python 2.6+ and 3.
    except PathError as path_error:
        raise ImproperlyConfigured(path_error)
    except QueryError as query_error:
        raise Http404(query_error)
    if not result_list:
        raise Http404('No results.')
    return result_list
def render_CardsCounter_edit(self, h, comp, *args):
    """Render the editable card-count limit for the associated column.

    Builds a small form whose text input validates on blur; Enter triggers
    blur via the injected JS, and non-digit characters are stripped live.
    """
    text = var.Var(self.text)
    with h.div(class_='list-counter'):
        with h.div(class_='cardCounter'):
            with h.form(onsubmit='return false;'):
                # Extract the ajax 'onclick' handler so it can be reused as
                # the input's onblur action.
                action = h.input(type='submit').action(lambda: self.validate(text(), comp)).get('onclick')
                id_ = h.generate_id()
                h << h.input(id=id_, type='text', value=self.column.nb_max_cards or '', onblur=action).action(text)
                # Enter key blurs the field (committing the value); any
                # non-digit typed is removed immediately.
                h << h.script(
                    """YAHOO.util.Event.on(%s, 'keyup', function (e) {
if (e.keyCode == 13) {
e.preventDefault();
this.blur();
}
var result = this.value.replace(/[^0-9]/g, '')
if (this.value !=result) {
this.value = result;
}
});""" % ajax.py2js(id_)
                )
                h << h.script(
                    "YAHOO.kansha.app.selectElement(%s);" % ajax.py2js(id_)
                )
                if self.error is not None:
                    with h.div(class_='nagare-error-message'):
                        h << self.error
    return h.root
def get_embedding_tids(tids, mapping):
    """Obtain token IDs based on our own tokenization, through the mapping to BERT tokens."""
    bert_ids = []
    for tid in tids:
        bert_ids.extend(mapping[tid])
    return bert_ids
def reloadConfiguration():
    """Reloads the configuration.

    This can be used for reloading a new configuration from disk.
    At the present time it has no use other than setting different configurations
    for testing, since the framework is restarted every time an analysis is
    performed."""
    global _config
    # Start from hard-coded defaults; a config file (if found) overrides them.
    _config = { BASE_DIR:'evaluation_system',
                BASE_DIR_LOCATION: os.path.expanduser('~'),
                DIRECTORY_STRUCTURE_TYPE: DIRECTORY_STRUCTURE.LOCAL,
                PLUGINS: {}}
    #now check if we have a configuration file, and read the defaults from there
    config_file = os.environ.get(_DEFAULT_ENV_CONFIG_FILE,
                                 _DEFAULT_CONFIG_FILE_LOCATION)
    log.debug("Loading configuration file from: %s", config_file)
    if config_file and os.path.isfile(config_file):
        config_parser = SafeConfigParser()
        with open(config_file, 'r') as fp:
            config_parser.readfp(fp)
        if not config_parser.has_section(CONFIG_SECTION_NAME):
            raise ConfigurationException(("Configuration file is missing section %s.\n"
                                          + "For Example:\n[%s]\nprop=value\n...") \
                                         % (CONFIG_SECTION_NAME, CONFIG_SECTION_NAME))
        else:
            _config.update(config_parser.items(CONFIG_SECTION_NAME))
            # Every '[plugins*]' section defines one plugin configuration,
            # keyed by the section name minus the 'plugins' prefix.
            for plugin_section in [s for s in config_parser.sections() if s.startswith(PLUGINS)]:
                _config[PLUGINS][plugin_section[len(PLUGINS):]] = \
                    SPECIAL_VARIABLES.substitute(dict(config_parser.items(plugin_section)))
            log.debug('Configuration loaded from %s', config_file)
    else:
        log.debug('No configuration file found in %s. Using default values.',
                  config_file)
    # Resolve special variable placeholders in the top-level config.
    _config = SPECIAL_VARIABLES.substitute(_config, recursive=False)
    #perform all special checks
    if not DIRECTORY_STRUCTURE.validate(_config[DIRECTORY_STRUCTURE_TYPE]):
        raise ConfigurationException("value (%s) of %s is not valid. Should be one of: %s" \
                                     % (_config[DIRECTORY_STRUCTURE_TYPE], DIRECTORY_STRUCTURE_TYPE,
                                        ', '.join(DIRECTORY_STRUCTURE.toDict().values())))
def delete_configuration(timestamp: AnyStr) -> AnyStr:
    """
    Delete the configuration folder.

    :timestamp (AnyStr) The name of the configuration to delete

    Return the name of the deleted configuration. Exits the process
    (sys.exit(1)) when the directory does not exist.
    """
    rating_rates_dir = envvar('RATING_RATES_DIR')
    # Serialise concurrent configuration changes via a lock file.
    with Lockfile(rating_rates_dir):
        try:
            shutil.rmtree('{}/{}'.format(rating_rates_dir, timestamp))
        except OSError as err:
            logging.error(
                f'An error happened while removing {timestamp} configuration directory.')
            if err.errno == errno.ENOENT:
                logging.error(
                    f'Configuration directory {timestamp} does not exist.')
                sys.exit(1)
            # NOTE(review): any other OSError is only logged and the function
            # still returns the timestamp as if it succeeded — confirm intent.
        else:
            logging.info(f'removed {timestamp} configuration')
    return timestamp
def extract_stream_url(ashx_url):
    """ Extract real stream url from tunein stream url """
    response = requests.get(ashx_url)
    # The .ashx playlist lists one stream URL per line; return the first
    # non-empty line (None if every line is empty).
    for line in response.text.splitlines():
        if line:
            return line
def check_for_rematch(player_id1, player_id2):
    """Checks whether the two players specified have played a match before.

    Args:
        player_id1: ID of first player
        player_id2: ID of second player
    Returns:
        Bool: True if they have met before, False if they have not.
    """
    # EXISTS short-circuits on the first matching row; the winner/loser
    # columns are checked in both orientations.
    query = """SELECT EXISTS(SELECT 1
                             FROM matches
                             WHERE winner_pid=%(id1)s AND loser_pid=%(id2)s
                             OR winner_pid=%(id2)s AND loser_pid=%(id1)s);"""
    parameter = {'id1': player_id1, 'id2': player_id2}
    with connect_to_db() as database:
        database['cursor'].execute(query, parameter)
        is_rematch = database['cursor'].fetchone()[0]
    return is_rematch


# Credits
# Idea for using the EXISTS PSQL keyword found on this Stack Overflow page:
# http://stackoverflow.com/questions/7471625/
def nextrandombitsAES(cipher, bitlength):
    """
    <Purpose>
        generate random bits using AES-CTR
    <Arguments>
        bitlength: the length of the random string in BITS
    <Side Effects>
        Increases the AES counter
    <Returns>
        A random string with the supplied bitlength (the rightmost bits are zero if bitlength is not a multiple of 8)
    """
    # offset for the last byte
    bytelength = bits_to_bytes(bitlength)
    bitoffset = bitlength % 8
    if bitoffset > 0:
        # if the bitlength is not a multiple of 8, clear the rightmost bits:
        # encrypt all but the final byte, then mask the final byte so only
        # the top `bitoffset` bits survive.
        pt = (bytelength - 1) * b'\0'
        randombytes = cipher.encrypt(pt)
        b = cipher.encrypt(b'\0')
        # 0xff00 >> bitoffset keeps the high `bitoffset` bits of the byte.
        b = (b[0] & ((0xff00 >> bitoffset) & 0xff)).to_bytes(1, byteorder = 'big')
        randombytes += b
        return randombytes
    else:
        # whole-byte request: encrypt a zero block of exactly that length
        pt = bytelength * b'\0'
        return cipher.encrypt(pt)
def _binparams2img(mc, param):
    """
    Build a 2-D image of one parameter across all bins of a molecular cloud.

    Parameters
    ----------
    mc : dict
        Molecular cloud dimensions
    param : str
        Parameter name; must be one of ``sos.all_params``
    Returns
    -------
    (param_matrix, dims): the per-bin parameter image and its (sx, sy) shape
    """
    if not param in sos.all_params:
        raise Exception('Parameter not valid')
    # Get binned or full dimensions
    dims, nbins, mc_binned = _get_mc_dims(mc)
    sx, sy = dims
    # Define parameter matrix
    param_matrix = np.zeros((sx, sy))
    # Scan all the bins
    for b in range(nbins):
        if mc_binned:
            # Get bin name (bins are stored under keys 'B0', 'B1', ...)
            name = 'B'+str(b)
            # Get coordinates
            i, j = mc[name]['pos']
            if not mc[name]['flag']:
                # Get parameter value
                m = mc[name][param]
            else:
                # Flagged bins are marked as NaN in the image.
                m = np.nan
        else:
            # Unbinned cloud: single value at the origin.
            i, j = 0, 0
            # Get parameter value
            m = mc[param]
        #if param == 'den':
        #    m = m/(1.28*1e3*1.672622e-27) # Neutral gas * proton mass [g]
        param_matrix[i][j] = m
    return param_matrix, dims
def setup_sample_weighting_columns(df):
    """Add exponential half-life sample-weight columns to ``df`` in place.

    A column ``N_season_half_life_weight`` weights a sample from season
    ``s`` by ``(2 ** (1/N)) ** (s - 2001)``, so samples count twice as much
    as those N seasons earlier. ``unweighted`` gives every sample weight 1.
    Requires a numeric ``season`` column; mutates ``df`` and returns None.
    """
    # samples will be weighted twice as highly compared to 1 season before
    df["1_season_half_life_weight"] = (2**1) ** (df["season"] - 2001)
    # samples will be weighted twice as highly compared to 2 seasons before
    df["2_season_half_life_weight"] = (2**(1/2)) ** (df["season"] - 2001)
    # samples will be weighted twice as highly compared to 4 seasons before
    df["4_season_half_life_weight"] = (2**(1/4)) ** (df["season"] - 2001)
    # samples will be weighted twice as highly compared to 8 seasons before
    df["8_season_half_life_weight"] = (2**(1/8)) ** (df["season"] - 2001)
    # samples will be weighted twice as highly compared to 16 seasons before
    df["16_season_half_life_weight"] = (2**(1/16)) ** (df["season"] - 2001)
    # constant weight: every sample counts equally
    df["unweighted"] = 1
def arccos(x):
    """
    Compute the inverse cosine of x.

    Return the "principal value" (for a description of this, see
    `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
    `abs(x) <= 1`, this is a real number in the closed interval
    :math:`[0, \\pi]`. Otherwise, the complex principle value is returned.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose arccos is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
        is `out`, otherwise an array object is returned.

    See Also
    --------
    numpy.arccos

    Notes
    -----
    For an arccos() that returns ``NAN`` when real `x` is not in the
    interval ``[-1,1]``, use `numpy.arccos`.

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.arccos(1) # a scalar is returned
    0.0

    >>> np.emath.arccos([1,2])
    array([ 0.-0.j   ,  0.+1.317j])

    """
    # Promote real inputs with |x| > 1 to complex so a principal value exists.
    x = _fix_real_abs_gt_1(x)
    return nx.arccos(x)
def assert_topic_lists_are_equal_without_automatic_topics(expected, actual):
    """Check for equality in topic lists after filtering topics that start with
    an underscore."""
    non_automatic = [topic for topic in actual if not topic.startswith('_')]
    assert expected == non_automatic
def request_authentication(user, organization_id, short_code):
    """
    Request for an authentication token from Safaricom's MPesa API.

    Looks up the API account owned by ``user`` for the organization and
    short code (404 if absent), then authenticates against production or
    sandbox depending on the account's ``in_production`` flag.
    """
    mpesa_api_account = get_object_or_404(
        MpesaAPIAccount.objects.filter(
            organization__owner=user,
            linked_account__identifier=short_code,
            organization__organization_id=organization_id
        ))
    return api.authenticate(
        env="production" if mpesa_api_account.in_production else "sandbox",
        app_key=mpesa_api_account.consumer_key,
        app_secret=mpesa_api_account.consumer_secret)
def test_basic():
    """
    The test launches the simulator, loads the system and prints the kinematic chain and state of the robot.
    """
    # Setup simulator
    kit, system = setup_kit_and_system()
    # display information (prints kinematic chain and robot state)
    system.robot.display()
def none_to_null(value):
    """Return the string "null" if the specified value is None, else return
    the value unchanged.

    (The original docstring stated the inverse of the actual behavior.)
    """
    # 'is None' is the correct identity test and avoids invoking a custom
    # __eq__ on value.
    return "null" if value is None else value
def get_dsd_url():
    """Returns the remote URL to the global SDMX DSD (Data Structure
    Definition) for the SDGs, hosted on the global SDMX registry."""
    return 'https://registry.sdmx.org/ws/public/sdmxapi/rest/datastructure/IAEG-SDGs/SDG/latest/?format=sdmx-2.1&detail=full&references=children'
def PrintUsageString():
    """Prints the correct call for running the sample."""
    # Adjacent string literals concatenate with no separator, so each
    # continuation line must end in a space; the original ran the script
    # name and flags together ("...py--consumer_key...").
    print ('python emailsettings_pop_settings.py '
           '--consumer_key [ConsumerKey] --consumer_secret [ConsumerSecret] '
           '--domain [domain]')
def range_closest(ranges, b, left=True):
    """
    Returns the range that's closest to the given position. Notice that the
    behavior is to return ONE closest range to the left end (if left is True).
    This is a SLOW method.

    >>> ranges = [("1", 30, 40), ("1", 33, 35), ("1", 10, 20)]
    >>> b = ("1", 22, 25)
    >>> range_closest(ranges, b)
    ('1', 10, 20)
    >>> range_closest(ranges, b, left=False)
    ('1', 33, 35)
    >>> b = ("1", 2, 5)
    >>> range_closest(ranges, b)
    """
    from jcvi.utils.orderedcollections import SortedCollection

    # Sort by (seqid, start, end) when searching leftward, by
    # (seqid, end, start) when searching rightward.
    key = (lambda x: x) if left else (lambda x: (x[0], x[2], x[1]))
    rr = SortedCollection(ranges, key=key)
    try:
        if left:
            # Closest range whose key is <= b.
            s = rr.find_le(b)
            assert key(s) <= key(b), (s, b)
        else:
            # Closest range whose key is >= b.
            s = rr.find_ge(b)
            assert key(s) >= key(b), (s, b)
    except ValueError:
        # No range on the requested side: return None (see last doctest).
        s = None
    return s
def _fill_array(data, mask=None, fill_value=None):
"""
Mask numpy array and/or fill array value without demasking.
Additionally set fill_value to value.
If data is not a MaskedArray and mask is None returns silently data.
:param mask: apply mask to array
:param fill_value: fill value
"""
if mask is not None and mask is not False:
data = np.ma.MaskedArray(data, mask=mask, copy=False)
if np.ma.is_masked(data) and fill_value is not None:
data._data[data.mask] = fill_value
np.ma.set_fill_value(data, fill_value)
# elif not np.ma.is_masked(data):
# data = np.ma.filled(data)
return data | 34,139 |
def make_Dog(size, name):
    """Create a dog entity and describe what it says."""
    dog = Dog(size=size, name=str(name))
    # A dog without a name gets the generic description.
    if dog.called() == "":
        return f"The {size} dog says {dog.talk()}."
    return f"{dog.called()}, the {size} dog says {dog.talk()}."
def get_spectra_onepixel(data, indx, MakeMock, seed, log, ntarget,
maxiter=1, no_spectra=False, calib_only=False):
"""Wrapper function to generate spectra for all targets on a single healpixel.
Parameters
----------
data : :class:`dict`
Dictionary with all the mock data (candidate mock targets).
indx : :class:`int` or :class:`numpy.ndarray`
Indices of candidate mock targets to consider.
MakeMock : :class:`desitarget.mock.mockmaker` object
Object to assign spectra to each target class.
seed: :class:`int`
Seed for the random number generator.
log : :class:`desiutil.logger`
Logger object.
ntarget : :class:`int`
Desired number of targets to generate.
maxiter : :class:`int`
Maximum number of iterations to generate targets.
no_spectra : :class:`bool`, optional
Do not generate spectra, e.g., for use with quicksurvey. Defaults to False.
calib_only : :class:`bool`, optional
Use targets as calibration (standard star) targets, only. Defaults to False.
Returns
-------
targets : :class:`astropy.table.Table`
Target catalog.
truth : :class:`astropy.table.Table`
Corresponding truth table.
objtruth : :class:`astropy.table.Table`
Corresponding objtype-specific truth table (if applicable).
trueflux : :class:`numpy.ndarray`
Array [npixel, ntarget] of observed-frame spectra. Only computed
and returned for non-sky targets and if no_spectra=False.
"""
targname = data['TARGET_NAME']
rand = np.random.RandomState(seed)
targets = list()
truth = list()
objtruth = list()
trueflux = list()
if ntarget == 0:
return [targets, truth, objtruth, trueflux]
# Generate the spectra iteratively until we achieve the required target
# density. Randomly divide the possible targets into each iteration.
iterseeds = rand.randint(2**31, size=maxiter)
rand.shuffle(indx)
iterindx = np.array_split(indx, maxiter)
makemore, itercount, ntot = True, 0, 0
while makemore:
chunkflux, _, chunktargets, chunktruth, chunkobjtruth = MakeMock.make_spectra(
data, indx=iterindx[itercount], seed=iterseeds[itercount], no_spectra=no_spectra)
MakeMock.select_targets(chunktargets, chunktruth, targetname=data['TARGET_NAME'])
keep = np.where(chunktargets['DESI_TARGET'] != 0)[0]
#if 'CONTAM_NAME' in data.keys():
# import pdb ; pdb.set_trace()
nkeep = len(keep)
if nkeep > 0:
ntot += nkeep
log.debug('Generated {} / {} ({} / {} total) {} targets on iteration {} / {}.'.format(
nkeep, len(chunktargets), ntot, ntarget, targname, itercount+1, maxiter))
targets.append(chunktargets[keep])
truth.append(chunktruth[keep])
if len(chunkobjtruth) > 0: # skies have no objtruth
objtruth.append(chunkobjtruth[keep])
if not no_spectra:
trueflux.append(chunkflux[keep, :])
itercount += 1
if itercount == maxiter or ntot >= ntarget:
if maxiter > 1:
log.debug('Generated {} / {} {} targets after {} iterations.'.format(
ntot, ntarget, targname, itercount))
makemore = False
else:
need = np.where(chunktargets['DESI_TARGET'] == 0)[0]
#import matplotlib.pyplot as plt
#noneed = np.where(chunktargets['DESI_TARGET'] != 0)[0]
#gr = -2.5 * np.log10( chunktargets['FLUX_G'] / chunktargets['FLUX_R'] )
#rz = -2.5 * np.log10( chunktargets['FLUX_R'] / chunktargets['FLUX_Z'] )
#plt.scatter(rz[noneed], gr[noneed], color='red', alpha=0.5, edgecolor='none', label='Made Cuts')
#plt.scatter(rz[need], gr[need], alpha=0.5, color='green', edgecolor='none', label='Failed Cuts')
#plt.legend(loc='upper left')
#plt.show()
if len(need) > 0:
# Distribute the objects that didn't pass target selection
# to the remaining iterations.
iterneed = np.array_split(iterindx[itercount - 1][need], maxiter - itercount)
for ii in range(maxiter - itercount):
iterindx[ii + itercount] = np.hstack( (iterindx[itercount:][ii], iterneed[ii]) )
if len(targets) > 0:
targets = vstack(targets)
truth = vstack(truth)
if ntot > ntarget: # Only keep up to the number of desired targets.
log.debug('Removing {} extraneous targets.'.format(ntot - ntarget))
keep = rand.choice(ntot, size=ntarget, replace=False)
targets = targets[keep]
truth = truth[keep]
if len(objtruth) > 0: # skies have no objtruth
objtruth = vstack(objtruth)
if ntot > ntarget:
objtruth = objtruth[keep]
if not no_spectra:
trueflux = np.concatenate(trueflux)
if ntot > ntarget:
trueflux = trueflux[keep, :]
return [targets, truth, objtruth, trueflux] | 34,141 |
def one_way_mi(df, feature_list, group_column, y_var, bins):
    """Compute one-way mutual information (MI) between each feature and both
    a group variable and a target variable.

    Parameters
    ----------
    df : pandas.DataFrame
        Features used to train the model, plus the target variable and the
        group column.
    feature_list : list of str
        Feature (column) names to evaluate.
    group_column : str
        Name of the column used for testing bias; should contain numeric
        categories.
    y_var : str
        Name of the target variable column.
    bins : tuple
        Number of histogram bins for each dimension.

    Returns
    -------
    pandas.DataFrame
        One row per feature in `feature_list`, with raw and max-scaled MI
        values for the group column and the target.
    """
    group_vals = df[group_column].values
    target_vals = df[y_var].values

    def _mi_against(other):
        # Joint histogram per feature, then MI from the contingency table.
        return [
            mutual_info_score(
                None,
                None,
                contingency=np.histogramdd([np.array(df[f]), other], bins=bins)[0],
            )
            for f in feature_list
        ]

    mi_table = pd.DataFrame({
        'feature': feature_list,
        group_column: _mi_against(group_vals),
        y_var: _mi_against(target_vals),
    })

    # NOTE: Scale group and y so the highest MI is 1, to make it easier to
    # interpret relative importance to bias (group) vs. performance (y).
    for col in (group_column, y_var):
        mi_table["{}_scaled".format(col)] = mi_table[col] / mi_table[col].max()

    return mi_table
def test_whole_range_same(check_ranges, accounts, nft):
    """whole range, merge both sides

    Moves sub-ranges out of accounts 1 and 2 and back into account 3 so that
    the final transfer (10001-20001) should merge with account 3's holdings
    on both sides into a single contiguous range.
    NOTE(review): assumes transferRange(to, start, stop, tx) uses half-open
    [start, stop) bounds and that check_ranges takes one expected range list
    per account — confirm against the fixtures.
    """
    nft.transferRange(accounts[3], 5000, 10001, {"from": accounts[1]})
    nft.transferRange(accounts[1], 25001, 30001, {"from": accounts[3]})
    nft.transferRange(accounts[3], 10001, 20001, {"from": accounts[2]})
    # Account 1 keeps the edges, account 3 owns one merged middle range.
    check_ranges([(1, 5000), (25001, 30001)], [], [(5000, 25001)], [])
def _comments_for_2zxglv(sc, submission_id):
    """Shared set of assertions for submission 2zxglv.

    Args:
        sc: list of scraped comment dicts for the submission.
        submission_id: id of the parent submission row, used to check the
            foreign-key field on the first comment.
    """
    assert isinstance(sc, list)
    assert len(sc) == 7
    # matchup first comment keys with DB model schema
    assert sorted(sc[0].keys()) == sorted(scraper._model_columns(Comment))
    # first comment individual values
    c0_expected_values = [
        ('id', "cpnchwj"),
        ('created_utc', 1427062942.0),
        ('submission_id', submission_id),
        ('name', "t1_cpnchwj"),
        ('parent_id', "t3_2zxglv"),
        ('author', "hansolo669"),
    ]
    for k, v in c0_expected_values:
        assert sc[0][k] == v
    assert sc[0]["body"].startswith("You could always try paginating")
    # scores: live reddit data can only grow, so assert lower bounds
    c0_expected_scores = [
        ('ups', 1),
        ('downs', 0),
        ('score', 1)
    ]
    for k, v in c0_expected_scores:
        assert sc[0][k] >= v
def draw_parametric_bs_reps_mle(
    mle_fun, gen_fun, data, args=(), size=1, progress_bar=False
):
    """Draw parametric bootstrap replicates of a maximum likelihood estimator.

    Parameters
    ----------
    mle_fun : function
        Function with call signature mle_fun(data, *args) that computes
        a MLE for the parameters.
    gen_fun : function
        Function to randomly draw a new data set out of the model
        distribution parametrized by the MLE. Must have call
        signature `gen_fun(*params, size)`.
    data : one-dimensional Numpy array
        Array of measurements.
    args : tuple, default ()
        Arguments to be passed to `mle_fun()`.
    size : int, default 1
        Number of bootstrap replicates to draw.
    progress_bar : bool, default False
        Whether or not to display progress bar.

    Returns
    -------
    output : numpy array
        Bootstrap replicates of MLEs.
    """
    # MLE from the observed data; parametrizes the generative distribution.
    params = mle_fun(data, *args)

    iterator = tqdm(range(size)) if progress_bar else range(size)

    # BUGFIX: `*args` must also be forwarded to `mle_fun` for each bootstrap
    # sample, matching its documented signature (the original call dropped
    # them, so replicates were computed with different settings than the
    # original `params` fit).
    return np.array(
        [
            mle_fun(gen_fun(*params, size=len(data), *args), *args)
            for _ in iterator
        ]
    )
def parse_subpalette(words):
    """Turn palette entry words into color-to-index mappings.

    For example, #AAA=2 or #AAAAAA=2 means that (170, 170, 170) will be
    recognized as color 2 in that subpalette.  A word without an =number
    suffix gets the next sequential index, counting from 1.

    Return a list of ((r, g, b), index) tuples.
    """
    mappings = []
    for position, word in enumerate(words, start=1):
        name, sep, index_text = word.partition("=")
        # Explicit "=N" wins; otherwise fall back to the 1-based position.
        index = int(index_text) if sep else position
        mappings.append((parse_color(name), index))
    return mappings
def data_word2vec(input_file, num_labels, word2vec_model):
    """
    Create the research data tokenindex based on the word2vec model file.
    Return the class _Data() (includes the data tokenindex and data labels).
    Args:
        input_file: The research data; one JSON record per line with keys
            'testid', 'features_content', 'labels_index', 'labels_num'
        num_labels: The number of classes
        word2vec_model: The word2vec model file
            (NOTE(review): relies on `wv.vocab`, which gensim 4.x removed —
            confirm the gensim version in use)
    Returns:
        The Class _Data() (includes the data tokenindex and data labels)
    Raises:
        IOError: If the input file is not the .json file
    """
    # Map each vocabulary word to its row index in the embedding matrix.
    vocab = dict([(k, v.index) for (k, v) in word2vec_model.wv.vocab.items()])
    def _token_to_index(content):
        # Convert a token sequence into index ids; unknown tokens map to 0.
        result = []
        for item in content:
            word2id = vocab.get(item)
            if word2id is None:
                word2id = 0
            result.append(word2id)
        return result
    def _create_onehot_labels(labels_index):
        # Multi-hot encode the list of label indices into a fixed-size vector.
        label = [0] * num_labels
        for item in labels_index:
            label[int(item)] = 1
        return label
    if not input_file.endswith('.json'):
        raise IOError("[Error] The research data is not a json file. "
                      "Please preprocess the research data into the json file.")
    with open(input_file) as fin:
        testid_list = []
        content_index_list = []
        labels_list = []
        onehot_labels_list = []
        labels_num_list = []
        total_line = 0
        for eachline in fin:
            data = json.loads(eachline)
            testid = data['testid']
            features_content = data['features_content']
            labels_index = data['labels_index']
            labels_num = data['labels_num']
            testid_list.append(testid)
            content_index_list.append(_token_to_index(features_content))
            labels_list.append(labels_index)
            onehot_labels_list.append(_create_onehot_labels(labels_index))
            labels_num_list.append(labels_num)
            total_line += 1
    # Closure-based, read-only container over the lists parsed above.
    class _Data:
        def __init__(self):
            pass
        @property
        def number(self):
            # Total number of records read.
            return total_line
        @property
        def testid(self):
            return testid_list
        @property
        def tokenindex(self):
            return content_index_list
        @property
        def labels(self):
            return labels_list
        @property
        def onehot_labels(self):
            return onehot_labels_list
        @property
        def labels_num(self):
            return labels_num_list
    return _Data()
def find_version_files(
    root_dir: str, dont_search_dir_names: frozenset = frozenset({"tests", "test"})
) -> list:
    """Recursively find every ``__init__.py`` under a non-test directory.

    Hidden directories (name starting with ``.``) and directories whose
    normalized name (lowercased, non-alphabet characters stripped via
    ``NONE_ALPHABET``) appears in `dont_search_dir_names` are skipped.

    :param str root_dir: directory to search; ``~`` is expanded.
    :param dont_search_dir_names: directory names to skip.
    :return: paths of the ``__init__.py`` files found.
    :rtype: List[Path]
    """
    root_dir = Path(str(root_dir)).expanduser()
    dont_search_dir_names = set(map(str, dont_search_dir_names))
    # BUGFIX: validation used `assert`, which is stripped under `python -O`;
    # raise explicitly instead.
    if not (root_dir.exists() and root_dir.is_dir()):
        raise ValueError(
            "Root directory is invalid: it either does not exist or is not a directory"
        )
    from os import scandir

    def recursive_find(rd, dsdn):
        # Depth-first walk collecting __init__.py files.
        version_files = []
        rd = Path(str(rd)).expanduser()
        with scandir(str(rd)) as scan_rd:
            for entry in scan_rd:
                if entry.is_dir() and not (
                    entry.name.startswith(".")
                    or NONE_ALPHABET.sub("", entry.name.lower()) in dsdn
                ):
                    version_files.extend(recursive_find(entry.path, dsdn))
                elif entry.name == "__init__.py" and entry.is_file():
                    version_files.append(Path(entry.path))
        return version_files

    return recursive_find(root_dir, dont_search_dir_names)
def test_out_of_permission_scope(tesla_model_s):
    """
    status code: 403, no "error" code
    """
    # BUGFIX: the original bare try/except passed vacuously when odometer()
    # succeeded; fail explicitly if no exception is raised.
    try:
        tesla_model_s.odometer()
    except Exception as e:
        assert isinstance(e, SmartcarException)
        # 8 fields stated in exception.py + 'message'
        assert len(e.__dict__.keys()) == 9
        assert e.status_code == 403
        assert e.code is None
        # message formatted correctly (i.e. without colon: <code>)
        assert e.message[:13] == "PERMISSION - "
        assert "type" in e.resolution.keys() and "url" in e.resolution.keys()
    else:
        raise AssertionError("expected SmartcarException to be raised")
def object_difference():
    """Compute the difference parts between selected shapes.
    - Select two objects.

    For each ordered pair of selected objects, creates (a) the part of A
    not covered by B (shown red) and (b) the part of B not covered by A
    (shown green) under WorkFeatures/WorkObjects, then makes the originals
    mostly transparent.

    Original code from HighlightDifference.FCMacro
    https://github.com/FreeCAD/FreeCAD-macros/blob/master/Utility/HighlightDifference.FCMacro
    Authors = 2015 Gaël Ecorchard (Galou)
    """
    global verbose
    msg = verbose
    m_actDoc = get_ActiveDocument(info=msg)
    if m_actDoc is None:
        return None
    createFolders('WorkObjects')
    error_msg =\
        "INCORRECT Object(s) Selection:\n" +\
        "You Must Select Two(2) Objects !"
    result_msg = ": Difference object created into WorkFeatures/WorkObjects/"
    name = "Part"
    part = "Part::Feature"
    grp = "WorkObjects"
    try:
        selectionObjects = Gui.Selection.getSelection()
        if len(selectionObjects) < 2:
            printError_msg(error_msg)
            return
        object_list = []
        for obj in selectionObjects:
            object_list.append(obj)
        # Compare every pair (A, B) once; cuts are done in both directions.
        for i, object_a in enumerate(object_list):
            shape_a = object_a.Shape
            label_a = object_a.Label
            for object_b in object_list[(i + 1):]:
                shape_b = object_b.Shape
                label_b = object_b.Label
                # A minus B: the "red" remainder of A.
                shape_addition = shape_a.cut(shape_b)
                # Near-zero volume means B fully covers A; nothing to show.
                if shape_addition.Volume < 1e-6:
                    print_gui_msg("No Cut of " +\
                                  str(label_a.encode('utf-8')) +\
                                  " by " +\
                                  str(label_b.encode('utf-8')))
                else:
                    print_msg("Volume of the red " +\
                              str(label_a.encode('utf-8')) +\
                              " Cut by " +\
                              str(label_b.encode('utf-8')) +\
                              ": " +\
                              str(shape_addition.Volume) + "\n")
                    if not(App.ActiveDocument.getObject(grp)):
                        App.ActiveDocument.addObject("App::DocumentObjectGroup", grp)
                    added = FreeCAD.ActiveDocument.addObject(part)
                    added.Label = "Cut red (" +\
                                  str(label_a.encode('utf-8')) +\
                                  "-" +\
                                  str(label_b.encode('utf-8')) +\
                                  ")"
                    added.Shape = shape_addition
                    App.ActiveDocument.getObject(grp).addObject(added)
                    added.ViewObject.ShapeColor = (1.0, 0.0, 0.0, 1.0)
                # B minus A: the "green" remainder of B.
                shape_removal = shape_b.cut(shape_a)
                if shape_removal.Volume < 1e-6:
                    print_gui_msg("No Cut of " +\
                                  str(label_b.encode('utf-8')) +\
                                  " by " +\
                                  str(label_a.encode('utf-8')))
                else:
                    print_msg("Volume of the green " +\
                              str(label_b.encode('utf-8')) +\
                              " Cut by " +\
                              str(label_a.encode('utf-8')) +\
                              ": " +\
                              str(shape_removal.Volume) + "\n")
                    if not(App.ActiveDocument.getObject(grp)):
                        App.ActiveDocument.addObject("App::DocumentObjectGroup", grp)
                    removed = FreeCAD.ActiveDocument.addObject(part)
                    removed.Label = "Cut green (" +\
                                    str(label_b.encode('utf-8')) +\
                                    "-" +\
                                    str(label_a.encode('utf-8')) +\
                                    ")"
                    removed.Shape = shape_removal
                    App.ActiveDocument.getObject(grp).addObject(removed)
                    removed.ViewObject.ShapeColor = (0.0, 0.5, 0.0, 1.0)
                # Fade the originals so the cut results stand out.
                object_a.ViewObject.Transparency = 80
                object_b.ViewObject.Transparency = 80
    # NOTE(review): bare except hides all errors behind a generic
    # selection message — consider narrowing and logging the cause.
    except:
        printError_msg(error_msg)
def diffusionkernel(sigma, N=4, returnt=False):
""" diffusionkernel(sigma, N=4, returnt=False)
A discrete analog to the continuous Gaussian kernel,
as proposed by Toni Lindeberg.
N is the tail length factor (relative to sigma).
"""
# Make sure sigma is float
sigma = float(sigma)
# Often refered to as the scale parameter, or t
sigma2 = sigma*sigma
# Where we start, from which we go backwards
# This is also the tail length
if N > 0:
nstart = int(np.ceil(N*sigma)) + 1
else:
nstart = abs(N) + 1
# Allocate kernel and times
t = np.arange(-nstart, nstart+1, dtype='float64')
k = np.zeros_like(t)
# Make a start
n = nstart # center (t[nstart]==0)
k[n+nstart] = 0
n = n-1
k[n+nstart] = 0.01
# Iterate!
for n in range(nstart-1,0,-1):
# Calculate previous
k[(n-1)+nstart] = 2*n/sigma2 * k[n+nstart] + k[(n+1)+nstart]
# The part at the left can be erroneous, so let's use the right part only
k[:nstart] = np.flipud(k[-nstart:])
# Remove the tail, which is zero
k = k[1:-1]
t = t[1:-1]
# Normalize
k = k / k.sum()
# the function T that we look for is T = e^(-sigma2) * I(n,sigma2)
# We found I(n,sigma2) and because we normalized it, the normalization term
# e^(-sigma2) is no longer necesary.
# Done
if returnt:
return k, t
else:
return k | 34,151 |
def CalculateTopologicalTorsionFingerprint(mol):
    """
    #################################################################
    Calculate Topological Torsion Fingerprints.

    Usage:
        result = CalculateTopologicalTorsionFingerprint(mol)

    Input: mol is a molecule object (as accepted by
    Torsions.GetTopologicalTorsionFingerprint).

    Output: result is a 3-tuple:
        1. the total length of the fingerprint,
        2. a dict whose keys are the positions where this molecule has
           some substructure (nonzero elements),
        3. the raw fingerprint object, which is used for calculating
           similarity.
    #################################################################
    """
    res = Torsions.GetTopologicalTorsionFingerprint(mol)
    return res.GetLength(), res.GetNonzeroElements(), res
def prepare_answer_extraction_samples(context: str, answer_list: List[Dict] = None):
    """Build one answer-extraction sample per sentence of `context`.

    Each sample's source text is the full context with the current sentence
    wrapped in <hl> markers; when `answer_list` is given, the target text is
    the "<sep>"-joined answers found in that sentence (sentences without
    answers are skipped).

    Args:
        context: str (assumed to be normalized via normalize_text)
        answer_list: [
            {'text': str, 'answer_start': int},
            {'text': str, 'answer_start': int},
            ...
        ]

    Returns:
        list of dicts with keys "source_text", "target_text", "answer_list".
    """
    prepare_target = True if answer_list else False

    # split into sentences
    sentence_list = sentence_tokenize(context)
    num_sentences = len(sentence_list)

    if prepare_target:
        answer_list_per_sentence = get_answer_list_per_sentence(sentence_list, answer_list)

    # prepare sources (and targets)
    samples = []
    for sentence_ind in range(num_sentences):
        source_text = "extract answers:"
        if prepare_target:
            answer_list = answer_list_per_sentence[sentence_ind]
            answer_list = [answer["text"] for answer in answer_list]
            if not answer_list:
                continue
            answer_list = list(dict.fromkeys(answer_list))  # remove duplicate answers without changing the order
            target_text = " <sep> ".join(answer_list) + " <sep>"
        else:
            target_text = None

        # Highlight the current sentence, then append every sentence in order.
        for sentence_ind2, sentence in enumerate(sentence_list):
            if sentence_ind == sentence_ind2:
                sentence = f"<hl> {sentence} <hl>"
            source_text = f"{source_text} {sentence}"
        source_text = source_text.strip()

        # BUGFIX: removed a leftover no-op debug statement
        # (`if sample["target_text"] is None: sample`) that had no effect.
        samples.append(
            {"source_text": source_text, "target_text": target_text, "answer_list": answer_list}
        )
    return samples
def mix_in(
    source: type,
    target: type,
    should_copy: Optional[Callable[[str, bool], bool]] = None,
) -> List[str]:
    """
    Copy all defined functions from mixin `source` into `target`. It could
    be useful when you cannot inherit from the mixin because of an
    incompatible metaclass. It does not copy abstract functions. If
    `source` is `ABCMeta`, will register `target` with it.

    Args:
        source: class providing the functions.
        target: class receiving the functions.
        should_copy: optional predicate called as
            ``should_copy(name, already_exists)``; the function is copied
            only when it returns True. `already_exists` is True when
            `target` already has an attribute named `name`.

    Returns list of copied methods.
    """
    mixed_in_methods = []
    try:
        abstract_methods = source.__abstractmethods__  # type:ignore
    except AttributeError:
        abstract_methods = set()
    target_members = dir(target)
    for n in dir(source):
        fn = getattr(source, n)
        if isfunction(fn) and n not in abstract_methods:
            # BUGFIX: the flag was inverted (`n not in target_members`), so
            # `should_copy` was told a method already existed exactly when
            # it did not.
            already_exists = n in target_members
            if should_copy is None or should_copy(n, already_exists):
                setattr(target, n, fn)
                mixed_in_methods.append(n)
    if isinstance(source, abc.ABCMeta):
        source.register(target)
    return mixed_in_methods
def get_img_num_per_cls(cifar_version, imb_factor=None):
    """Return the number of images per class for an imbalanced CIFAR split.

    Image counts decay exponentially across classes: class 0 keeps the full
    `img_max` images and the last class keeps roughly
    `img_max * imb_factor` images.

    args:
        cifar_version: str, '10', '100', '20'
        imb_factor: float, imbalance factor img_min/img_max, or None to
            keep the default (balanced) CIFAR counts.
    output:
        img_num_per_cls: a list of number of images per class
    """
    cls_num = int(cifar_version)
    img_max = img_num(cifar_version)
    if imb_factor is None:
        # Balanced dataset: every class keeps the maximum count.
        return [img_max] * cls_num
    decay = lambda idx: img_max * (imb_factor ** (idx / (cls_num - 1.0)))
    return [int(decay(idx)) for idx in range(cls_num)]
async def cmd_tournament_next(ctx: discord.ext.commands.Context):
    """
    Get information about the starting time of the next month's tournament.

    Usage:
      /tournament next
      /tourney next

    Examples:
      /tournament next - Displays information about the starting time of next month's tournament.
    """
    # NOTE: the docstring above doubles as the command's help text.
    async with ctx.typing():
        # Show the typing indicator while the embed is assembled.
        utc_now = util.get_utcnow()
        start_of_tourney = tourney.get_next_tourney_start()
        # Colour the embed like the bot's member role in this guild.
        embed_colour = util.get_bot_member_colour(bot, ctx.guild)
        embed = tourney.embed_tourney_start(start_of_tourney, utc_now, embed_colour)
        await ctx.send(embed=embed)
def report_model_activations(model, labels=None, output_folder=None, model_name="model",
                             categories="none",
                             conv_layers="all", conv_layer_filters="all"):
    """
    Plots the convolution filters and predictions activation images for the given model.

    :param model: model to take in account.
    :param labels: categories labels for the dense activation plot.
    :param output_folder: folder where the PNG files are written.
    :param model_name: name for the image output file (currently unused).
    :param categories: categories to plot dense activations for, or "none" to skip.
    :param conv_layers: list of convolution layer indices, or "all" for every
        convolution layer found in the model.
    :param conv_layer_filters: filters to plot for each convolution layer.
    :return: None
    """
    if categories != "none":
        file_path = os.path.join(output_folder, "activations.png")
        plot_activation_dense(model, output_file=file_path, labels=labels, categories=categories)
    # Idiom fix: isinstance() instead of comparing type() to list; anything
    # that is not an explicit list falls back to all convolution layers.
    if not isinstance(conv_layers, list):
        conv_layers = get_conv_layers_idx(model)
    for conv in conv_layers:
        file_path = os.path.join(output_folder, "filters_conv_{}.png".format(conv))
        plot_activation_filters(model, layer_idx=conv, filters=conv_layer_filters, output_file=file_path)
def xml():
    """
    Returns the lti.xml file for the app.
    """
    try:
        return Response(render_template(
            'lti.xml'), mimetype='application/xml'
        )
    except Exception:
        # BUGFIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt and the original logged no traceback; catch
        # Exception and log the full stack for debugging.
        app.logger.exception("Error with XML.")
        return return_error('''Error with XML. Please refresh and try again. If this error persists,
            please contact support.''')
async def test_siren_change_default_tone(hass: HomeAssistant):
    """Test changing the default tone on message."""
    entry = configure_integration(hass)
    test_gateway = HomeControlMockSiren()
    # Start with the siren off (status 0).
    test_gateway.devices["Test"].status = 0
    with patch(
        "homeassistant.components.devolo_home_control.HomeControl",
        side_effect=[test_gateway, HomeControlMock()],
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    state = hass.states.get(f"{DOMAIN}.test")
    assert state is not None
    with patch(
        "devolo_home_control_api.properties.multi_level_switch_property.MultiLevelSwitchProperty.set"
    ) as set:
        # Simulate the gateway reporting tone 2 as the new default tone ...
        test_gateway.publisher.dispatch("Test", ("mss:Test", 2))
        await hass.services.async_call(
            "siren",
            "turn_on",
            {"entity_id": f"{DOMAIN}.test"},
            blocking=True,
        )
        # ... and expect turn_on to use exactly that tone.
        set.assert_called_once_with(2)
def log_get_level(client):
    """Query the current log level.

    Args:
        client: RPC client used to issue the call.

    Returns:
        Current log level.
    """
    method = 'log_get_level'
    return client.call(method)
def cal_pivot(n_losses, network_block_num):
    """Calculate the block indices at which additional losses are inserted.

    The network's blocks are divided into `n_losses + 1` roughly equal
    segments; one pivot is placed at the end of each segment except the
    last, capped at the final block index.
    """
    segments = n_losses + 1
    per_segment = network_block_num // segments + 1
    return [
        min(per_segment * (seg + 1), network_block_num - 1)
        for seg in range(segments - 1)
    ]
async def scan_host(
    host: IPv4Address,
    semaphore: asyncio.Semaphore,
    timeout: int,
    verbose: bool,
):
    """
    Ping `host` (IPv4, timeout in seconds) while holding `semaphore` to
    bound concurrency. Prints the outcome to standard output and returns
    True when the host answered, False on timeout or OS-level failure.
    """
    async with semaphore:
        try:
            delay = await aioping.ping(
                str(host), timeout, family=AddressFamily.AF_INET
            )
        except TimeoutError:
            if verbose:
                print(f"{host} has not responded")
        except OSError as error:
            if verbose:
                print(
                    f"Ping to host {host} failed for the following reason: {error}"
                )
        else:
            print(f"{host} responded after {delay:.4f} ms")
            return True
        return False
def _emit_post_update_statements(
    base_mapper, uowtransaction, cached_connections, mapper, table, update
):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_post_update_commands()."""
    # Only verify version ids when the version column lives in this table.
    needs_version_id = (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    )
    def update_stmt():
        # Build UPDATE ... WHERE pk = :pk [AND version = :version], with
        # bind parameters keyed by column label; memoized per (op, table).
        clauses = BooleanClauseList._construct_raw(operators.and_)
        for col in mapper._pks_by_table[table]:
            clauses.clauses.append(
                col == sql.bindparam(col._label, type_=col.type)
            )
        if needs_version_id:
            clauses.clauses.append(
                mapper.version_id_col
                == sql.bindparam(
                    mapper.version_id_col._label,
                    type_=mapper.version_id_col.type,
                )
            )
        stmt = table.update().where(clauses)
        if mapper.version_id_col is not None:
            # Fetch the new version value back with the UPDATE.
            stmt = stmt.return_defaults(mapper.version_id_col)
        return stmt
    statement = base_mapper._memo(("post_update", table), update_stmt)
    # execute each UPDATE in the order according to the original
    # list of states to guarantee row access order, but
    # also group them into common (connection, cols) sets
    # to support executemany().
    for key, records in groupby(
        update,
        lambda rec: (rec[3], set(rec[4])),  # connection # parameter keys
    ):
        rows = 0
        records = list(records)
        connection = key[0]
        # Whether this dialect reports trustworthy rowcounts for single-
        # and multi-row statements (RETURNING variants when versioned).
        assert_singlerow = (
            connection.dialect.supports_sane_rowcount
            if mapper.version_id_col is None
            else connection.dialect.supports_sane_rowcount_returning
        )
        assert_multirow = (
            assert_singlerow
            and connection.dialect.supports_sane_multi_rowcount
        )
        # Versioned updates need per-row execution unless multi-rowcount
        # can be trusted.
        allow_multirow = not needs_version_id or assert_multirow
        if not allow_multirow:
            check_rowcount = assert_singlerow
            for state, state_dict, mapper_rec, connection, params in records:
                c = cached_connections[connection].execute(statement, params)
                _postfetch_post_update(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    c,
                    c.context.compiled_parameters[0],
                )
                rows += c.rowcount
        else:
            # executemany() path: one round trip for the whole group.
            multiparams = [
                params
                for state, state_dict, mapper_rec, conn, params in records
            ]
            check_rowcount = assert_multirow or (
                assert_singlerow and len(multiparams) == 1
            )
            c = cached_connections[connection].execute(statement, multiparams)
            rows += c.rowcount
            for state, state_dict, mapper_rec, connection, params in records:
                _postfetch_post_update(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    c,
                    c.context.compiled_parameters[0],
                )
        if check_rowcount:
            # A mismatch means a concurrent transaction deleted or
            # re-versioned rows out from under us.
            if rows != len(records):
                raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched."
                    % (table.description, len(records), rows)
                )
        elif needs_version_id:
            util.warn(
                "Dialect %s does not support updated rowcount "
                "- versioning cannot be verified."
                % c.dialect.dialect_description
            )
def build_grid_search_config(params_dict):
    """
    Given one params json, build the N json configs produced by grid
    search over every list-valued hyperparameter. Currently grid search
    only applies to the "optimization" section and the train reader config.
    :param params_dict: full configuration dict.
    :return: param_config_list - one config dict per grid-search combination.
    """
    model_params_dict = params_dict.get("model")
    opt_params = model_params_dict.get("optimization", None)
    if not opt_params:
        raise ValueError("optimization's params can't be none")
    # Get the dicts that grid search will iterate over.
    train_data_params = params_dict.get("dataset_reader").get("train_reader").get("config", None)
    if not train_data_params:
        raise ValueError("train_reader config's params can't be none")
    # Every dict subject to grid search is collected in need_operate_params.
    need_operate_params = [opt_params, train_data_params]
    all_combination_list = []
    all_single_param_dict = []
    dict_list_key_num = []
    for one_operate_param in need_operate_params:
        combination_list, single_param_dict = get_list_params(one_operate_param)
        all_combination_list.extend(combination_list)
        all_single_param_dict.append(single_param_dict)
        # Remember how many searched keys each source dict contributed.
        dict_list_key_num.append(len(combination_list))
    task_param_list = []
    # Cartesian product over all searched values: one task config each.
    for params in product(*all_combination_list):
        one_task_param = copy.deepcopy(params_dict)
        # Dicts to be updated for this task; the order must stay consistent
        # with need_operate_params above.
        need_update_param = [one_task_param["model"]["optimization"],
                             one_task_param["dataset_reader"]["train_reader"]["config"]]
        i = 0
        for index, one_single_param in enumerate(all_single_param_dict):
            single_param = copy.deepcopy(one_single_param)
            # Overlay this combination's slice of searched values onto the
            # fixed (non-searched) params for the corresponding dict.
            for one_grid in params[i:i + dict_list_key_num[index]]:
                single_param.update(one_grid)
            need_update_param[index].update(single_param)
            i += dict_list_key_num[index]
        task_param_list.append(one_task_param)
    return task_param_list
def test_ses_get_subarray_id_for_requested_pid():
    """
    Verify that the private method _get_subarray_id returns
    subarray id correctly
    """
    subarray_id = 123
    process_pid = 456
    # Build a procedure whose init args carry the subarray id under test.
    procedure = Procedure("test://a")
    init_args = ProcedureInput(subarray_id=subarray_id)
    procedure.script_args["init"] = init_args
    procedures = {process_pid: procedure}
    process_summary = ProcedureSummary(
        id=process_pid,
        script_uri=procedure.script_uri,
        script_args=procedure.script_args,
        history=procedure.history,
        state=procedure.state,
    )
    expected = [process_summary]
    with mock.patch(
        "oet.procedure.application.application.domain.ProcessManager"
    ) as mock_pm:
        # get the mock ProcessManager instance
        instance = mock_pm.return_value
        # the manager's procedures attribute holds created procedures and is
        # used for retrieval
        instance.procedures = procedures
        service = ScriptExecutionService()
        returned = service._get_subarray_id(
            process_pid
        )  # pylint: disable=protected-access
    assert returned == expected[0].script_args["init"].kwargs["subarray_id"]
def cutoff_countmin_wscore(y, scores, score_cutoff, n_cm_buckets, n_hashes):
    """ Learned Count-Min (use predicted scores to identify heavy hitters)
    Args:
        y: true counts of each item (sorted, largest first), float - [num_items]
        scores: predicted scores of each item - [num_items]
        score_cutoff: threshold for heavy hitters
        n_cm_buckets: number of buckets of Count-Min
        n_hashes: number of hash functions
    Returns:
        loss_avg: estimation error
        space: space usage in bytes
    """
    if len(y) == 0:
        # BUGFIX: return the documented (loss, space) pair instead of a bare
        # 0 (also avoids the division by zero below); with no cutoff items
        # the space is just the Count-Min sketch itself.
        return 0.0, n_cm_buckets * n_hashes * 4
    y_ccm = y[scores > score_cutoff]   # predicted heavy hitters -> unique cutoff buckets
    y_cm = y[scores <= score_cutoff]   # everything else -> Count-Min sketch
    loss_cf = 0  # put y_ccm into cutoff buckets, no loss
    loss_cm = count_min(y_cm, n_cm_buckets, n_hashes)
    assert len(y_ccm) + len(y_cm) == len(y)
    # Average estimation error, weighted by true counts.
    loss_avg = (loss_cf * np.sum(y_ccm) + loss_cm * np.sum(y_cm)) / np.sum(y)
    print('\tloss_cf %.2f\tloss_rd %.2f\tloss_avg %.2f' % (loss_cf, loss_cm, loss_avg))
    # 2 x 4 bytes (key, count) per cutoff item + 4 bytes per sketch counter.
    space = len(y_ccm) * 4 * 2 + n_cm_buckets * n_hashes * 4
    return loss_avg, space
def test_lcc_like_epi():
    """
    Fit an LCC-like synthetic association with epicyclic orbit tracing.

    Takes about 5 mins with epicyclic.
    If burnin is too short (say 200 steps) won't actually find true solution
    """
    TORB_FUNC = trace_epicyclic_orbit
    # Current-day mean (XYZUVW); trace it back `age` Myr to get the birth mean.
    mean_now = np.array([50., -100., 25., 1.1, -7.76, 2.25])
    age = 10.
    mean = TORB_FUNC(mean_now, times=-age)
    # Isotropic spherical component: 5 pc position and 2 km/s velocity spread.
    dx = 5.
    dv = 2.
    covmatrix = np.identity(6)
    covmatrix[:3,:3] *= dx**2
    covmatrix[3:,3:] *= dv**2
    true_comp = SphereComponent(
        attributes={'mean':mean, 'covmatrix':covmatrix, 'age':age,},
        trace_orbit_func=TORB_FUNC,
    )
    nstars = 1000
    # Effectively noiseless measurements so the fit is limited by sampling only.
    tiny_measurement_error = 1e-10
    # import ipdb; ipdb.set_trace()
    best_comp, chain, lnprob, data_dict = run_fit_helper(
        true_comp=true_comp, starcounts=nstars,
        measurement_error=tiny_measurement_error,
        trace_orbit_func=TORB_FUNC,
        run_name='lcc_like',
    )
    # Recovered parameters should match the truth within loose tolerances.
    assert np.allclose(true_comp.get_mean(), best_comp.get_mean(),
                       atol=3.0)
    assert np.allclose(true_comp.get_age(), best_comp.get_age(),
                       atol=1.0)
    assert np.allclose(true_comp.get_covmatrix(),
                       best_comp.get_covmatrix(),
                       atol=5.0)
    # Persist both components for later inspection.
    comp_filename = 'temp_data/{}_compfitter_lcc_like_true_and_best_comp.npy'.format(
        PY_VERS
    )
    SphereComponent.store_raw_components(comp_filename, [true_comp, best_comp])
    return true_comp, best_comp, lnprob
def uniquePandasIndexMapping(inputColumn):
    """quickly mapps the unique name entries back to input entries

    Keyword arguments:
    inputColumn -- a SINGLE column from a pandas dataframe, presumably with
    duplications. Will create a frequency table and a mapping back to the source entries.

    Returns:
    sortedInputColumn -- the input sorted by value, with the original row
        index preserved in a 'userIndex' column.
    sortedTableUniqueFullNameCounts -- one row per unique value with its
        count and, in 'inputIndexMapping', the original row indices it maps to.

    NOTE(review): despite the generic docstring, the column name is
    hard-coded as 'company' throughout — confirm callers always pass that
    column. The in-place sort also mutates the caller's frame.
    """
    import numpy as np
    import pandas as pd
    inputColumn.sort_values(by=['company'], inplace=True)
    sortedInputColumn=inputColumn.reset_index()
    # Keep the original row index around as 'userIndex' for the mapping below.
    sortedInputColumn.rename(columns={"index":"userIndex"},inplace=True)
    # Frequency table of unique values in the first (only) column.
    tableUniqueFullNameCounts=inputColumn.iloc[:,0].value_counts()
    tableUniqueFullNameCounts=tableUniqueFullNameCounts.reset_index()
    tableUniqueFullNameCounts.rename(columns={"company":"count","index":"company"},inplace=True)
    tableUniqueFullNameCounts.sort_values(by=['company'], inplace=True)
    sortedTableUniqueFullNameCounts=tableUniqueFullNameCounts.reset_index()
    sortedTableUniqueFullNameCounts['inputIndexMapping']=''
    # Because both tables are sorted by value, each unique value's source
    # rows occupy a contiguous slice of sortedInputColumn; walk it with a
    # running offset.
    # NOTE(review): chained `.iloc[...] = ...` assignment triggers
    # SettingWithCopyWarning and relies on pandas internals — fragile.
    currentSum=0
    for index, row in sortedTableUniqueFullNameCounts.iterrows():
        currentRange=np.arange(currentSum,currentSum+sortedTableUniqueFullNameCounts['count'].iloc[index])
        sortedTableUniqueFullNameCounts['inputIndexMapping'].iloc[index]=sortedInputColumn['userIndex'].iloc[currentRange].array
        currentSum=currentSum+sortedTableUniqueFullNameCounts['count'].iloc[index]
    return sortedInputColumn, sortedTableUniqueFullNameCounts;
def non_max_suppression(boxlist, thresh, max_output_size, scope=None):
    """Greedy non maximum suppression over a single-class BoxList.

    Iteratively keeps the highest-scoring boxes, pruning away any box whose
    IOU (intersection over union) with an already-selected box exceeds
    `thresh`. Note that this only works for a single class --- to apply NMS
    to multi-class predictions, use MultiClassNonMaxSuppression.

    Args:
      boxlist: BoxList holding N boxes; must contain a 'scores' field
        representing detection scores.
      thresh: scalar IOU threshold in [0, 1].
      max_output_size: maximum number of retained boxes.
      scope: name scope.

    Returns:
      a BoxList holding M boxes where M <= max_output_size

    Raises:
      ValueError: if thresh is not in [0, 1], boxlist is not a BoxList, or
        the 'scores' field is missing.
    """
    with tf.name_scope(scope, 'NonMaxSuppression'):
        # Validate inputs up front, in the same order as before.
        if not 0 <= thresh <= 1.0:
            raise ValueError('thresh must be between 0 and 1')
        if not isinstance(boxlist, box_list.BoxList):
            raise ValueError('boxlist must be a BoxList')
        if not boxlist.has_field('scores'):
            raise ValueError('input boxlist must have \'scores\' field')
        keep_indices = tf.image.non_max_suppression(
            boxlist.get(),
            boxlist.get_field('scores'),
            max_output_size,
            iou_threshold=thresh)
        return gather(boxlist, keep_indices)
def client_role_setting(realm, client_id):
    """Create client roles (and their composite links) in a Keycloak realm.

    Reads the JSON request body, first creates every plain role, then makes
    a second pass to link child roles as composites.  Two passes are needed
    because a composite can only reference roles that already exist.

    Args:
        realm (str): Keycloak realm name
        client_id (str): Keycloak client id
    Returns:
        flask response: ``{"result": "200"}`` with HTTP 200 on success,
        or the common server-error response on failure.
    """
    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}: realm[{}] client_id[{}]'.format(inspect.currentframe().f_code.co_name, realm, client_id))
        globals.logger.debug('#' * 50)
        token_user = os.environ["EXASTRO_KEYCLOAK_USER"]
        token_password = os.environ["EXASTRO_KEYCLOAK_PASSWORD"]
        token_realm_name = os.environ["EXASTRO_KEYCLOAK_MASTER_REALM"]
        # Copy the request arguments so the original request body is untouched
        payload = request.json.copy()
        globals.logger.debug(payload)
        # Obtain an admin access token
        token = api_keycloak_call.get_user_token(token_user, token_password, token_realm_name)
        # First pass: register the bare roles only (composites come later)
        for role in payload["roles"]:
            role_name = role["name"]
            # Build the payload for the role to add
            add_role = {
                "name": role_name,
                "attributes": role["attributes"],
            }
            # Create one client role
            api_keycloak_call.keycloak_client_role_create(realm, client_id, add_role, token)
        # Re-acquire the token (the first one may have expired during the loop)
        token = api_keycloak_call.get_user_token(token_user, token_password, token_realm_name)
        # Second pass: where a role declares children, attach them as composites
        # Iterate over every requested role
        for role in payload["roles"]:
            role_name = role["name"]
            # Fetch each child role's definition and link it as a composite
            if len(role["composite_roles"]) > 0:
                composite_roles = []
                for composite_role in role["composite_roles"]:
                    role_info = api_keycloak_call.keycloak_client_role_get(realm, client_id, composite_role, token)
                    composite_roles.append(json.loads(role_info))
                # Register the composite mapping on the client role
                api_keycloak_call.keycloak_client_role_composite_create(realm, client_id, role_name, composite_roles, token)
        ret = {
            "result": "200",
        }
        return jsonify(ret), 200
    except Exception as e:
        return common.serverError(e) | 34,170 |
def gen_weekly_ccy_df( start,end ):
    """Generate the weekly currency data table.

    Fetches historical FX data for [start, end], converts the quoted
    USDxxx pairs into xxxUSD, rebases every series to 1.0 at the first
    observation, and re-attaches the unscaled JPY/CNY series as
    ``JPY_raw`` / ``CNY_raw``.

    Returns:
        tuple: (DataFrame restricted to the currency columns, list of those
        column names)
    """
    columns = [ "USD_Index",
                "EURUSD","GBPUSD","AUDUSD","CADUSD",
                "JPYUSD",
                "CNYUSD","HKDUSD","TWDUSD",
                "KRWUSD","THBUSD","SGDUSD","MYRUSD",
                "BRLUSD","INRUSD",
                "CNY_raw","JPY_raw"
              ]
    df = get_histroical_ccy(start,end)
    # Snapshot the raw (un-rebased) JPY/CNY series before normalisation.
    raw = df[["JPYUSD","CNYUSD"]]
    # Invert the USD-quoted pairs so every series is expressed as xxxUSD.
    for quoted, inverted in (("USDEUR","EURUSD"),
                             ("USDGBP","GBPUSD"),
                             ("USDAUD","AUDUSD")):
        df[inverted] = 1/df[quoted]
    # Rebase every column to 1.0 at the first observation.
    df = df/df.iloc[0]
    df["CNY_raw"] = raw["CNYUSD"]
    df["JPY_raw"] = raw["JPYUSD"]
    return df[columns],columns  # | 34,171 |
def compute_stats_array(
    cfg,
    dem,
    ref,
    dem_nodata=None,
    ref_nodata=None,
    display=False,
    final_json_file=None,
):
    """
    Compute statistics between two DEMs given as numpy arrays.

    The configuration dict ``cfg`` is filled in-place with default options
    and dummy registration results (no coregistration is performed here,
    since the inputs are plain arrays), then the altimetric difference
    stats are computed and the whole ``cfg`` is dumped to JSON.

    :param cfg: configuration dictionary (mutated in place)
    :param dem: numpy array, dem raster
    :param ref: numpy array, reference dem raster to be coregistered to dem
    :param dem_nodata: int/float, nodata value in dem
    :param ref_nodata: int/float, nodata value in ref
    :param display: boolean, choose between plot show and plot save
    :param final_json_file: filename of final_cfg; defaults to
        ``<outputDir>/final_stats.json``
    :return: None; results live in ``cfg`` and in ``final_json_file``
    """
    # Ensure the nested option dicts exist before writing defaults into them.
    if "stats_opts" not in cfg:
        cfg["stats_opts"] = {}
    if "to_be_classification_layers" not in cfg["stats_opts"]:
        cfg["stats_opts"]["to_be_classification_layers"] = {}
    if "classification_layers" not in cfg["stats_opts"]:
        cfg["stats_opts"]["classification_layers"] = {}
    if "stats_results" not in cfg:
        cfg["stats_results"] = {}
    # default config
    # Placeholder planimetric/altimetric results: the array entry point skips
    # coregistration, so these values are dummies consumed downstream.
    cfg["plani_results"] = {}
    cfg["plani_results"]["dx"] = 1
    cfg["plani_results"]["dy"] = 1
    cfg["alti_results"] = {}
    cfg["alti_results"]["rectifiedRef"] = {}
    cfg["alti_results"]["rectifiedRef"]["nb_valid_points"] = 10
    cfg["alti_results"]["rectifiedRef"]["nb_points"] = 10
    cfg["alti_results"]["rectifiedDEM"] = {}
    cfg["alti_results"]["rectifiedDEM"]["nb_valid_points"] = 10
    cfg["alti_results"]["rectifiedDEM"]["nb_points"] = 10
    cfg["stats_opts"]["alti_error_threshold"] = {}
    cfg["stats_opts"]["alti_error_threshold"]["value"] = 0
    cfg["stats_opts"]["plot_real_hists"] = False
    cfg["stats_opts"]["remove_outliers"] = False
    # Wrap the raw arrays in the image structure the stats code expects.
    dem_a3d = read_img_from_array(dem, no_data=dem_nodata)
    ref_a3d = read_img_from_array(ref, no_data=ref_nodata)
    # Altimetric difference (dem - ref), itself wrapped for the stats code.
    final_dh = dem_a3d["im"].data - ref_a3d["im"].data
    final_dh_a3d = read_img_from_array(final_dh)
    if final_json_file is None:
        final_json_file = cfg["outputDir"] + "/final_stats.json"
    alti_diff_stats(
        cfg,
        dem_a3d,
        ref_a3d,
        final_dh_a3d,
        display=display,
        remove_outliers=cfg["stats_opts"]["remove_outliers"],
        geo_ref=False,
    )
    # save results
    with open(final_json_file, "w") as outfile:
        json.dump(cfg, outfile, indent=2) | 34,172 |
def remove_group_common(group_id, username, org_id=None):
    """Remove a group together with its repo associations.

    If ``org_id`` is provided (and positive), the organization's group
    record is removed as well.

    Arguments:
    - `group_id`: id of the group to remove
    - `username`: user performing the removal (passed to ccnet)
    - `org_id`: optional organization id; only acted on when > 0
    """
    seaserv.ccnet_threaded_rpc.remove_group(group_id, username)
    seaserv.seafserv_threaded_rpc.remove_repo_group(group_id)
    if org_id and org_id > 0:
        seaserv.ccnet_threaded_rpc.remove_org_group(org_id, group_id)
    # remove record of share to group when group deleted
    ExtraGroupsSharePermission.objects.filter(group_id=group_id).delete() | 34,173 |
def demean_and_normalise(points_a: np.ndarray,
                         points_b: np.ndarray):
    """
    Centre each point cloud independently at the origin, then scale both by
    a single shared factor so the larger one fits inside [-1, 1].

    :param points_a: 1st point cloud
    :type points_a: np.ndarray
    :param points_b: 2nd point cloud
    :type points_b: np.ndarray
    :return: normalised point clouds, scale matrix & translation matrices
    """
    centroid_a = np.mean(points_a, axis=0)
    centroid_b = np.mean(points_b, axis=0)
    centred_a = points_a - centroid_a
    centred_b = points_b - centroid_b
    # One shared scale keeps the two clouds' relative sizes intact.
    scale = max(np.max(np.abs(centred_a)), np.max(np.abs(centred_b)))
    normalised_a = centred_a / scale
    normalised_b = centred_b / scale
    return (normalised_a,
            normalised_b,
            create_scaling_matrix(scale),
            create_translation_matrix(centroid_a),
            create_translation_matrix(centroid_b))  # | 34,174 |
def parallel_login_of_multiple_users(self, ldap_server, ldap_user, timeout=200, role_count=10):
    """Check that valid and invalid logins of multiple LDAP authenticated users
    with mapped roles works in parallel.

    Delegates to ``parallel_login`` with 10 concurrent users.
    """
    # NOTE(review): user_count is hard-coded to 10 here — confirm this is
    # intentional rather than meant to come from a parameter.
    parallel_login(user_count=10, ldap_user=ldap_user,ldap_server=ldap_server,
            timeout=timeout, role_count=role_count) | 34,175 |
def save_json(data, filepath: PathLike):
    """Serialize *data* to *filepath* as JSON, pretty-printed with indent=3."""
    serialized = json.dumps(data, indent=3)
    with open(filepath, "w") as sink:
        sink.write(serialized)  # | 34,176 |
def test_get_common_option_defaults_from_configuration():
  """Defaults should come from configuration file.

  The expected values (gcloud_* / config_*) presumably come from the test
  fixtures' gcloud and orchestrate configuration files — confirm against
  the suite's conftest/fixture setup.
  """
  defaults = orchestrate.utils.get_common_option_defaults()
  expected = dict(
      project='gcloud_project',
      zone='gcloud_zone',
      api_project='config_project',
      api_host='config_host',
      api_key='config_key',
      verbose=False,
  )
  assert defaults == expected | 34,177 |
def ParseCustomLevel(api_version):
  """Return a custom-level YAML parser bound to the given API version.

  The factory closes over ``api_version`` so the returned callable can look
  up the correct versioned message class.
  """
  def VersionedParseCustomLevel(path):
    """Parse a YAML representation of custom level conditions.
    Args:
      path: str, path to file containing custom level expression
    Returns:
      string of CEL expression.
    Raises:
      ParseError: if the file could not be read into the proper object
    """
    data = yaml.load_path(path)
    if not data:
      raise ParseError(path, 'File is empty')
    # Resolve the Expr message type for the requested API version.
    messages = util.GetMessages(version=api_version)
    message_class = messages.Expr
    try:
      expr = encoding.DictToMessage(data, message_class)
    except Exception as err:
      raise InvalidFormatError(path, six.text_type(err), message_class)
    # Reject YAML keys that are not fields of the Expr message.
    _ValidateAllCustomFieldsRecognized(path, expr)
    return expr
  return VersionedParseCustomLevel | 34,178 |
def specMergeMSA(*msa, **kwargs):
    """Returns an :class:`.MSA` obtained from merging parts of the sequences
    of proteins present in multiple *msa* instances. Sequences are matched
    based on species section of protein identifiers found in the sequence labels.
    Order of sequences in the merged MSA will follow the order of sequences in the
    first *msa* instance. Note that protein identifiers that map to multiple
    sequences will be excluded.

    Bug fix: the merge loop previously relied on ``start``/``end`` leaking
    out of a list comprehension — a ``NameError`` on Python 3 — and only
    ever copied sequences from the first two alignments.  Each alignment is
    now merged into its own column range, so any number of MSAs works.
    """
    if len(msa) <= 1:
        raise ValueError('more than one msa instances are needed')
    try:
        sets = []
        labells = []
        for m in msa:
            # For each alignment, collect the species tags that map to
            # exactly one sequence, and remember tag -> full label.
            aset = set([])
            labell = {}
            count = m.countLabel
            for label in m.iterLabels():
                spec = label.rsplit('_')[1]
                if count(label) == 1 and spec not in aset:
                    aset.add(spec)
                    labell[spec] = label
            sets.append(aset)
            labells.append(labell)
    except AttributeError:
        raise TypeError('all msa arguments must be MSA instances')
    # Species common to every alignment are the ones that get merged.
    sets = iter(sets)
    common = next(sets)
    for aset in sets:
        common = common.intersection(aset)
    if not common:
        return None
    # Column ranges: alignment i occupies columns rngs[i] in the merger.
    lens = [m.numResidues() for m in msa]
    rngs = [0]
    rngs.extend(cumsum(lens))
    rngs = [(start, end) for start, end in zip(rngs[:-1], rngs[1:])]
    merger = zeros((len(common), sum(lens)), '|S1')
    index = 0
    labels = []
    mapping = {}
    for spec in common:
        # Copy the matching sequence from every alignment into its columns.
        for m, labell, (start, end) in zip(msa, labells, rngs):
            seq = str(m[m.getIndex(labell[spec])])
            merger[index, start:end] = list(seq)
        # The merged row is labeled after the first alignment's sequence.
        label = labells[0][spec]
        labels.append(label)
        mapping[label] = index
        index += 1
    merger = MSA(merger, labels=labels, mapping=mapping,
                 title=' + '.join([m.getTitle() for m in msa]))
    return merger  # | 34,179 |
def parse(filePath):
    """
    Parse the Maya ASCII file at *filePath* in full and return its scene.

    :type filePath: str
    :rtype: mason.asciiscene.AsciiScene
    """
    # The parser builds the scene eagerly during construction.
    return asciifileparser.AsciiFileParser(filePath).scene | 34,180 |
def test_records_match_shapes():
    """
    Assert that the number of records matches
    the number of shapes in the shapefile.
    """
    # Records (DBF rows) and shapes (SHP geometries) are parallel arrays,
    # so their counts must agree for a well-formed shapefile.
    with shapefile.Reader("shapefiles/blockgroups") as sf:
        records = sf.records()
        shapes = sf.shapes()
        assert len(records) == len(shapes) | 34,181 |
def getSpectrumFromMlinptFolder(inpFolder, fwhm, hv, angle, polarised=None, multEnergiesByMinusOne=True, database=None):
    """ Build a photoemission spectrum from every *MLinpt.txt file in a folder.
    Args:
        inpFolder: (str) Path to folder containing *MLinpt.txt files
        fwhm: (float) Full-Width at half maximum for the broadening function
        hv: (float) Photon energy to calculate spectrum at (None means density-of-states)
        angle: (float) Emission angle to calculate spectrum at (None means ignore angular effects)
        polarised: (str, Optional) If None (default) then unpolarised light is assumed, If "linear" then simulate for linearly polarised light in direction of beam
        multEnergiesByMinusOne: (Bool) Whether to multiply parsed energies by -1, to convert from eigenvalues(more -ve means more stable) to binding energies (more positive is more stable). Default is True
        database: (Optional, CrossSectionDatabaseBase object), default = YehLindauXSectionDatabase
    Returns
        outSpectrum: (GenSpectraOutput object) - contains total spectrum (totalSpectraContributions) and all contributions (spectraContributions)
        specCreator: (SpectrumCreatorStandard object) - contains all options used to create spectrum
    """
    # NOTE(review): os.listdir returns bare filenames that are NOT joined
    # with inpFolder, so downstream parsing presumably resolves them against
    # the current working directory — confirm; looks like a latent bug when
    # CWD != inpFolder.
    mlInptPaths = [x for x in os.listdir(inpFolder) if x.endswith('MLinpt.txt')]
    assert len(mlInptPaths) > 0, "Need at least 1 input file, but none found in folder {}".format(inpFolder)
    if database is None:
        database = yhDb.YehLindauXSectionDatabase()
    return getSpectrumFromMlinptFileList( mlInptPaths, fwhm, hv, angle, polarised, database, multEnergiesByMinusOne=multEnergiesByMinusOne ) | 34,182 |
def extract_events_from_stream(stream_df, event_type):
    """ Extract one event type from a stream into a flat order table.

    Rows of ``stream_df`` whose ``EventType`` equals *event_type* have their
    ``Event`` payloads flattened; the result is returned with the columns
    TIMESTAMP, ORDER_ID, PRICE, SIZE, BUY_SELL_FLAG (an empty frame with
    those columns when nothing matches).
    """
    selected = stream_df.loc[stream_df.EventType == event_type][['EventTime', 'Event']]
    # Round-trip through JSON to flatten the nested Event payloads.
    payloads = json.loads(selected['Event'].to_json(orient="records"))
    # TODO : get rid of structs containing all `int` types
    flat = json_normalize(payloads)
    flat = pd.merge(selected['EventTime'].reset_index(), flat,
                    left_index=True, right_index=True)
    if flat.empty:
        return pd.DataFrame({
            'TIMESTAMP': [],
            'ORDER_ID': [],
            'PRICE': [],
            'SIZE': [],
            'BUY_SELL_FLAG': []
        })
    flat = flat[['EventTime', 'order_id', 'limit_price', 'quantity', 'is_buy_order']]
    flat.rename(columns={'EventTime': 'TIMESTAMP',
                         'order_id': 'ORDER_ID',
                         'limit_price': 'PRICE',
                         'quantity': 'SIZE',
                         'is_buy_order': 'BUY_SELL_FLAG'}, inplace=True)
    return flat  # | 34,183 |
def d_psi(t):
    """Compute the derivative of the variable transform from Ogata 2005.

    psi'(t) = (pi*t*cosh(t) + sinh(pi*sinh(t))) / (1 + cosh(pi*sinh(t))),
    evaluated only for t < 6; beyond that the expression is numerically 1
    (and cosh would overflow), so 1.0 is returned directly.
    """
    values = np.array(t, dtype=float)
    out = np.ones_like(values)
    small = values < 6
    ts = values[small]
    sinh_ts = np.sinh(ts)
    out[small] = (np.pi * ts * np.cosh(ts) + np.sinh(np.pi * sinh_ts)) / (
        1.0 + np.cosh(np.pi * sinh_ts)
    )
    return out  # | 34,184 |
def build_environ(request: HTTPRequest, errors: Errors) -> Dict[str, Any]:
    """
    Build a WSGI ``environ`` mapping from an HTTP trigger request.

    See https://www.python.org/dev/peps/pep-3333/ for the required keys.

    Fix: per PEP 3333 (and RFC 3875) ``QUERY_STRING`` is the portion of the
    request URL *after* the ``?`` — the previous leading ``?`` violated the
    spec and confused standards-compliant WSGI apps, so it was removed.
    """
    headers = {
        f"HTTP_{k.upper().replace('-','_')}": v for k, v in request.header.items()
    }
    environ = {
        # Kept consistent with Alibaba Cloud Function Compute HTTP triggers
        "fc.context": request.context,
        "fc.request_uri": request.path,
        # Standard WSGI values
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": "http",
        "wsgi.input": io.BytesIO(request.body),
        "wsgi.errors": errors,
        "wsgi.multithread": False,
        "wsgi.multiprocess": False,
        "wsgi.run_once": True,
        "SERVER_NAME": "127.0.0.1",
        "SERVER_PORT": "80",
        "SERVER_PROTOCOL": "HTTP/1.0",
        "REQUEST_METHOD": request.method,
        "SCRIPT_NAME": "",
        "PATH_INFO": request.path,
        # No leading '?': QUERY_STRING is only what follows it.
        "QUERY_STRING": "&".join([f"{k}={v}" for k, v in request.query.items()]),
        # Content headers have dedicated CGI keys, so strip them from HTTP_*.
        "CONTENT_TYPE": headers.pop("HTTP_CONTENT_TYPE", ""),
        "CONTENT_LENGTH": headers.pop("HTTP_CONTENT_LENGTH", ""),
    }
    environ.update(headers)
    return environ  # | 34,185 |
def test_peekleft_after_two_appendleft(deque_fixture):
    """Test peekleft after appending to the left of deque."""
    deque_fixture.appendleft(7)
    deque_fixture.appendleft(8)
    # The most recently left-appended value must be at the head.
    assert deque_fixture.peekleft() == 8 | 34,186 |
def loadPage(url, filename):
    """
    Purpose: send a request to the given URL and return the server's
    response body.

    url: the URL to fetch
    filename: name of the page being processed (used in the progress message)
    """
    print "正在下载 " + filename
    # Spoof a desktop browser User-Agent so the server does not reject the
    # request as coming from a script.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
    request = urllib2.Request(url, headers=headers)
    return urllib2.urlopen(request).read() | 34,187 |
def get_exporter_class():
    """Pick the exporter class based on preferences and support."""
    # XLSX only when explicitly enabled and supported; otherwise fall back
    # to CSV.  The `is True` check mirrors the original strict comparison.
    return XLSXExporter if _use_xlsx() is True else CSVExporter  # | 34,188 |
def get_twitter_token():
    """This is used by the API to look for the auth token and secret
    it should use for API calls. During the authorization handshake
    a temporary set of token and secret is used, but afterwards this
    function has to return the token and secret. If you don't want
    to store this in the database, consider putting it into the
    session instead.

    Returns:
        The ``(token, secret)`` value stored in the Flask session, or
        ``None`` if no token has been stored yet.
    """
    return session.get('twitter_token') | 34,189 |
def is_text_serializer(serializer):
    """Checks whether a serializer generates text or binary."""
    # Probe with an empty dict: text serializers (e.g. json) produce str,
    # binary ones (e.g. pickle) produce bytes.
    probe = serializer.dumps({})
    return isinstance(probe, str)  # | 34,190 |
def plot_power(ngroups, mesh_shape, directory, mode="show"):
    """Plot the integrated fission rates from OpenMC and OpenMOC, as well as
    the relative and absolute error of OpenMOC relative to OpenMC.
    Parameters:
    -----------
    ngroups: int; number of energy groups
    mesh_shape: str; name of the mesh shape
    directory: str; path to the data
    mode: str; "show" (display the plot), "save" (write a PDF next to the
        data), or "return" (skip plotting entirely)
    Returns:
    --------
    tuple of (montecarlo_power, moc_power, errors, pcmerr) numpy arrays;
    when mode != "return" the plots are produced as a side effect first
    """
    mode = _check_mode(mode)
    directory, shape = _check_params(directory, mesh_shape)
    montecarlo_power = np.zeros(shape)
    moc_power = np.zeros(shape)
    # Integrate over all energy groups
    for g in range(ngroups):
        rates_name = "fission_{:02d}-of-{}_{}".format(g+1, ngroups, mesh_shape)
        fname = directory + "montecarlo_" + rates_name
        montecarlo_group_rates = np.loadtxt(fname)
        montecarlo_power += montecarlo_group_rates
        fname = directory + "moc_" + rates_name
        moc_group_rates = np.loadtxt(fname)
        moc_power += moc_group_rates
    # Filter out results that are essentially zero
    # (anything below 10% of the Monte Carlo mean, in either solution)
    mc_mean = np.nanmean(montecarlo_power)*0.1
    indices = (montecarlo_power < mc_mean) + (moc_power < mc_mean)
    montecarlo_power[indices] = np.nan
    moc_power[indices] = np.nan
    # Normalize
    montecarlo_power /= np.nanmean(montecarlo_power)
    moc_power /= np.nanmean(moc_power)
    # Find the errors in the normalized distributions
    errors = np.divide(moc_power - montecarlo_power, montecarlo_power/100)
    pcmerr = (moc_power - montecarlo_power)*100
    if mode == "return":
        return montecarlo_power, moc_power, errors, pcmerr
    # Plot OpenMC's fission rates in the upper left subplot
    plt.subplot(231)
    plt.imshow(montecarlo_power.squeeze(), interpolation='none', cmap='jet')
    plt.title('OpenMC Power Distribution\n{} groups'.format(ngroups))
    # Shared color scale so the two power plots are directly comparable
    cmin = min(np.nanmin(montecarlo_power), np.nanmin(moc_power))
    cmax = max(np.nanmax(montecarlo_power), np.nanmax(moc_power))
    plt.clim(cmin, cmax)
    plt.colorbar()
    # Plot OpenMOC's fission rates in the upper right subplot
    plt.subplot(232)
    plt.imshow(moc_power.squeeze(), interpolation='none', cmap='jet')
    plt.title('OpenMOC Power Distribution\n{} groups'.format(ngroups))
    plt.clim(cmin, cmax)
    plt.colorbar()
    # Plot the relative error in the lower left subplot
    plt.subplot(233)
    pct = plt.imshow(errors.squeeze(), interpolation='none', cmap='jet')
    # Symmetric color limits centered on zero error
    posmax = np.nanmax(errors)
    negmax = np.nanmin(errors)
    cmax = np.ceil(max(abs(posmax), abs(negmax)))
    plt.clim(-cmax, +cmax)
    plt.title('Relative error (%)')
    plt.colorbar(pct)
    # Plot the absolute error in the lower right subplot
    plt.subplot(234)
    pct = plt.imshow(pcmerr.squeeze(), interpolation='none', cmap='jet')
    posmax = np.nanmax(pcmerr)
    negmax = np.nanmin(pcmerr)
    cmax = np.ceil(max(abs(posmax), abs(negmax)))
    plt.clim(-cmax, +cmax)
    plt.title('Absolute error (%)')
    plt.colorbar(pct)
    if mode == "show":
        plt.show()
    elif mode == "save":
        # Save and/or show the plot
        plt.tight_layout()
        fname = directory + "power_{}-groups.pdf".format(ngroups)
        plt.savefig(fname)
        print("Figure saved to:", fname)
    return montecarlo_power, moc_power, errors, pcmerr | 34,191 |
def glDeleteFramebuffersEXT( baseOperation, n, framebuffers=None ):
    """glDeleteFramebuffersEXT( framebuffers ) -> None

    Convenience wrapper that lets the caller omit the explicit count.
    """
    if framebuffers is None:
        # NOTE(review): when only one argument is supplied, ``n`` actually
        # carries the framebuffer handles (asArray converts it) and the real
        # count is recomputed below — presumably PyOpenGL's single-argument
        # wrapper convention; confirm against the lazywrapper pattern.
        framebuffers = arrays.GLuintArray.asArray( n )
        n = arrays.GLuintArray.arraySize( framebuffers )
    return baseOperation( n, framebuffers ) | 34,192 |
def projection_matching(input_model, projs, nside, dir_suffix=None, **kwargs):
    """
    Assign Euler-angle orientations to experimental projections by matching
    each one against reference projections generated from *input_model*.

    Parameters:
    -------
    input_model: path of input mrc file
    projs: numpy array of projection images, indexed as projs[:, :, i]
    nside: HEALPix nside controlling the density of the reference angle grid
    dir_suffix: optional subdirectory under WD for the model projections
    kwargs: must contain 'WD', the working directory
    Returns:
        numpy array of Euler angles, one row per projection (third column
        holds the matched in-plane rotation index)
    """
    try:
        WD = kwargs['WD']
    except KeyError as error:
        raise KeyError('lack working directory')
    EAs_grid = gen_ref_EAs_grid(nside=nside, psi_step=360)
    if dir_suffix:
        dstpath = os.path.join(WD, dir_suffix, 'model_projections')
    else:
        dstpath = os.path.join(WD, 'model_projections')
    # NOTE(review): N is assigned but never used below — confirm whether it
    # was meant to guard/validate the input shape.
    N = projs.shape[0]
    num_projs = projs.shape[2]
    num_model_imgs = EAs_grid.shape[0]
    num_threads = mp.cpu_count()
    tic = time.time()
    # The reference images are published through a module-level global so
    # that forked worker processes (find_similar) can read them without
    # pickling the array per task.
    global model_proj_imgs
    model_proj_imgs = gen_mrcs_from_EAs(EAs_grid, input_model, dstpath)
    print('Time to recover projections from mrcs file: {0:.4f} s'.format(
        time.time() - tic))
    print('Projection matching: multiprocess start')
    with mp.Pool(processes=num_threads) as pool:
        idx = pool.map(
            find_similar, (projs[:, :, i] for i in range(num_projs)))
    print('\nFinish orientation!')
    idx_arr = np.asarray(idx)
    orientations = EAs_grid[idx_arr[:, 0]]
    orientations[:, 2] = idx_arr[:, 1]
    return orientations | 34,193 |
def backtostr(dayback=1, format="%Y/%m/%d", thedate=None):
    """Print backto datetime in string format.

    Bug fix: ``thedate`` previously defaulted to ``date.today()`` evaluated
    once at import time, so a long-running process kept using a stale date.
    The default is now resolved at call time; explicit dates behave as
    before.

    :param dayback: number of days to go back (default 1)
    :param format: ``strftime`` format of the returned string
    :param thedate: reference date; defaults to today's date at call time
    """
    if thedate is None:
        thedate = date.today()
    return(backto(dayback=dayback, thedate=thedate).strftime(format))  # | 34,194 |
def main():
  """Framework for cross test generation and execution.
  Builds and executes cross tests from the space of all possible attribute
  combinations. The space can be restricted by providing subsets of attributes
  to specifically include or exclude.
  """
  # pypath is where to find other Subzero python scripts.
  pypath = os.path.abspath(os.path.dirname(sys.argv[0]))
  # Root of the NaCl checkout; toolchain and output paths derive from it.
  root = FindBaseNaCl()
  # The rest of the attribute sets.
  targets = [ 'x8632', 'x8664', 'arm32', 'mips32' ]
  sandboxing = [ 'native', 'sandbox', 'nonsfi' ]
  opt_levels = [ 'Om1', 'O2' ]
  arch_attrs = { 'x8632': [ 'sse2', 'sse4.1' ],
                 'x8664': [ 'sse2', 'sse4.1' ],
                 'arm32': [ 'neon', 'hwdiv-arm' ],
                 'mips32': [ 'base' ]
               }
  flat_attrs = []
  for v in arch_attrs.values():
    flat_attrs += v
  arch_flags = { 'x8632': [],
                 'x8664': [],
                 'arm32': [],
                 'mips32': []
               }
  # all_keys is only used in the help text.
  all_keys = '; '.join([' '.join(targets), ' '.join(sandboxing),
                        ' '.join(opt_levels), ' '.join(flat_attrs)])
  argparser = argparse.ArgumentParser(
    description=' ' + main.__doc__ +
    'The set of attributes is the set of tests plus the following:\n' +
    all_keys, formatter_class=argparse.RawTextHelpFormatter)
  argparser.add_argument('--config', default='crosstest.cfg', dest='config',
                         metavar='FILE', help='Test configuration file')
  argparser.add_argument('--print-tests', default=False, action='store_true',
                         help='Print the set of test names and exit')
  argparser.add_argument('--include', '-i', default=[], dest='include',
                         action='append', metavar='ATTR_LIST',
                         help='Attributes to include (comma-separated). ' +
                         'Can be used multiple times.')
  # NOTE(review): the --exclude help text says "include" — looks like a
  # copy/paste slip; it should read "Attributes to exclude".
  argparser.add_argument('--exclude', '-e', default=[], dest='exclude',
                         action='append', metavar='ATTR_LIST',
                         help='Attributes to include (comma-separated). ' +
                         'Can be used multiple times.')
  argparser.add_argument('--verbose', '-v', default=False, action='store_true',
                         help='Use verbose output')
  argparser.add_argument('--defer', default=False, action='store_true',
                         help='Defer execution until all executables are built')
  argparser.add_argument('--no-compile', '-n', default=False,
                         action='store_true',
                         help="Don't build; reuse binaries from the last run")
  argparser.add_argument('--dir', dest='dir', metavar='DIRECTORY',
                         default=('{root}/toolchain_build/src/subzero/' +
                                  'crosstest/Output').format(root=root),
                         help='Output directory')
  argparser.add_argument('--lit', default=False, action='store_true',
                         help='Generate files for lit testing')
  argparser.add_argument('--toolchain-root', dest='toolchain_root',
                         default=(
                           '{root}/toolchain/linux_x86/pnacl_newlib_raw/bin'
                         ).format(root=root),
                         help='Path to toolchain binaries.')
  argparser.add_argument('--filetype', default=None, dest='filetype',
                         help='File type override, one of {asm, iasm, obj}.')
  args = argparser.parse_args()
  # Run from the crosstest directory to make it easy to grab inputs.
  crosstest_dir = '{root}/toolchain_build/src/subzero/crosstest'.format(
    root=root)
  os.chdir(crosstest_dir)
  tests = ConfigParser.RawConfigParser()
  # NOTE(review): this reads the hard-coded 'crosstest.cfg' rather than
  # args.config — presumably a latent bug that makes --config a no-op;
  # confirm before relying on that flag.
  tests.read('crosstest.cfg')
  if args.print_tests:
    print 'Test name attributes: ' + ' '.join(sorted(tests.sections()))
    sys.exit(0)
  # includes and excludes are both lists of sets.
  includes = [ set(item.split(',')) for item in args.include ]
  excludes = [ set(item.split(',')) for item in args.exclude ]
  # If any --include args are provided, the default is to not match.
  default_match = not args.include
  # Delete and recreate the output directory, unless --no-compile was specified.
  if not args.no_compile:
    if os.path.exists(args.dir):
      if os.path.isdir(args.dir):
        shutil.rmtree(args.dir)
      else:
        os.remove(args.dir)
    if not os.path.exists(args.dir):
      os.makedirs(args.dir)
  # If --defer is specified, collect the run commands into deferred_cmds for
  # later execution.
  deferred_cmds = []
  # Enumerate the full cross product of test x target x sandboxing x opt x
  # attr, filtered by the include/exclude sets.
  for test in sorted(tests.sections()):
    for target in targets:
      for sb in sandboxing:
        for opt in opt_levels:
          for attr in arch_attrs[target]:
            desc = [ test, target, sb, opt, attr ]
            if Match(set(desc), includes, excludes, default_match):
              exe = '{test}_{target}_{sb}_{opt}_{attr}'.format(
                test=test, target=target, sb=sb, opt=opt,
                attr=attr)
              extra = (tests.get(test, 'flags').split(' ')
                       if tests.has_option(test, 'flags') else [])
              if args.filetype:
                extra += ['--filetype={ftype}'.format(ftype=args.filetype)]
              # Generate the compile command.
              cmp_cmd = (
                ['{path}/crosstest.py'.format(path=pypath),
                 '-{opt}'.format(opt=opt),
                 '--mattr={attr}'.format(attr=attr),
                 '--prefix=Subzero_',
                 '--target={target}'.format(target=target),
                 '--nonsfi={nsfi}'.format(nsfi='1' if sb=='nonsfi' else '0'),
                 '--sandbox={sb}'.format(sb='1' if sb=='sandbox' else '0'),
                 '--dir={dir}'.format(dir=args.dir),
                 '--output={exe}'.format(exe=exe),
                 '--driver={drv}'.format(drv=tests.get(test, 'driver'))] +
                extra +
                ['--test=' + t
                 for t in tests.get(test, 'test').split(' ')] +
                arch_flags[target])
              run_cmd_base = os.path.join(args.dir, exe)
              # Generate the run command, wrapping it in the sandbox or
              # nonsfi loader as appropriate.
              run_cmd = run_cmd_base
              if sb == 'sandbox':
                run_cmd = '{root}/run.py -q '.format(root=root) + run_cmd
              elif sb == 'nonsfi':
                run_cmd = (
                  '{root}/scons-out/opt-linux-{arch}/obj/src/nonsfi/' +
                  'loader/nonsfi_loader ').format(
                    root=root, arch=NonsfiLoaderArch(target)) + run_cmd
                run_cmd = RunNativePrefix(args.toolchain_root, target, attr,
                                          run_cmd)
              else:
                run_cmd = RunNativePrefix(args.toolchain_root, target, attr,
                                          run_cmd)
              if args.lit:
                # Create a file to drive the lit test.
                with open(run_cmd_base + '.xtest', 'w') as f:
                  f.write('# RUN: sh %s | FileCheck %s\n')
                  f.write('cd ' + crosstest_dir + ' && \\\n')
                  f.write(' '.join(cmp_cmd) + ' && \\\n')
                  f.write(run_cmd + '\n')
                  f.write('echo Recreate a failure using ' + __file__ +
                          ' --toolchain-root=' + args.toolchain_root +
                          (' --filetype=' + args.filetype
                           if args.filetype else '') +
                          ' --include=' + ','.join(desc) + '\n')
                  f.write('# CHECK: Failures=0\n')
              else:
                if not args.no_compile:
                  shellcmd(cmp_cmd,
                           echo=args.verbose)
                if (args.defer):
                  deferred_cmds.append(run_cmd)
                else:
                  shellcmd(run_cmd, echo=True)
  # Execute any run commands that were deferred until after all builds.
  for run_cmd in deferred_cmds:
    shellcmd(run_cmd, echo=True) | 34,195 |
def test_two_underscores():
    """escape two or more underscores inside words.

    GFM would otherwise treat intra-word underscores as emphasis markers.
    """
    assert_equal(
        GithubMarkdown.gfm('foo_bar_baz'),
        'foo\\_bar\\_baz',
    ) | 34,196 |
def no_conflict_require_POST(f):
    """
    Catches resource conflicts on save and returns a 409 error.
    Also includes require_POST decorator.

    Decorator order matters: require_POST runs first, so non-POST requests
    are rejected (405) before the view body can raise ResourceConflict.
    """
    @require_POST
    @wraps(f)
    def _no_conflict(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except ResourceConflict:
            # Couch-style document update conflict -> HTTP 409 Conflict
            return HttpResponse(status=409)
    return _no_conflict | 34,197 |
def test_paragraph_series_m_fb_ol_ol_nl_fb():
    """
    Test case: Ordered list x2 newline fenced block

    The unindented fence must close both nested ordered lists and render as
    a top-level code block.
    """
    # Arrange
    source_markdown = """1. 1.
```
foo
```
"""
    # Expected token stream: both list scopes end before the fenced block.
    expected_tokens = [
        "[olist(1,1):.:1:3:]",
        "[olist(1,4):.:1:6:   ]",
        "[BLANK(1,6):]",
        "[end-olist:::True]",
        "[end-olist:::True]",
        "[fcode-block(2,1):`:3::::::]",
        "[text(3,1):foo:]",
        "[end-fcode-block::3:False]",
        "[BLANK(5,1):]",
    ]
    expected_gfm = """<ol>
<li>
<ol>
<li></li>
</ol>
</li>
</ol>
<pre><code>foo
</code></pre>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens) | 34,198 |
def test_JanusVariableManager_return_type_policy():
    # sourcery skip: equality-identity, use-assigned-variable
    """
    This checks any return value policies that are not on the default policy
    `return_value_policy::automatic`
    Reference: <https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies>
    """
    xml_path = (
        f"{os.path.dirname(__file__)}/../../Examples/JanusVariableManagerExample.xml"
    )
    jvm = pyJanus.JanusVariableManager(xml_path)
    input00 = pyJanus.JanusVariable(
        "input00", pyJanus.janusInputVariable, pyJanus.janusMandatory, "m s-1", 0.0
    )
    jInput00 = jvm.push_back(input00)
    jv_input00 = jvm[jInput00]
    # Identity (not just equality) must hold: repeated indexing should hand
    # back the same Python object, proving a reference return policy.
    assert id(jv_input00) == id(jvm[jInput00]) | 34,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.