content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def optimise_sdss_features(sdss, scaler_path):
    """ Apply the W14 reddening correction, compute key colours and scale the
    selected features of the SDSS dataset, all in place.
    Parameters
    ----------
    sdss : DataFrame
        The DataFrame containing photometric features; modified in place.
    scaler_path : str
        Path to a previously fitted scaler object, loaded via `load_results`.
        The scaler must have been fitted on the same feature columns listed
        in `w14_feature_cols` below.
    """
    # compute the three sets of reddening correction
    A_u_w14, A_g_w14, A_r_w14, A_i_w14, A_z_w14 = reddening_correction_w14(sdss['extinction_r'])
    # useful variables
    psf_magnitudes = ['psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z']
    petro_magnitudes = ['petroMag_u', 'petroMag_g', 'petroMag_r', 'petroMag_i', 'petroMag_z']
    w14_corrections = [A_u_w14, A_g_w14, A_r_w14, A_i_w14, A_z_w14]
    colours = [('psfMag_u', 'psfMag_g'), ('psfMag_g', 'psfMag_r'), ('psfMag_r', 'psfMag_i'), ('psfMag_i', 'psfMag_z'),
               ('petroMag_u', 'petroMag_g'), ('petroMag_g', 'petroMag_r'), ('petroMag_r', 'petroMag_i'), ('petroMag_i', 'petroMag_z')]
    # calculate the corrected magnitudes (presumably stored under a '_w14'
    # suffix by correct_magnitudes — TODO confirm against its definition)
    correct_magnitudes(sdss, psf_magnitudes, w14_corrections, '_w14')
    correct_magnitudes(sdss, petro_magnitudes, w14_corrections, '_w14')
    # compute the colour features from the corrected magnitudes
    compute_colours(sdss, colours, '_w14')
    # scale features
    w14_feature_cols = ['psfMag_r_w14', 'psf_u_g_w14', 'psf_g_r_w14', 'psf_r_i_w14',
                        'psf_i_z_w14', 'petroMag_r_w14', 'petro_u_g_w14', 'petro_g_r_w14',
                        'petro_r_i_w14', 'petro_i_z_w14', 'petroRad_r']
    scaler = load_results(scaler_path)
    sdss[w14_feature_cols] = scaler.transform(sdss[w14_feature_cols]) | 5,327,300 |
def test_makecondenserloop():
    """pytest for makecondenserloop.

    Each tdata entry is (blankidf, loopname, sloop, dloop, nidf): build a
    condenser loop from `sloop`/`dloop` in an IDF made from `blankidf` and
    compare its parsed model with the expected IDF text `nidf`.
    """
    tdata = (
        (
            "",
            "c_loop",
            ["sb0", ["sb1", "sb2", "sb3"], "sb4"],
            ["db0", ["db1", "db2", "db3"], "db4"],
            """BRANCH, sb0, 0.0, , Pipe:Adiabatic, sb0_pipe,
c_loop Cond_Supply Inlet, sb0_pipe_outlet, Bypass; BRANCH, sb1, 0.0,
, Pipe:Adiabatic, sb1_pipe, sb1_pipe_inlet, sb1_pipe_outlet,
Bypass; BRANCH, sb2, 0.0, , Pipe:Adiabatic, sb2_pipe,
sb2_pipe_inlet, sb2_pipe_outlet, Bypass; BRANCH, sb3, 0.0, ,
Pipe:Adiabatic, sb3_pipe, sb3_pipe_inlet, sb3_pipe_outlet,
Bypass; BRANCH, sb4, 0.0, , Pipe:Adiabatic, sb4_pipe,
sb4_pipe_inlet, c_loop Cond_Supply Outlet, Bypass; BRANCH,
db0, 0.0, , Pipe:Adiabatic, db0_pipe, c_loop Demand Inlet,
db0_pipe_outlet, Bypass; BRANCH, db1, 0.0, , Pipe:Adiabatic, db1_pipe,
db1_pipe_inlet, db1_pipe_outlet, Bypass; BRANCH, db2, 0.0, ,
Pipe:Adiabatic, db2_pipe, db2_pipe_inlet, db2_pipe_outlet, Bypass;
BRANCH, db3, 0.0, , Pipe:Adiabatic, db3_pipe, db3_pipe_inlet,
db3_pipe_outlet, Bypass; BRANCH, db4, 0.0, , Pipe:Adiabatic,
db4_pipe, db4_pipe_inlet, c_loop Demand Outlet, Bypass;
BRANCHLIST, c_loop Cond_Supply Branchs, sb0, sb1, sb2, sb3, sb4;
BRANCHLIST, c_loop Condenser Demand Branchs, db0, db1, db2, db3,
db4; CONNECTOR:SPLITTER, c_loop_supply_splitter, sb0, sb1,
sb2, sb3; CONNECTOR:SPLITTER, c_loop_demand_splitter, db0, db1, db2,
db3; CONNECTOR:MIXER, c_loop_supply_mixer, sb4, sb1, sb2, sb3;
CONNECTOR:MIXER, c_loop_demand_mixer, db4, db1, db2, db3;
CONNECTORLIST, c_loop Cond_Supply Connectors, Connector:Splitter,
c_loop_supply_splitter, Connector:Mixer, c_loop_supply_mixer;
CONNECTORLIST, c_loop Condenser Demand Connectors,
Connector:Splitter, c_loop_demand_splitter, Connector:Mixer,
c_loop_demand_mixer; PIPE:ADIABATIC, sb0_pipe,
c_loop Cond_Supply Inlet, sb0_pipe_outlet; PIPE:ADIABATIC,
sb1_pipe, sb1_pipe_inlet, sb1_pipe_outlet; PIPE:ADIABATIC, sb2_pipe,
sb2_pipe_inlet, sb2_pipe_outlet; PIPE:ADIABATIC, sb3_pipe,
sb3_pipe_inlet, sb3_pipe_outlet; PIPE:ADIABATIC, sb4_pipe,
sb4_pipe_inlet, c_loop Cond_Supply Outlet; PIPE:ADIABATIC,
db0_pipe, c_loop Demand Inlet, db0_pipe_outlet; PIPE:ADIABATIC,
db1_pipe, db1_pipe_inlet, db1_pipe_outlet; PIPE:ADIABATIC,
db2_pipe, db2_pipe_inlet, db2_pipe_outlet; PIPE:ADIABATIC,
db3_pipe, db3_pipe_inlet, db3_pipe_outlet; PIPE:ADIABATIC, db4_pipe,
db4_pipe_inlet, c_loop Demand Outlet; CONDENSERLOOP, c_loop, Water, ,
, , , , , 0.0, Autocalculate, c_loop Cond_Supply Inlet,
c_loop Cond_Supply Outlet, c_loop Cond_Supply Branchs,
c_loop Cond_Supply Connectors, c_loop Demand Inlet,
c_loop Demand Outlet, c_loop Condenser Demand Branchs,
c_loop Condenser Demand Connectors, Sequential, None; """,
        ),  # blankidf, loopname, sloop, dloop, nidf
    )
    for blankidf, loopname, sloop, dloop, nidf in tdata:
        # Bug fix: the loop previously ignored the tdata values — it built
        # the IDF from StringIO("") instead of `blankidf` and re-assigned
        # loopname/sloop/dloop to hard-coded copies of the same values.
        fhandle = StringIO(blankidf)
        idf1 = IDF(fhandle)
        hvacbuilder.makecondenserloop(idf1, loopname, sloop, dloop)
        idf2 = IDF(StringIO(nidf))
        assert str(idf1.model) == str(idf2.model)
def trailing_whitespace(text):
    """Return the run of trailing whitespace/newline characters of *text*.

    Characters are matched against the module-level ``WHITESPACE_OR_NL_CHARS``.
    Scans backwards with an index instead of repeatedly slicing and
    concatenating strings, which made the original quadratic in the length
    of the trailing run.
    """
    end = len(text)
    while end and text[end - 1] in WHITESPACE_OR_NL_CHARS:
        end -= 1
    return text[end:]
def rjust(a, width, fillchar=' '):
    """
    Return an array with the elements of `a` right-justified in a
    string of length `width`.
    Calls `str.rjust` element-wise.
    Parameters
    ----------
    a : array_like of str or unicode
    width : int
        The length of the resulting strings
    fillchar : str or unicode, optional
        The character to use for padding
    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type
    See Also
    --------
    str.rjust
    """
    a_arr = numpy.asarray(a)
    width_arr = numpy.asarray(width)
    # Output dtype is sized for the widest requested width (width may be
    # an array and is broadcast element-wise by _vec_string).
    size = int(numpy.max(width_arr.flat))
    # For byte-string arrays the fill character must be bytes as well.
    # NOTE(review): numpy.string_ is the legacy bytes dtype alias.
    if numpy.issubdtype(a_arr.dtype, numpy.string_):
        fillchar = asbytes(fillchar)
    return _vec_string(
        a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar)) | 5,327,303 |
async def test_setup_import(opp: OpenPeerPower, router: Mock):
    """Test setup of integration from import.

    Verifies that setting up the integration via YAML does not create a
    duplicate config entry when one with the same unique_id already exists,
    and that the (mocked) router is connected exactly once.
    """
    await async_setup_component(opp, "persistent_notification", {})
    # Pre-register a config entry with the same host/port as the YAML import.
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: MOCK_HOST, CONF_PORT: MOCK_PORT},
        unique_id=MOCK_HOST,
    )
    entry.add_to_opp(opp)
    assert await async_setup_component(
        opp, DOMAIN, {DOMAIN: {CONF_HOST: MOCK_HOST, CONF_PORT: MOCK_PORT}}
    )
    await opp.async_block_till_done()
    # Still exactly one entry: the import must not duplicate it.
    assert opp.config_entries.async_entries() == [entry]
    assert router.call_count == 1
    assert router().open.call_count == 1
    assert opp.services.has_service(DOMAIN, SERVICE_REBOOT) | 5,327,304 |
def form_image_helper(caption_list, dummy=True, image_suffix='.jpg'):
    """
    Build a dict of dummy image file objects for Django form-submission tests.

    :param caption_list: <class 'list'> form field names to map to image files
    :param dummy: <class 'bool'> (currently unused — TODO confirm intent)
    :param image_suffix: <class 'str'> suffix of the generated temp image
    :return: <class 'dict'> [dict of dummy images for form submission in django unittest]

    NOTE(review): every caption is mapped to the SAME open file handle, so
    consuming one entry exhausts them all; callers that post multiple image
    fields may need one handle per field — confirm against usage.
    NOTE(review): the handle is returned from inside the `with` block, so it
    is still open at return time but will be closed once garbage-collected.
    """
    try:
        res = {}
        image = Image.new('RGB', (100, 100))
        tmp_file = tempfile.NamedTemporaryFile(suffix=image_suffix)
        image.save(tmp_file)
        with open(tmp_file.name, 'rb') as fp:
            for title in caption_list:
                res[title] = fp
            return res
    except Exception as e:
        # Any failure is surfaced as ValueError (original error printed first).
        print(str(e))
        raise ValueError(str(e)) | 5,327,305 |
def test_calculate_part_stress():
    """calculate_part_stress() should return a the attributes dict updated with
    calculated values."""
    # ATTRIBUTES is a module-level fixture dict; mutated here so the
    # subcategory applies to this test case.
    ATTRIBUTES["subcategory_id"] = 1
    _attributes = integratedcircuit.calculate_part_stress(**ATTRIBUTES)
    assert isinstance(_attributes, dict)
    # Expected values are MIL-HDBK-217F part-stress results for this input set.
    assert _attributes["piL"] == pytest.approx(0.73699794)
    assert _attributes["C1"] == 0.01
    assert _attributes["C2"] == pytest.approx(0.011822791)
    assert _attributes["temperature_junction"] == 53.05
    assert _attributes["piT"] == pytest.approx(1.04718497)
    assert _attributes["hazard_rate_active"] == pytest.approx(0.032862208) | 5,327,306 |
def is_complex_converter(obj: Any) -> bool:
    """Check whether *obj* is a complex converter.

    True for instances of ``ComplexConverterABC`` and for classes that
    subclass it.
    """
    if isinstance(obj, ComplexConverterABC):
        return True
    return inspect.isclass(obj) and issubclass(obj, ComplexConverterABC)
def test_delete_unknown_secret(
    corev1_api_client_with_user_secrets, user_secrets, no_db_user
):
    """Test delete a non existing secret.

    Deleting an unknown secret name must raise REANASecretDoesNotExist and
    must not touch the Kubernetes secret store.
    """
    with patch(
        "reana_commons.k8s.secrets." "current_k8s_corev1_api_client",
        corev1_api_client_with_user_secrets(user_secrets),
    ) as api_client:
        secrets_store = REANAUserSecretsStore(no_db_user.id_)
        secret_name = "unknown-secret"
        with pytest.raises(REANASecretDoesNotExist):
            secrets_store.delete_secrets([secret_name])
        # No write must have been attempted against Kubernetes.
        api_client.replace_namespaced_secret.assert_not_called() | 5,327,308 |
def find_pool_or_vip_by_name(full_list, queried_name, fuzzy_search):
    """Wrapper that dispatches the pool/VIP search to the fuzzy or the
    regex based lookup, depending on `fuzzy_search`."""
    logger.debug(
        "Performing search for %s in %s partition",
        queried_name,
        ARGS.partition,
    )
    search = find_resource_by_fuzzysearch if fuzzy_search else find_resource_by_regex
    return search(full_list, queried_name)
def _get_spreadsheet_with_values(spreadsheet_id):
    """Gets all sheets and their cells.

    Fetches the spreadsheet via the module-level Google Sheets `service`
    client and returns a list of {"name": sheet title, "cells": 2-D list of
    Cell} dicts. Returns an empty list when no service client is configured.
    """
    sheets = []
    if not service:
        return sheets
    result = service.spreadsheets().get(spreadsheetId=spreadsheet_id, fields=FIELDS).execute()
    for sheet in result["sheets"]:
        # "merges" lists merged-cell ranges; absent when the sheet has none.
        if "merges" in sheet:
            merges_info = sheet["merges"]
        else:
            merges_info = []
        values = sheet["data"][0]
        cells = []
        if "rowData" in values:
            for y, row in enumerate(values["rowData"]):
                cells.append([])
                if "values" in row:
                    for x, value in enumerate(row["values"]):
                        cell = Cell(x, y, _get_cell_value(value))
                        # Attach merge-range info to cells inside a merge.
                        for merge_info in merges_info:
                            x_merge_range = range(merge_info["startColumnIndex"], merge_info["endColumnIndex"])
                            y_merge_range = range(merge_info["startRowIndex"], merge_info["endRowIndex"])
                            if x in x_merge_range and y in y_merge_range:
                                cell.set_merge_range(x_merge_range, y_merge_range)
                        cells[y].append(cell)
        sheets.append({"name": sheet["properties"]["title"], "cells": cells})
        # TODO: store sheet["properties"]["gridProperties"]["rowCount"/"columnCount"] too
    return sheets | 5,327,310 |
def some_task():
    """Some task: run word counts over a fixed set of language files."""
    for filename in ('python.txt', 'go.txt', 'erlang.txt', 'javascript.txt'):
        get_word_counts(filename)
    return "Task Done"
def _accelerate(f, n_devices):
    """JIT-compiled version of `f` running on `n_devices`.

    A single device gets plain JIT compilation; anything else is pmapped
    across devices with the 'batch' axis name.
    """
    if n_devices != 1:
        return fastmath.pmap(f, axis_name='batch')
    return fastmath.jit(f)
def savemodel(model, epoch, trainloss, valloss, metric, optimizer, stopflag, name, scheduler, size):
    """Saves PyTorch model.

    Serialises a full training checkpoint (model/optimizer/scheduler state
    plus bookkeeping values) to ``Cfg.path/name`` via ``torch.save`` so
    training can be resumed later.
    """
    torch.save({
        'model': model.state_dict(),
        'epoch': epoch,
        'trainloss': trainloss,
        'valloss': valloss,
        'metric': metric,
        'optimizer': optimizer.state_dict(),
        'stopflag': stopflag,
        'scheduler': scheduler.state_dict(),
        'size': size,
    }, os.path.join(Cfg.path, name)) | 5,327,313 |
def plot_ranks(data,
               rank_a,
               rank_b,
               label,
               hue=None,
               palette=None,
               order=None,
               ax=None,
               offset=5,
               xlim=(-0.5, 1.5),
               label_kws=None,
               legend_kws=None):
    """Plots ranks using matplotlib.

    Draws a slope graph: for each row of `data`, a line connects its rank in
    column `rank_a` (left) to its rank in column `rank_b` (right), with both
    ends annotated using the `label` column. Rank 1 is drawn at the top.

    Args:
        data: DataFrame with one row per ranked item.
        rank_a: column name of the left-hand ranking.
        rank_b: column name of the right-hand ranking.
        label: column name used to annotate the line ends.
        hue: optional column used to colour labels per category.
        palette: colour palette forwarded to the colour-map construction.
        order: category order forwarded to the colour-map construction.
        ax: matplotlib axes to draw on; a new figure/axes is created if None.
        offset: horizontal offset of the text labels, in points.
        xlim: x-axis limits around the two columns at x=0 and x=1.
        label_kws: extra keyword args merged into the annotate() calls.
        legend_kws: extra keyword args for the legend (only used with `hue`).
    """
    if ax is None:
        _, ax = plt.subplots()
    color_map = _build_colormap(data, hue, palette, order=order)
    default_label_kws = {'va': 'center', 'textcoords': 'offset points'}
    label_kws = toolz.merge(default_label_kws, label_kws or {})
    # Plot lines (rows missing either rank are dropped).
    y_values = data[[rank_a, rank_b]].dropna().T
    ax.plot([0, 1], y_values, color='lightgrey')
    # Plot labels.
    for row in data.itertuples():
        if hue is not None:
            color = color_map.get(getattr(row, hue), 'black')
        else:
            color = 'black'
        ax.annotate(
            getattr(row, label),
            xy=(0, getattr(row, rank_a)),
            xytext=(-offset, 0),
            ha='right',
            color=color,
            **label_kws)
        ax.annotate(
            getattr(row, label),
            xy=(1, getattr(row, rank_b)),
            xytext=(offset, 0),
            ha='left',
            color=color,
            **label_kws)
    # Style axes.
    ax.set_xticks([0, 1])
    ax.set_xticklabels([rank_a, rank_b])
    ax.set_ylabel('Rank')
    ax.set_yticks([1] + list(np.arange(5, len(data), 5)))
    # Set lims (inverted y-axis so rank 1 appears at the top).
    max_rank = max(data[rank_a].max(), data[rank_b].max())
    ax.set_ylim(max_rank + 1, 0)
    ax.set_xlim(*xlim)
    # Draw legend.
    if hue is not None:
        _draw_legend(color_map, ax=ax, **(legend_kws or {}))
    sns.despine(ax=ax) | 5,327,314 |
def parse_shadowserver_time(time_string: Text) -> datetime.datetime:
    """Parse a date on the format '2018-10-17 20:36:23'.

    Only the first 19 characters are considered, so trailing fractional
    seconds or timezone suffixes are ignored.

    Raises:
        ValueError: if the string does not match the expected format.
    """
    try:
        return datetime.datetime.strptime(time_string[:19], '%Y-%m-%d %H:%M:%S')
    except ValueError:
        # Surface the offending input before re-raising. The original bare
        # `except:` also swallowed SystemExit/KeyboardInterrupt; only parse
        # failures (ValueError/TypeError from strptime) are relevant here.
        print(time_string)
        raise
def sample_along_rays(rng,
                      origins,
                      directions,
                      radii,
                      num_samples,
                      near,
                      far,
                      genspace_fn,
                      ray_shape,
                      single_jitter,
                      diag=True):
    """Stratified sampling along the rays.
    Args:
        rng: random generator. If `None`, use deterministic sampling.
        origins: [..., 3], ray origins.
        directions: [..., 3], ray directions.
        radii: [..., 3], ray radii.
        num_samples: int.
        near: [..., 1], near-plane camera distance.
        far: [..., 1], far-plane camera distance.
        genspace_fn: Callable, the curve function used when spacing t values.
        ray_shape: string, which shape ray to assume.
        single_jitter: bool, if True, apply the same offset to each sample in a ray.
        diag: bool, if True, produce diagonal covariances (full otherwise).
    Returns:
        t_vals: [..., num_samples], sampled t values,
        (means: [..., num_samples, 3], means,
        covs: [..., num_samples, 3{, 3}], covariances, shape depends on `diag`).
    """
    # num_samples + 1 interval edges define num_samples intervals.
    t_vals = spacing.genspace(near, far, num_samples + 1, fn=genspace_fn)
    sample_shape = list(origins.shape)[:-1] + [num_samples + 1]
    if rng is None:
        # Broadcast t_vals to make the returned shape consistent.
        t_vals = jnp.broadcast_to(t_vals, sample_shape)
    else:
        # Stratified jitter: draw one uniform offset per interval (or one per
        # ray when single_jitter) between neighbouring interval midpoints.
        mids = 0.5 * (t_vals[Ellipsis, 1:] + t_vals[Ellipsis, :-1])
        upper = jnp.concatenate([mids, t_vals[Ellipsis, -1:]], axis=-1)
        lower = jnp.concatenate([t_vals[Ellipsis, :1], mids], axis=-1)
        if single_jitter:
            t_rand = random.uniform(rng, sample_shape[:-1])[Ellipsis, None]
        else:
            t_rand = random.uniform(rng, sample_shape)
        t_vals = lower + (upper - lower) * t_rand
    means, covs = cast_rays(
        t_vals, origins, directions, radii, ray_shape, diag=diag)
    return t_vals, (means, covs) | 5,327,316 |
def resize(img, size, interpolation=PIL.Image.BILINEAR):
    """Resize image to match the given shape.
    This method uses :mod:`cv2` or :mod:`PIL` for the backend.
    If :mod:`cv2` is installed, this function uses the implementation in
    :mod:`cv2`. This implementation is faster than the implementation in
    :mod:`PIL`. Under Anaconda environment,
    :mod:`cv2` can be installed by the following command.
    .. code::
        $ conda install -c menpo opencv3=3.2.0
    Args:
        img (~numpy.ndarray): An array to be transformed.
            This is in CHW format and the type should be :obj:`numpy.float32`.
        size (tuple): This is a tuple of length 2. Its elements are
            ordered as (height, width).
        interpolation (int): Determines sampling strategy. This is one of
            :obj:`PIL.Image.NEAREST`, :obj:`PIL.Image.BILINEAR`,
            :obj:`PIL.Image.BICUBIC`, :obj:`PIL.Image.LANCZOS`.
            Bilinear interpolation is the default strategy.
    Returns:
        ~numpy.ndarray: A resize array in CHW format.
    """
    # _resize is bound to the cv2 or PIL implementation at import time.
    img = _resize(img, size, interpolation)
    return img | 5,327,317 |
def build_punt():
    """Build the best-scores window: one (name, points) table per difficulty.

    NOTE: the original docstring said "user registration window", but the
    function builds the high-scores window.
    """
    easy = PA.dividir_puntajes("facil")
    medium = PA.dividir_puntajes("medio")
    hard = PA.dividir_puntajes("dificil")
    # Every table gets as many rows as the longest score list.
    rows = len(max([easy, medium, hard], key=len))
    def create_table(data, dif):
        """Two-column score table for difficulty `dif`."""
        return sg.Table(
            values = data,
            headings = [dif, "Puntos"],
            auto_size_columns = True,
            justification = 'center',
            alternating_row_color = 'lightblue',
            hide_vertical_scroll = True,
            num_rows = rows
        )
    # Bug fix: sg.Table expects a list of rows; the empty-data fallback
    # previously passed a single row's cells (["Vacio", ""]) as if it were
    # the row list, rendering "V"/"a" style garbage. Wrap it in a list.
    layout = [
        [ create_table(easy if (len(easy) > 0) else [["Vacio", ""]], "Facil"),
          create_table(medium if (len(medium) > 0) else [["vacio", ""]], "Medio"),
          create_table(hard if (len(hard) > 0) else [["vacio", ""]], "Dificil")
        ],
        [sg.Ok()]
    ]
    window = sg.Window('Mejores puntajes por dificultad ', layout, element_justification='center')
    return window
def true_fov(M, fov_e=50):
    """Calculate the True Field of View (FOV) of a telescope & eyepiece pair.

    Args:
        M (float): Magnification of Telescope
        fov_e (float): Apparent FOV of eyepiece; default 50 deg

    Returns:
        float: True Field of View (deg)
    """
    return fov_e / M
def add_hooks():
    """
    Adds the endpoint hooks to the application object.

    `APP` is the module-level (Eve-style) application; each `on_*` event is
    extended with the corresponding handler from the `hooks` module. Called
    once at startup, before the app starts serving.
    """
    # Accounts hooks
    APP.on_inserted_accounts += hooks.log_user_create  # pylint: disable=E1101
    APP.on_updated_accounts += hooks.log_user_modified  # pylint: disable=E1101
    APP.on_update_accounts += hooks.manage_user_updates
    APP.on_inserted_accounts_info += hooks.log_user_create  # pylint: disable=E1101
    APP.on_insert_accounts_create += hooks.register_new_user  # pylint: disable=E1101
    APP.on_deleted_item_accounts += hooks.remove_deleted_user  # pylint: disable=E1101
    # Gene symbol hooks
    APP.on_deleted_gene_symbols += hooks.drop_gene_symbol  # pylint: disable=E1101
    # Ingestion Hooks
    APP.on_updated_ingestion += hooks.process_data_upload  # pylint: disable=E1101
    APP.on_insert_ingestion += hooks.register_upload_job  # pylint: disable=E1101
    # Data Hooks
    APP.on_insert_data += hooks.serialize_objectids  # pylint: disable=E1101
    APP.on_inserted_data += hooks.check_for_analysis  # pylint: disable=E1101
    APP.on_updated_data += hooks.data_patched  # pylint: disable=E1101
    APP.on_inserted_data_edit += hooks.check_for_analysis  # pylint: disable=E1101
    APP.on_insert_data_edit += hooks.serialize_objectids  # pylint: disable=E1101
    APP.on_fetched_item_data += hooks.generate_signed_url  # pylint: disable=E1101
    # Trials hooks
    APP.on_updated_trials += hooks.updated_trial  # pylint: disable=E1101
    # Analysis Hooks
    # Pre get filter hook.
    APP.on_pre_GET += hooks.filter_on_id  # pylint: disable=E1101
    # Logging request related hooks
    APP.on_post_PATCH += hooks.log_patch_request  # pylint: disable=E1101
    APP.on_post_POST += hooks.log_post_request  # pylint: disable=E1101
    APP.on_post_DELETE += hooks.log_delete_request | 5,327,320 |
async def test_html_content_type_with_utf8_encoding(unused_tcp_port, postgres_db, database_name, async_finalizer):
    """Test whether API endpoints with a "text/html; charset=UTF-8" content-type work."""
    configure(unused_tcp_port, database_name, postgres_db.port)
    html_content = "<html><body>test</body></html>"
    # Declare a typed API method served at GET /test.
    @protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
    def test_method() -> ReturnValue[str]:  # NOQA
        pass
    class TestServer(ServerSlice):
        # Handler returning the HTML payload with an explicit UTF-8 content type.
        @protocol.handle(test_method)
        async def test_methodY(self) -> ReturnValue[str]:  # NOQA
            return ReturnValue(response=html_content, content_type=HTML_CONTENT_WITH_UTF8_CHARSET)
    rs = Server()
    server = TestServer(name="testserver")
    rs.add_slice(server)
    await rs.start()
    # Ensure server and slice are stopped after the test.
    async_finalizer.add(server.stop)
    async_finalizer.add(rs.stop)
    # client based calls
    client = protocol.Client("client")
    response = await client.test_method()
    assert response.code == 200
    assert response.result == html_content | 5,327,321 |
def _increment_inertia(centroid, reference_point, m, mass, cg, I):
    """Accumulate one point mass into running mass/cg/inertia totals.

    ``I`` (six moments: Ixx, Iyy, Izz, Ixy, Ixz, Iyz) and ``cg`` are updated
    in place; the new total mass is returned. A zero mass leaves everything
    unchanged.
    """
    if m == 0.:
        return mass
    dx, dy, dz = centroid - reference_point
    dx2, dy2, dz2 = dx * dx, dy * dy, dz * dz
    I[0] += m * (dy2 + dz2)  # Ixx
    I[1] += m * (dx2 + dz2)  # Iyy
    I[2] += m * (dx2 + dy2)  # Izz
    I[3] += m * dx * dy      # Ixy
    I[4] += m * dx * dz      # Ixz
    I[5] += m * dy * dz      # Iyz
    cg += m * centroid
    return mass + m
def enqueue_ide_stop(*args):
    """Reap theia session kube resources by enqueueing the reaper job on
    the "theia" RPC queue."""
    rpc_enqueue(reap_theia_session_by_id, queue="theia", args=args)
def token_addresses(
        request,
        token_amount,
        number_of_tokens,
        blockchain_services,
        cached_genesis,
        register_tokens):
    """ Fixture that yields `number_of_tokens` ERC20 token addresses, where the
    `token_amount` (per token) is distributed among the addresses behind `blockchain_services` and
    potentially pre-registered with the raiden Registry.
    The following arguments can control the behavior:
    Args:
        token_amount (int): the overall number of units minted per token
        number_of_tokens (int): the number of token instances
        register_tokens (bool): controls if tokens will be registered with raiden Registry
    """
    if cached_genesis:
        # Reuse addresses baked into the cached genesis config instead of
        # deploying new token contracts.
        token_addresses = [
            address_decoder(token_address)
            for token_address in cached_genesis['config']['tokenAddresses']
        ]
    else:
        participants = [
            privatekey_to_address(blockchain_service.private_key) for
            blockchain_service in blockchain_services.blockchain_services
        ]
        # Deploy (and optionally register) fresh tokens distributed among
        # the participant addresses.
        token_addresses = _token_addresses(
            token_amount,
            number_of_tokens,
            blockchain_services.deploy_service,
            participants,
            register_tokens
        )
    return token_addresses | 5,327,324 |
def threatExpertSearch(pyew):
    """ Search in Threat Expert for the behavior's report.

    The report is looked up by the MD5 of the currently loaded buffer and
    opened in the default web browser.
    """
    digest = md5(pyew.getBuffer()).hexdigest()
    webbrowser.open("http://www.threatexpert.com/report.aspx?md5=" + digest)
def get_event_stream(ApplicationId=None):
    """
    Returns the event stream for an app.
    :example: response = client.get_event_stream(
        ApplicationId='string'
    )
    :type ApplicationId: string
    :param ApplicationId: [REQUIRED] ApplicationId
    :rtype: dict
    :return: {
        'EventStream': {
            'ApplicationId': 'string',
            'DestinationStreamArn': 'string',
            'ExternalId': 'string',
            'LastModifiedDate': 'string',
            'LastUpdatedBy': 'string',
            'RoleArn': 'string'
        }
    }
    """
    # Generated documentation stub (boto3-style); the body is intentionally
    # empty — the real call is dispatched by the client at runtime.
    pass | 5,327,326 |
def saturating_sigmoid(x):
    """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
    # TF1-style name_scope(name, values) signature: `[x]` is the values list.
    with tf.name_scope("saturating_sigmoid", [x]):
        y = tf.sigmoid(x)
        # Scale/shift so the sigmoid saturates exactly at 0 and 1 for
        # moderately large |x|, then clamp to [0, 1].
        return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1)) | 5,327,327 |
def simulate_strategy(game, strategies, init_states, func):
    """
    Harvest an accurate average payoff with the two strategies, considering traversing
    all possible private cards dealt by nature.

    game : the pyspiel game (currently unused in the body — TODO confirm)
    strategies : the index of player's strategy, a length two list
    init_states : a set of possible openspiel state after chance deals private cards
    func : given a strategy index, an infostate string and a player id, output an action

    Returns the payoff vector averaged uniformly over `init_states`.
    """
    # Starts as an int array but becomes float once node.returns() is added.
    payoff = np.array([0, 0], dtype=int)
    for root in init_states:  # traverse game tree
        node = root
        while not node.is_terminal():
            assert not node.is_chance_node(), "Doesn't exist chance nodes in kuhn's poker after private hands are dealt"
            player = node.current_player()
            action = func(strategies[player], node.information_state_string(), player)
            assert action in node.legal_actions(), "action not legal!"
            node = node.child(action)
        payoff = payoff + node.returns()
    return payoff / len(init_states) | 5,327,328 |
def test_request_id(test_client, log_capture):
    """ check if /foo endpoint will log something with request-id """
    resp = test_client.get("/foo")
    assert resp.json == {"bar": "BAR"}
    # The endpoint's log record must carry the custom extras plus a
    # request-id injected by the logging middleware.
    last_record = log_capture.records[-1]
    assert last_record.message == "some message logged in foo_endpoint"
    assert last_record.some_extra == "1"
    assert getattr(last_record, "request-id", None) is not None | 5,327,329 |
def init_questionnaire_db():
    """Create a new questionnaire database.

    Executes the bundled SQL schema against a fresh QuestionnaireDB;
    `db()` presumably returns the underlying connection — TODO confirm.
    """
    db = QuestionnaireDB()
    with current_app.open_resource('util/schema_questionnaire.sql') as f:
        db().executescript(f.read().decode('utf8')) | 5,327,330 |
async def is_nsfw_and_guild_predicate(ctx):
    """A predicate to test if a command was run in
    an NSFW channel and inside a guild.

    :param ctx: The context of the predicate
    :raises NotNSFWOrGuild: outside a guild or in a non-NSFW channel
    """
    if ctx.guild and ctx.channel.is_nsfw():
        return True
    raise NotNSFWOrGuild()
def get_account(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
    """
    Get information on your DigitalOcean account.
    ## Example Usage
    Get the account:
    ```python
    import pulumi
    import pulumi_digitalocean as digitalocean
    example = digitalocean.get_account()
    ```
    """
    # This data source takes no arguments; only invoke options are forwarded.
    __args__ = dict()
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the provider; fields are re-wrapped in the
    # awaitable result type expected by pulumi programs.
    __ret__ = pulumi.runtime.invoke('digitalocean:index/getAccount:getAccount', __args__, opts=opts, typ=GetAccountResult).value
    return AwaitableGetAccountResult(
        droplet_limit=__ret__.droplet_limit,
        email=__ret__.email,
        email_verified=__ret__.email_verified,
        floating_ip_limit=__ret__.floating_ip_limit,
        id=__ret__.id,
        status=__ret__.status,
        status_message=__ret__.status_message,
        uuid=__ret__.uuid) | 5,327,332 |
def get_partition_leaders(cluster_config):
    """Return the current leaders of all partitions. Partitions are
    returned as a "topic-partition" string.
    :param cluster_config: the cluster
    :type cluster_config: kafka_utils.utils.config.ClusterConfig
    :returns: leaders for partitions
    :rtype: map of ("topic-partition", broker_id) pairs
    """
    client = KafkaClient(cluster_config.broker_list)
    leaders = {}
    for topic, partitions in six.iteritems(client.topic_partitions):
        for partition, metadata in six.iteritems(partitions):
            key = topic + "-" + str(partition)
            leaders[key] = metadata.leader
    return leaders
def xyzToAtomsPositionsWrapper(
        xyzFileOrDir,
        outDir=None,
        outFileBaseName=None,
        fileExt=None,
        noOutFile=False):
    """
    Wrapper for the xyzToAtomsPositions function.
    Returns atom positions (order) given a molecule(s) in an xyz format.
    The heavy atoms positions are based on inchi, thus should always
    be the same, regardless of the heavy atoms order in the xyz file.
    The hydrogen positions ARE NOT UNIQUE. They will depend, to some
    extent, on their order in the xyz file.
    Use this function to set the atoms positions in a reference
    molecule. The idea is to assign the positions once and to never
    change them again.
    Arguments:
    ----------
    xyzFileOrDir : str
        input xyz molecule (either file or dir path)
    outDir : str, optional, default = cwd
        directory to write the results into
    outFileBaseName : str, optional, default xyzFile name
        base name of all output files
    fileExt : str, optional, default = xyz
        extension of the input files
    noOutFile : bool, optional, default False
        suppresses writing any output files
    Returns:
    ----------
    atomsPositions: list of dicts, one {fileName: positions} per input file
        also written to a file unless noOutFile is set
    """
    if outDir is None: outDir= ioutils.getBaseDirPath(xyzFileOrDir)
    if fileExt is None: fileExt= '.xyz'
    xyz_files = ioutils.getFilesWithExtensions(xyzFileOrDir, fileExt)
    # Implicitly returns None when no matching files are found.
    if not xyz_files: return
    atomsPositions = []
    for i, xyz_file in enumerate(xyz_files):
        _atomsPositions = xyztools.xyzToAtomsPositions(xyz_file)
        filePathObj = pathlib.Path(xyz_file)
        fileName = filePathObj.name
        if not noOutFile:
            # Default output name derives from the input file; an explicit
            # base name gets an index suffix to keep outputs distinct.
            outFileName = fileName+'_atomspositions.json'
            if outFileBaseName is not None:
                outFileName = outFileBaseName+'_'+str(i)+'_atomspositions.json'
            outPath = os.path.join(outDir,outFileName)
            ioutils.writeFile(path=outPath,
                data=json.dumps({fileName:_atomsPositions}, indent=4),
                newline='')
        atomsPositions.append({fileName:_atomsPositions})
    return atomsPositions | 5,327,334 |
def find_mod_names(file_path=__file__):
    """Find Ice module names that start with 'ice_test_'.
    The returned names are without the extension '.ice'.
    TODO: Needs to recurse in `test_` sub directories.
    """
    base_dir = pathlib.Path(file_path).absolute().parent
    # pylint: disable=no-member
    return [entry.stem
            for entry in base_dir.iterdir()
            if entry.suffix == '.ice' and entry.stem.startswith('ice_test_')]
def WR(df, N=10, N1=6):
    """
    Williams %R overbought/oversold indicator, at two look-back windows.
    :param df: DataFrame with 'high', 'low' and 'close' columns
    :param N: look-back window (bars) for WR1
    :param N1: look-back window (bars) for WR2
    :return: DataFrame with columns 'WR1' and 'WR2', each in [0, 100]
    """
    HIGH = df['high']
    LOW = df['low']
    CLOSE = df['close']
    # %R = 100 * (highest high - close) / (highest high - lowest low)
    # over the respective window (HHV/LLV are rolling max/min helpers).
    WR1 = 100 * (HHV(HIGH, N) - CLOSE) / (HHV(HIGH, N) - LLV(LOW, N))
    WR2 = 100 * (HHV(HIGH, N1) - CLOSE) / (HHV(HIGH, N1) - LLV(LOW, N1))
    return pd.DataFrame({
        'WR1': WR1, 'WR2': WR2
    }) | 5,327,336 |
def make_fn_type(params):
    """Turn type parameters into corresponding internal type system object.

    The returned object represents the (curried) type of a function over
    the parameters.

    :param params: a list of type paramaters, e.g. from a type
           signature. These should be instances of TypeOperator or
           TypeVariable.
    :returns: An instance of TypeOperator representing the function type.
    """
    from hask3.lang.hindley_milner import Function
    if len(params) == 2:
        # Base case: a -> b
        return Function(params[0], params[1])
    # Recursive case: a -> (rest...), building a right-nested chain.
    return Function(params[0], make_fn_type(params[1:]))
def export(request, wid):
    """Export the logs from the given workflow
    :param request: HTML request
    :param wid: id of the workflow whose logs to export
    :return: Return a CSV download of the logs
    """
    # Only logs owned by the requesting user are exported.
    dataset = LogResource().export(
        Log.objects.filter(user=request.user, workflow__id=wid)
    )
    # Create the response as a csv download
    response = HttpResponse(dataset.csv, content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="logs.csv"'
    return response | 5,327,338 |
def record_params(setup_state):
    """
    Copy the application's config onto the blueprint so we can easily
    work with it from the blueprint's views.
    """
    app = setup_state.app
    worker_router_bp.config = {key: value for key, value in app.config.items()}
def _validate_valid_xml(value):
    """
    Checks whether the given value is well-formed and valid XML.

    Returns True on success; raises ValidationError on any parse failure.
    NOTE: this is Python 2 code (``except X, e`` syntax) — do not port the
    except clauses without also porting the whole module.
    """
    try:
        # Try to create an XML tree from the given String value.
        # The XML declaration is stripped first (XML_DECL is a module-level
        # compiled pattern) before re-encoding to UTF-8 for parsing.
        _value = XML_DECL.sub(u'', value)
        _ = etree.fromstring(_value.encode('utf-8'))
        return True
    except etree.ParseError, parse_error:
        # In case of an exception, we raise a ValidationError.
        raise ValidationError(parse_error)
    # cfedermann: in case of other exceptions, raise a ValidationError with
    # the corresponding error message. This will prevent the exception
    # page handler to be shown and is hence more acceptable for end users.
    except Exception, error:
        raise ValidationError(error) | 5,327,340 |
def import_submodules(package, recursive=True):
    """Import all submodules of a module, recursively, including subpackages.

    :param package: package (name or actual module)
    :type package: str | module
    :param recursive: bool, descend into subpackages as well
    :rtype: dict[str, types.ModuleType] mapping fully-qualified names to
        the imported modules
    """
    if isinstance(package, str):
        package = importlib.import_module(package)
    modules = {}
    for _loader, mod_name, is_pkg in pkgutil.walk_packages(package.__path__):
        qualified = package.__name__ + '.' + mod_name
        modules[qualified] = importlib.import_module(qualified)
        if recursive and is_pkg:
            modules.update(import_submodules(qualified))
    return modules
def seasonal_pattern(season_time):
    """Just an arbitrary pattern: a sine ramp below 0.4, exponential decay above."""
    rising = np.sin(season_time * 2)
    decaying = 1 / np.exp(3 * season_time)
    return np.where(season_time < 0.4, rising, decaying)
def prepare_files():
    """
    Put all the lemmata to lexemes.txt. Put all the lexical
    rules to lexical_rules.txt, if any. Create separate versions of
    relevant files for diacriticless texts.
    Put all grammar files to uniparser_turoyo/data_strict/
    (original version) or uniparser_turoyo/data_nodiacritics/
    (diacriticless version).
    """
    def _write(path, data):
        # `with` guarantees the handle is closed even if the write fails.
        with open(path, 'w', encoding='utf-8') as fOut:
            fOut.write(data)

    lemmata, lexrules = collect_lemmata()
    paradigms = collect_paradigms()
    _write('uniparser_turoyo/data_strict/lexemes.txt', lemmata)
    _write('uniparser_turoyo/data_nodiacritics/lexemes.txt', simplify(lemmata))
    if len(lexrules) > 0:
        _write('uniparser_turoyo/data_strict/lex_rules.txt', lexrules)
        _write('uniparser_turoyo/data_nodiacritics/lex_rules.txt', lexrules)
    _write('uniparser_turoyo/data_strict/paradigms.txt', paradigms)
    _write('uniparser_turoyo/data_nodiacritics/paradigms.txt', simplify(paradigms))
    if os.path.exists('bad_analyses.txt'):
        shutil.copy2('bad_analyses.txt', 'uniparser_turoyo/data_strict/')
        shutil.copy2('bad_analyses.txt', 'uniparser_turoyo/data_nodiacritics/')
    # Bug fix: the existence check previously looked for
    # 'turoyo_disambiguation.txt' while copying 'turoyo_disambiguation.cg3',
    # so the disambiguation grammar was never copied (or copy2 raised when
    # only the .txt file existed).
    if os.path.exists('turoyo_disambiguation.cg3'):
        shutil.copy2('turoyo_disambiguation.cg3', 'uniparser_turoyo/data_strict/')
        shutil.copy2('turoyo_disambiguation.cg3', 'uniparser_turoyo/data_nodiacritics/')
def dice(labels, predictions, axis, weights=1.0, scope=None, loss_collection=tf.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
    r"""Dice loss for binary segmentation. The Dice loss is one minus the Dice
    coefficient, and therefore this loss converges towards zero.
    The Dice loss between predictions `p` and labels `g` is
    .. math::
        1 - \frac{2 \Sigma_i^N p_i g_i + \epsilon}
        {\Sigma_i^N p_i^2 + \Sigma_i^N g_i^2 + \epsilon}
    where `\epsilon` is a small value for stability.
    (Docstring made raw so the LaTeX backslashes — e.g. `\f` in `\frac` —
    are not interpreted as escape sequences.)
    Parameters
    ----------
    labels: float `Tensor`
    predictions: float `Tensor`
    References
    ----------
    https://arxiv.org/pdf/1606.04797.pdf
    """
    if labels is None:
        raise ValueError("labels must not be None.")
    if predictions is None:
        raise ValueError("predictions must not be None.")
    with tf.name_scope(scope, "dice",
                       (predictions, labels, weights)) as scope:
        predictions = tf.to_float(predictions)
        labels = tf.to_float(labels)
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        # Numerator: 2 * |p ∩ g|; denominator: |p| + |g| (summed over `axis`),
        # both stabilised by _EPSILON.
        intersection = tf.reduce_sum(tf.abs(predictions * labels), axis=axis)
        union = (tf.reduce_sum(predictions, axis=axis) +
                 tf.reduce_sum(labels, axis=axis))
        losses = 1. - ((2 * intersection + _EPSILON) / (union + _EPSILON))
        return compute_weighted_loss(
            losses=losses,
            weights=weights,
            scope=scope,
            loss_collection=loss_collection,
            reduction=reduction) | 5,327,344 |
def air_density(temp, patm, pw = 0):
    """Return the density of (possibly moist) air via the ideal gas law.

    The density is the sum of the partial densities of dry air and water
    vapour:

        rho = Pd / (Rd * T) + Pw / (Rw * T),   with   Pd = Patm - Pw

    where Rd = 287.058 J/(kg K) and Rw = 461.495 J/(kg K) are the specific
    gas constants of dry air and of water vapour.

    Parameters
    ----------
    temp : float
        Air temperature [K].
    patm : float
        Atmospheric pressure [Pa].
    pw : float
        Vapour pressure [Pa]. Defaults to 0 Pa (perfectly dry air).

    Returns
    -------
    float
        Air density [kg/m³].
    """
    # Specific gas constants [J / (kg K)] for dry air and water vapour.
    r_dry = 287.058
    r_vapour = 461.495
    p_dry = patm - pw  # partial pressure of the dry-air fraction
    density_dry = p_dry / (r_dry * temp)
    density_vapour = pw / (r_vapour * temp)
    return density_dry + density_vapour
def index(dataset: Dataset, min_df=5, inplace=False, **kwargs):
    """
    Index the tokens of a textual :class:`quapy.data.base.Dataset` of string
    documents, i.e. replace every distinct token with a unique numerical id.
    Tokens appearing fewer than `min_df` times are mapped to a special `UNK`
    index.

    :param dataset: a :class:`quapy.data.base.Dataset` whose training and test
        instances are lists of str
    :param min_df: minimum number of occurrences below which a term becomes `UNK`
    :param inplace: transform the dataset in place (True) or return a new copy
        (False, default)
    :param kwargs: remaining parameters of the transformation (as for sklearn's
        `CountVectorizer <https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html>_`)
    :return: the same dataset (inplace=True) or a new one (inplace=False) whose
        instances are lists of integer indices
    """
    __check_type(dataset.training.instances, np.ndarray, str)
    __check_type(dataset.test.instances, np.ndarray, str)

    transformer = IndexTransformer(min_df=min_df, **kwargs)
    train_indexed = transformer.fit_transform(dataset.training.instances)
    test_indexed = transformer.transform(dataset.test.instances)

    if not inplace:
        # Build fresh collections over copies of the labels.
        train_coll = LabelledCollection(train_indexed, dataset.training.labels.copy(), dataset.classes_)
        test_coll = LabelledCollection(test_indexed, dataset.test.labels.copy(), dataset.classes_)
        return Dataset(train_coll, test_coll, transformer.vocabulary_)

    dataset.training = LabelledCollection(train_indexed, dataset.training.labels, dataset.classes_)
    dataset.test = LabelledCollection(test_indexed, dataset.test.labels, dataset.classes_)
    dataset.vocabulary = transformer.vocabulary_
    return dataset
def login_principal(principal):
    """Begin a new zope security interaction on behalf of ``principal``."""
    test_request = zope.publisher.browser.TestRequest()
    test_request.setPrincipal(principal)
    # The request carries the principal into the new interaction.
    zope.security.management.newInteraction(test_request)
def PerpendicularFrameAt(thisCurve, t, multiple=False):
    """
    Return a 3d frame at a parameter, computed so there is minimal rotation
    from one frame to the next (unlike FrameAt).

    Args:
        thisCurve: the curve (or, when multiple=True, a sequence of curves).
        t (double): Evaluation parameter (or a sequence of parameters).
        multiple (bool): evaluate many curve/parameter pairs in one request.

    Returns:
        bool: True on success, False on failure.
        plane (Plane): The frame is returned here.
    """
    url = "rhino/geometry/curve/perpendicularframeat-curve_double_plane"
    if multiple:
        url += "?multiple=true"
        request_args = list(zip(thisCurve, t))
    else:
        request_args = [thisCurve, t]
    return Util.ComputeFetch(url, request_args)
def _attributes_cosmo2dict(cosmo):
    """
    Converts CoSMoMVPA-like attributes to a dictionary form
    Parameters
    ----------
    cosmo: dict
        Dictionary that may contains fields 'sa', 'fa', 'a'. For any of these
        fields the contents can be a dict, np.ndarray (object array as returned
        by loadmat) or ArrayCollectable (from a PyMVPA Dataset's .a, .fa or .sa)
    Returns
    -------
    pymvpa_attributes: dict
        Data represented in cosmo with fields 'sa', 'fa' and 'a'. Each element
        in pymvpa_attributes[key] is a dict itself mapping an attribute name
        to a value.
    """
    # space for output
    pymvpa_attributes = dict()
    # go over 'sa', 'fa' and 'a'
    # _attr_fieldname2do_transpose (module-level) maps each field name to
    # whether its values must be transposed (feature attributes).
    for fieldname, do_transpose in _attr_fieldname2do_transpose.items():
        attrs = dict()
        if fieldname in cosmo:
            v = cosmo[fieldname]
            if type(v) is dict:
                # copy the data over
                attrs.update(v)
            elif isinstance(v, np.ndarray):
                # extract singleton element
                # (scipy's loadmat wraps structs in 1x1 object arrays)
                fsa_mat = _from_singleton(v)
                if fsa_mat is not None:
                    # assume an object array
                    fsa_keys = fsa_mat.dtype.names
                    for fsa_key in fsa_keys:
                        dim = fsa_mat[fsa_key]
                        if do_transpose:
                            # feature attribute case, to match dimensionality
                            # in second dimension
                            dim = dim.T
                        # transform row-vectors in matrix form (shape=(1,P))
                        # to vectors (shape=(P,))
                        if len(dim.shape) == 2 and dim.shape[1] == 1:
                            dim = dim.ravel()
                        attrs[fsa_key] = dim
            elif isinstance(v, Collection):
                # from PyMVPA Dataset, extract keys and values
                attrs.update((k, v[k].value) for k in v)
            elif v is None:
                # missing field: leave the attrs dict empty
                pass
            else:
                raise TypeError("Unsupported input %s" % v)
        # fields absent from `cosmo` still yield an (empty) dict entry
        pymvpa_attributes[fieldname] = attrs
    return pymvpa_attributes
def is_instrument_port(port_name):
    """Return True when ``port_name`` looks like a COM or GPIB port address.

    A string qualifies when it contains one of the known prefixes ("COM",
    "com", "GPIB0::", "gpib0::") but is not *exactly* equal to that prefix
    (a bare prefix has no port/address number). Non-string input yields False.
    """
    if not isinstance(port_name, str):
        return False
    result = False
    for prefix in ("COM", "com", "GPIB0::", "gpib0::"):
        if prefix in port_name:
            # A bare prefix (e.g. "COM") is not a usable port name.
            result = port_name != prefix
    return result
def test_irreg_topo_new(dans_grid2):
    """Test D4 routing on a toy irregular topo. 'method' passed to init."""
    accumulator = FlowAccumulator(dans_grid2.mg, flow_director="D4")
    accumulator.run_one_step()

    # Integer-valued fields must match the precomputed targets exactly.
    exact_checks = [
        (dans_grid2.A_target_D4, "drainage_area"),
        (dans_grid2.frcvr_target_D4, "flow__receiver_node"),
        (dans_grid2.upids_target_D4, "flow__upstream_node_order"),
        (dans_grid2.links2rcvr_target_D4, "flow__link_to_receiver_node"),
    ]
    for expected, field in exact_checks:
        assert_array_equal(expected, dans_grid2.mg.at_node[field])

    # Slopes are floating point, so compare approximately.
    assert dans_grid2.steepest_target_D4 == pytest.approx(
        dans_grid2.mg.at_node["topographic__steepest_slope"]
    )
def get_anvil_path():
    """Gets the anvil/ path.

    Returns:
        The normalized full path to the directory containing this module,
        i.e. the anvil/ source tree.
    """
    module_dir = os.path.dirname(__file__)
    return os.path.normpath(module_dir)
def share_data(value):
    """ Take a value and use the same value from the store,
        if the value isn't in the store this one becomes the shared version. """
    # NOTE(review): Python 2-only code — relies on the `unicode` builtin and
    # `types.TupleType`; it will raise NameError/AttributeError on Python 3.
    # We don't want to change the types of strings, between str <=> unicode
    # and hash('a') == hash(u'a') ... so use different stores.
    # In theory eventaully we'll have all of one type, but don't hold breath.
    # _share_data_store / _share_data_store_u are module-level dicts acting
    # as interning caches (value -> canonical shared value).
    store = _share_data_store
    if isinstance(value, unicode):
        store = _share_data_store_u
    # hahahah, of course the above means that:
    #   hash(('a', 'b')) == hash((u'a', u'b'))
    # ...which we have in deptuples, so just screw sharing those atm.
    if type(value) == types.TupleType:
        return value
    # setdefault makes this value canonical if it wasn't in the store yet.
    return store.setdefault(value, value)
def gaussian_dropout(incoming, keep_prob, mc, scale_during_training = True, name=None):
    """ Gaussian Dropout.

    Multiplies the input elementwise by noise drawn from a Gaussian with mean
    1. The noise variance is (1-keep_prob)/keep_prob when
    `scale_during_training` is True, and keep_prob*(1-keep_prob) otherwise,
    matching Bernoulli dropout with scaling at training or test time.

    Arguments:
        incoming : A `Tensor`. The incoming tensor.
        keep_prob : Float; keep probability of the equivalent Bernoulli
            dropout, used to derive the Gaussian variance.
        mc : Boolean `Tensor`; when True the noise is applied (Monte-Carlo
            sampling), otherwise the input passes through unchanged.
        scale_during_training : Match Bernoulli dropout scaled during
            training (True) or during testing (False).
        name : A name for this layer (optional).

    References:
        Dropout: A Simple Way to Prevent Neural Networks from Overfitting.
        N. Srivastava, G. Hinton, A. Krizhevsky, I. Sutskever &
        R. Salakhutdinov, (2014), Journal of Machine Learning Research.

    Links:
        [https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf]
        (https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf)
    """
    with tf.name_scope(name) as scope:
        inference = incoming
        if scale_during_training:
            variance = (1 - keep_prob) / keep_prob
        else:
            variance = (1 - keep_prob) * keep_prob
        stddev = math.sqrt(variance)

        def apply_gaussian_dropout():
            noise = tf.random_normal(tf.shape(inference), mean = 1, stddev = stddev)
            return tf.multiply(inference, noise)

        # Only inject noise when Monte-Carlo sampling is requested.
        inference = tf.cond(mc, apply_gaussian_dropout, lambda: inference)
    return inference
def checkTimeStamp(op, graph, frm, to):
    """
    Confirm the timestamp formats within the metadata did not change.

    When the target node has fewer than two predecessors its timestamps are
    compared against the ``frm`` node; otherwise they are compared against
    every other (donor) predecessor.

    :param op: operation descriptor (unused; kept for rule-signature parity)
    :param graph: provenance graph
    :param frm: source node id
    :param to: target node id
    :return: (Severity.INFO, message) tuple when a mismatch is found,
             otherwise None
    """
    # list() so we can both count and iterate the predecessors
    # (NetworkX 2.x returns an iterator).
    pred = list(graph.predecessors(to))
    # Bug fix: the original compared the predecessor *collection* itself to an
    # int (``if pred < 2``), which raises TypeError on Python 3; the intent is
    # clearly to test the number of predecessors.
    if len(pred) < 2:
        ffile = os.path.join(graph.dir, graph.get_node(frm)['file'])
        sfile = os.path.join(graph.dir, graph.get_node(to)['file'])
        timediffs = dateTimeStampCompare(ffile, sfile)
        if len(timediffs) != 0:
            return (Severity.INFO, "Timestamps " + str(timediffs) + " are in a format different from prior node")
    else:
        for p in pred:
            if p != frm:
                ffile = os.path.join(graph.dir, graph.get_node(p)['file'])
                sfile = os.path.join(graph.dir, graph.get_node(to)['file'])
                timediffs = dateTimeStampCompare(ffile, sfile)
                if len(timediffs) != 0:
                    return (Severity.INFO, "Timestamps " + str(timediffs) + " are in a format different from donor")
def bottom_up_low_space(N,K,ts):
    """
    Bottom-up dynamic-programming subset-sum with O(K) extra space.

    U[j] stores a nonzero witness (the last element used to reach sum j),
    with U[0] = 1 as a sentinel for the empty subset; the inner loop runs
    j downwards so each element of ts is used at most once.

    args:
        N :: int
            length of ts (not used directly; ts is iterated)
        K :: int
            target sum
        ts :: list of ints
    returns:
        res :: bool
            True :: if a subset of ts sums to K
            False :: otherwise
        subset :: list of tuples
            index and value in ts of the subset that sums to K.
            NOTE(review): when ts contains duplicate values, *every* index
            holding that value is reported for a single use of it — confirm
            this is the intended contract.
    """
    U = np.zeros(K+1, dtype = int)
    U[0] = 1  # sentinel: sum 0 is reachable with the empty subset
    for t in ts:
        j = K
        # descending j guarantees each t is counted at most once per subset
        while j >= t:
            if U[j-t] != 0:
                U[j] = t  # record t as the witness element for sum j
            j -= 1
    res = U[K] != 0
    subset = []
    k = K
    # Walk the witnesses back from K, subtracting each recorded element.
    while U[k] != 0 and k > 0:
        t = U[k]
        # all indices whose value equals the witness t
        i = [i for i, x in enumerate(U[k] == ts) if x]
        for l in i:
            if not (l,t) in subset:
                subset.append((l,t))
        k -= U[k]
    return res, sorted(subset)
def sincpt(method, target, et, fixref, abcorr, obsrvr, dref, dvec):
    """
    Given an observer and a direction vector defining a ray, compute
    the surface intercept of the ray on a target body at a specified
    epoch, optionally corrected for light time and stellar aberration.

    This routine supersedes :func:`srfxpt`.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sincpt_c.html

    :param method: Computation method.
    :type method: str
    :param target: Name of target body.
    :type target: str
    :param et: Epoch in ephemeris seconds past J2000 TDB.
    :type et: float
    :param fixref: Body-fixed, body-centered target body frame.
    :type fixref: str
    :param abcorr: Aberration correction.
    :type abcorr: str
    :param obsrvr: Name of observing body.
    :type obsrvr: str
    :param dref: Reference frame of ray's direction vector.
    :type dref: str
    :param dvec: Ray's direction vector.
    :type dvec: 3-Element Array of floats
    :return:
        Surface intercept point on the target body,
        Intercept epoch,
        Vector from observer to intercept point,
        Flag indicating whether the intercept was found.
    :rtype: tuple
    """
    # Marshal the Python arguments into ctypes values for the C call.
    method_p = stypes.stringToCharP(method)
    target_p = stypes.stringToCharP(target)
    et_c = ctypes.c_double(et)
    fixref_p = stypes.stringToCharP(fixref)
    abcorr_p = stypes.stringToCharP(abcorr)
    obsrvr_p = stypes.stringToCharP(obsrvr)
    dref_p = stypes.stringToCharP(dref)
    dvec_c = stypes.toDoubleVector(dvec)
    # Output buffers filled in by CSPICE.
    spoint = stypes.emptyDoubleVector(3)
    trgepc = ctypes.c_double(0)
    srfvec = stypes.emptyDoubleVector(3)
    found = ctypes.c_int(0)
    libspice.sincpt_c(method_p, target_p, et_c, fixref_p, abcorr_p, obsrvr_p,
                      dref_p, dvec_c, spoint, ctypes.byref(trgepc), srfvec,
                      ctypes.byref(found))
    return (stypes.cVectorToPython(spoint), trgepc.value,
            stypes.cVectorToPython(srfvec), bool(found.value))
def create_user_upload_path_if_not_exists(upload_path, user_id):
    """
    Ensure the per-user upload directory ``<upload_path>/<user_id>`` exists.

    :param upload_path: base upload directory
    :param user_id: user identifier; converted to str for the folder name
    :return: nothing, just creates the directory (no-op when already present)
    """
    user_dir = os.path.join(upload_path, str(user_id))
    # exist_ok=True removes the check-then-create race of the original code
    # (another process could create the directory between the isdir() test
    # and makedirs(), which would then raise FileExistsError).
    os.makedirs(user_dir, exist_ok=True)
def test_dahlquist_constructor_mr():
    """Constructing Dahlquist with method='MR' must store that method."""
    problem = Dahlquist(t_start=0, t_stop=1, nt=11, method='MR')
    np.testing.assert_equal('MR', problem.method)
def crumb_link(src_crumb: hansel.Crumb, dst_crumb: hansel.Crumb, exist_ok: bool=False, verbose: bool=False):
    """Link the content of `src_crumb` into the `dst_crumb` folder.

    `src_crumb` and `dst_crumb` must have similar sets of argument names, and
    the defined arguments of `src_crumb.ls()[0]` must define `dst_crumb`
    entirely, yielding a path to a file or folder.  The folder structure is
    created at the base of `dst_crumb` and only the leaf nodes are linked.
    """
    # _crumb_fill_dst yields matched (source, destination) crumb pairs.
    pairs = _crumb_fill_dst(src_crumb, dst_crumb)
    for source, destination in pairs:
        link_all_files(source.path, destination.path, exist_ok=exist_ok, verbose=verbose)
def send_file(request, filepath_or_fp, mimetype=None, as_attachment=False,
        attachment_filename=None, add_etags=True, cache_timeout=60 * 60 * 12,
        conditional=False, use_x_sendfile=False, response_class=None):
    """Sends the contents of a file to the client. This will use the
    most efficient method available and configured. By default it will
    try to use the WSGI server's file_wrapper support. Alternatively
    you can set the `use_x_sendfile` parameter to `True` to directly emit
    an `X-Sendfile` header. This however requires support of the underlying
    webserver for `X-Sendfile`.
    By default it will try to guess the mimetype for you, but you can
    also explicitly provide one. For extra security you probably want
    to send certain files as attachment (HTML for instance). The mimetype
    guessing requires a `filename` or an `attachment_filename` to be
    provided.
    Please never pass filenames to this function from user sources without
    checking them first. Something like this is usually sufficient to
    avoid security problems::
        if '..' in filename or filename.startswith('/'):
            raise NotFound()
    param request:
        ...
    param filepath_or_fp:
        The absolute path of the file to send.
        Alternatively a file object might be provided in which case
        `X-Sendfile` might not work and fall back to the traditional method.
        Make sure that the file pointer is positioned at the start
        of data to send before calling `send_file`.
    param mimetype:
        The mimetype of the file if provided, otherwise
        auto detection happens.
    param as_attachment:
        Set to `True` if you want to send this file with
        a `Content-Disposition: attachment` header.
    param attachment_filename:
        The filename for the attachment if it
        differs from the file's filename.
    param add_etags:
        Set to `False` to disable attaching of etags.
    param conditional:
        Set to `True` to enable conditional responses.
    param cache_timeout:
        The timeout in seconds for the headers.
    param use_x_sendfile:
        Set to `True` to directly emit an `X-Sendfile` header.
        This however requires support of the underlying webserver.
    param response_class:
        Set to overwrite the default Response class.
    --------------------------------
    Copied almost verbatim from Flask <http://flask.pocoo.org/>
    Copyright © 2010 by Armin Ronacher.
    Used under the modified BSD license.
    """
    # NOTE(review): Python 2-only code — relies on `basestring`/`unicode`.
    from .wrappers import Response

    mtime = None
    # Accept either a path string or an already-open file object.
    if isinstance(filepath_or_fp, basestring):
        filepath = filepath_or_fp
        file = None
    else:
        # For a file object we cannot guess the mimetype or compute etags.
        assert bool(mimetype or attachment_filename)
        add_etags = False
        file = filepath_or_fp
        filepath = getattr(file, 'name', None)

    if filepath is not None:
        filepath = os.path.abspath(filepath)
    if mimetype is None and (filepath or attachment_filename):
        mimetype = mimetypes.guess_type(filepath or attachment_filename)[0]
    if mimetype is None:
        mimetype = 'application/octet-stream'

    headers = Headers()
    if as_attachment:
        if attachment_filename is None:
            if filepath is None:
                raise TypeError('filename unavailable, required for '
                    'sending as attachment')
            attachment_filename = os.path.basename(filepath)
        headers.add('Content-Disposition', 'attachment',
            filename=attachment_filename)

    if use_x_sendfile and filepath:
        # Delegate the actual file transfer to the web server.
        if file is not None:
            file.close()
        headers['X-Sendfile'] = filepath
        data = None
    else:
        # Stream the file through the WSGI file wrapper.
        if file is None:
            file = io.open(filepath, 'rb')
            mtime = os.path.getmtime(filepath)
        data = wrap_file(request.environ, file)

    response_class = response_class or Response
    resp = response_class(data, mimetype=mimetype, headers=headers,
        direct_passthrough=True)

    # if we know the file modification date, we can store it as the
    # the time of the last modification.
    if mtime is not None:
        resp.last_modified = int(mtime)

    resp.cache_control.public = True
    if cache_timeout:
        resp.cache_control.max_age = cache_timeout
        resp.expires = int(time() + cache_timeout)

    if add_etags and filepath is not None:
        # Weak identity: mtime + size + adler32 of the path.
        resp.set_etag('shake-%s-%s-%s' % (
            os.path.getmtime(filepath),
            os.path.getsize(filepath),
            adler32(
                filepath.encode('utf8') if isinstance(filepath, unicode)
                else filepath
            ) & 0xffffffff
        ))
        if conditional:
            resp = resp.make_conditional(request)
            # make sure we don't send x-sendfile for servers that
            # ignore the 304 status code for x-sendfile.
            if resp.status_code == 304:
                resp.headers.pop('x-sendfile', None)
    return resp
def getInfo(ID):
    """
    Load a meter's configuration from ``config/<ID>.json`` and its template
    image from ``template/<ID>.jpg``.

    :param ID: meter ID (used as both the config and template file stem)
    :return: the info dict stored in the JSON file, e.g.::

        {
            "distance": 10, "horizontal": 10, "vertical": 20,
            "name": "1_1", "type": SF6, "template": <image>,
            "ROI": {"x": 200, "y": 200, "w": 1520, "h": 680},
            "startPoint": {"x": -1, "y": -1},
            "endPoint": {"x": -1, "y": -1},
            "centerPoint": {"x": -1, "y": -1},
            "startValue": 0, "totalValue": 2
        }

        with two fields replaced: ``info["type"]`` is mapped from the strings
        "SF6"/"youwen" to the corresponding reader callables, and
        ``info["template"]`` is the template image loaded with cv2.
    """
    # Bug fix: the original left the config file handle open; a context
    # manager guarantees it is closed even if json.load raises.
    with open("config/" + ID + ".json") as file:
        info = json.load(file)
    # string to pointer: map the type name to the module-level reader
    if info["type"] == "SF6":
        info["type"] = SF6
    elif info["type"] == "youwen":
        info["type"] = youwen
    info["template"] = cv2.imread("template/" + ID + ".jpg")
    return info
def DO_run(wb, args: Union[list, None] = None, external: bool = False) -> RET:
    """run shell_command lfn|alien: tagged lfns :: download lfn(s) as a temporary file and run shell command on the lfn(s)"""
    if args is None: args = []
    if not args: return RET(1, '', 'No shell command specified')
    # Help requested, or only the command itself was given (no lfn).
    if is_help(args) or len(args) == 1:
        msg_last = ('Command format: shell_command arguments lfn\n'
                    'N.B.!! the lfn must be the last element of the command!!\n'
                    'N.B.! The output and error streams will be captured and printed at the end of execution!\n'
                    'for working within application use <edit> or -noout argument\n'
                    'additiona arguments recognized independent of the shell command:\n'
                    '-force : will re-download the lfn even if already present\n'
                    '-noout : will not capture output, the actual application can be used')
        if external:
            # Show the external command's own -h output, then our usage notes.
            ret_obj = runShellCMD(f'{args[0]} -h', captureout = True, do_shell = True)
            return ret_obj._replace(out = f'{ret_obj.out}\n{msg_last}')
        msg = ('Command format: run shell_command arguments lfn\n'
               'the lfn must be the last element of the command\n'
               'N.B.! The output and error streams will be captured and printed at the end of execution!\n'
               'for working within application use <edit>\n'
               'additiona arguments recognized independent of the shell command:\n'
               '-force : will re-download the lfn even if already present\n'
               '-noout : will not capture output, the actual application can be used')
        return RET(0, msg)

    # get_arg removes the flag from args and reports whether it was present.
    overwrite = get_arg(args, '-force')
    capture_out = get_arg(args, '-noout')

    # lfns are arguments tagged with 'alien:'; if none, the last arg is the lfn.
    list_of_lfns = [arg for arg in args if 'alien:' in arg]
    if not list_of_lfns: list_of_lfns = [args.pop(-1)]

    tmp_list = [download_tmp(wb, lfn, overwrite) for lfn in list_of_lfns]  # list of temporary downloads
    new_args = [arg for arg in args if arg not in list_of_lfns]  # command arguments without the files
    args = list(new_args)
    cmd = " ".join(args)
    files = " ".join(tmp_list)
    # Only run if every lfn actually produced a local temporary file.
    if tmp_list and all(os.path.isfile(tmp) for tmp in tmp_list):
        return runShellCMD(f'{cmd} {files}', capture_out, do_shell = True)
    return RET(1, '', f'There was an error downloading the following files:\n{chr(10).join(tmp_list)}')
def get_keychain_pass(account=None, server=None):
    """
    Look up the password for ``account`` @ ``server`` in the current user's
    macOS login keychain via the ``security`` command-line tool.

    :param account: account name stored in the keychain item
    :param server: server name stored in the keychain item
    :return: the password string
    :raises subprocess.CalledProcessError: if the keychain lookup fails
    :raises IndexError: if no ``password:`` line is present in the output
    """
    keychain = os.environ['HOME'] + '/Library/Keychains/login.keychain'
    # Security fix: build an argv list instead of interpolating account/server
    # into a shell string (shell=True), so their values can never be
    # interpreted by the shell.
    command = [
        'sudo', '-u', os.environ['USER'],
        '/usr/bin/security', '-v', 'find-internet-password',
        '-g', '-a', account, '-s', server,
        keychain,
    ]
    # universal_newlines=True decodes the output to str (needed on Python 3,
    # harmless on Python 2).
    output = subprocess.check_output(command, stderr=subprocess.STDOUT,
                                     universal_newlines=True)
    outtext = [line for line in output.splitlines()
               if line.startswith('password: ')][0]
    return re.match(r'password: "(.*)"', outtext).group(1)
def make_train_valid(labels_dir:Path, train:float=.8, valid:float=.2, test:float=0):
    """
    usage: make_train_valid(labels_dir:Path, train:float=.8, valid:float=.2, test:float=0)
    Make a train-valid directory and randomly copy files from labels_dir to sub-
    directories
    positional arguments:
      labels_dir  Contains at least two directories of labels, each containing
                  files of that label
    optional arguments:
      train=.8    files for training, default=.8
      valid=.2    files for validation, default=.2
      test= 0     files for training, default=.0
    """
    # The three fractions must partition the files exactly.
    assert sum([train, valid, test]) == 1
    assert (Path(labels_dir).is_dir())
    labels_path = Path(labels_dir)
    runs = {'train':train, 'valid':valid, 'test':test}
    # Remove any output from a previous run so copies don't accumulate.
    for run in runs.keys():
        shutil.rmtree((labels_path / run), ignore_errors=True)
    # Remaining subdirectories are the label classes.
    labels = [d.name for d in labels_path.iterdir() if d.is_dir()]
    for label in labels:
        files = list((labels_path / label).iterdir())
        num_files = len(files)
        for run in runs.keys():
            os.makedirs(labels_path / run / label)
            # NOTE(review): `take` is computed from the *original* file count,
            # so rounding can over/under-shoot the files still available;
            # slicing below silently clamps to what remains.
            take = round(num_files * runs[run])
            # Reshuffle before each split so assignment is random.
            random.shuffle(files)
            for f in files[:take]:
                shutil.copy(f, (labels_path / run / label / f.name))
                #print(f, (labels_path / run / label / f.name))
            # Drop the files just assigned so later splits don't reuse them.
            files = files[take:]
def cast_tensor_by_spec(_input, spec):
    """
    Cast ``_input`` to the dtype/name described by ``spec`` (a TensorSpec).

    When ``spec`` is not a TensorSpec the input is returned untouched;
    existing tensors are cast, anything else becomes a new constant.
    """
    try:
        import tensorflow as tf
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use TfSavedModelArtifact"
        )

    # Non-TensorSpec specs carry no dtype/name to apply.
    if not _isinstance_wrapper(spec, "TensorSpec"):
        return _input

    if _isinstance_wrapper(_input, ["Tensor", "EagerTensor"]):
        # TensorFlow issue #43038
        # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
        return tf.cast(_input, dtype=spec.dtype, name=spec.name)
    return tf.constant(_input, dtype=spec.dtype, name=spec.name)
def StopRequestHook(ref, args, request):
    """Declarative request hook for TPU Stop command.

    Attaches an empty StopNodeRequest message to the outgoing request.
    """
    del ref, args  # unused, but required by the hook signature
    request.stopNodeRequest = GetMessagesModule().StopNodeRequest()
    return request
def get_bucket(
    storage_bucket_name: str,
    **kwargs,
) -> Bucket:
    """Fetch a storage bucket by name using a fresh client.

    Extra keyword arguments are forwarded to ``Client.get_bucket``.
    """
    return get_client().get_bucket(storage_bucket_name, **kwargs)
def furl_for(endpoint: str, filename: str = None, **kwargs: dict) -> str:
    """Replacement for url_for that prefixes the configured base URL.

    - when ``filename`` is given, build a static-file URL for ``endpoint``;
    - when ``endpoint`` is the empty string, return the site root ``/``;
    - otherwise delegate to ``url_for`` with the remaining kwargs.
    """
    # Fix: compare to None with `is not` rather than `!=`, and flatten the
    # original nested one-line ternary for readability.
    if filename is not None:
        path = url_for(endpoint, filename=filename)
    elif endpoint == "":
        path = "/"
    else:
        path = url_for(endpoint, **kwargs)
    return URL() + path
def image_max_value(img, region=None, scale=None):
    """Retrieve the maximum value of an Earth Engine image.

    Args:
        img (object): The image to reduce.
        region (object, optional): Region over which to reduce data. Defaults
            to the image's own geometry.
        scale (float, optional): Nominal scale in meters of the projection to
            work in. Defaults to the image's native scale.

    Returns:
        object: ee.Number
    """
    region = img.geometry() if region is None else region
    scale = image_scale(img) if scale is None else scale
    reduce_args = {
        'reducer': ee.Reducer.max(),
        'geometry': region,
        'scale': scale,
        'maxPixels': 1e12,
    }
    return img.reduceRegion(**reduce_args)
def getblock(lst, limit):
    """Remove and return (up to) the last ``limit`` entries of ``lst``.

    The returned entries keep their original order; ``lst`` is shortened in
    place.  Note: despite the historical docstring saying "first", this takes
    entries from the *end* of the list.

    :param lst: list to consume from (modified in place)
    :param limit: maximum number of entries to take; non-positive returns []
    :return: list with the removed entries
    """
    # Guard fix: with the original code, limit == 0 meant lst[-0:] == lst[0:],
    # i.e. it returned and drained the *whole* list instead of nothing.
    if limit <= 0:
        return []
    block = lst[-limit:]
    del lst[-limit:]
    return block
def test_r53_policy_expected_aws_records(zone, boto_client):
    """
    Tests a Policy builds the expected desired_records for the alias tree.
    """
    # G(...) is django-dynamic-fixture: create-and-save a model instance.
    policy = G(m.Policy, name='pol1')
    policy_record = G(m.PolicyRecord, zone=zone, name='www', policy=policy)
    ip1 = create_ip_with_healthcheck()
    # One member of the policy in each of the two test regions.
    G(m.PolicyMember, policy=policy_record.policy, region=regions[0], ip=ip1)
    G(m.PolicyMember, policy=policy_record.policy, region=regions[1], ip=ip1)
    # pol_factory = route53.CachingFactory(route53.Policy)
    r53_policy = route53.Policy(zone=zone.r53_zone, policy=policy)
    # Expected tree: one hidden record per region plus the two ALIAS records
    # that fan the top-level name out to the regional records.
    assert [(r.name, r.values) for r in r53_policy.desired_records.values()] == [
        ('_zn_pol1_us-east-1', [ip1.ip]),
        ('_zn_pol1_us-east-2', [ip1.ip]),
        ('_zn_pol1', ['ALIAS _zn_pol1_us-east-1.test-zinc.net.']),
        ('_zn_pol1', ['ALIAS _zn_pol1_us-east-2.test-zinc.net.']),
    ]
def run_gateway(gateway):
    """Run a sync gateway until interrupted with Ctrl-C, then stop it."""
    gateway.start_persistence()
    gateway.start()
    running = True
    while running:
        try:
            # Idle; the gateway works on its own threads.
            time.sleep(0.5)
        except KeyboardInterrupt:
            # Ctrl-C is the normal shutdown path.
            gateway.stop()
            running = False
def electrode_neighborhoods(mea='hidens', neighborhood_radius=HIDENS_NEIGHBORHOOD_RADIUS, x=None, y=None):
    """
    Derive the electrode neighbor matrix from pairwise electrode distances.

    :param mea: (optional) type of the micro electrode array, default: 'hidens'
    :param neighborhood_radius: (optional) distance threshold; default depends
        on the mea type
    :param x, y: (optional) electrode coordinates
    :return: neighbors: boolean square matrix (True where electrodes are
        closer than the radius)
    """
    pairwise_distances = electrode_distances(mea, x, y)
    return pairwise_distances < neighborhood_radius
def lat_from_meta(meta):
    """
    Build the latitude coordinate array described by rasterio metadata.

    :param meta: dict rasterio metadata; needs "transform" and "height".
    :return: numpy array of latitudes
    """
    try:
        transform, height = meta["transform"], meta["height"]
    except KeyError as e:
        raise e
    origin, step = transform[5], transform[4]
    lat = np.arange(origin, origin + (step * height), step)
    # In rare cases the coordinate array is one element too short or too long
    # (e.g. due to rounding); alternately shrink and grow it a few times so
    # the length is certain to stabilise at `height`.
    for _ in range(3):
        lat = shorten_coords_array(lat, origin, step, height)
        lat = enlarge_coords_array(lat, origin, step, height)
    return lat
def get_test_class(dbcase):
    """Return the implementation class of a TestCase, or None if not found.

    Raises InvalidTestError when the configured implementation resolves to
    something that is not a core.Test subclass.
    """
    # Only automated, valid cases can have a usable implementation.
    if not (dbcase.automated and dbcase.valid):
        return None
    impl = dbcase.testimplementation
    if not impl:
        return None
    obj = module.get_object(impl)
    if type(obj) is type and issubclass(obj, core.Test):
        return obj
    raise InvalidTestError("%r is not a Test class object." % (obj,))
def calc_dp(t_c, rh):
    """Calculate the dew point in Celsius.

    Arguments:
        t_c - the temperature in °C.
        rh - the relative humidity as a percent, (0-100)

    Returns:
        The dew point in °C.
    """
    # Actual vapour pressure = saturation pressure scaled by the humidity.
    saturation_vp = vapor_pressure_liquid_water(t_c)
    actual_vp = saturation_vp * rh / 100.0
    # Invert the saturation-pressure formula to recover the temperature.
    ratio = log(actual_vp / 6.1037) / 17.641
    return ratio * 243.27 / (1.0 - ratio)
def getSlackMetar(input: flask.Request) -> Tuple[str, int, dict]:
    """
    Endpoint handler for the slack_metar HTTP function trigger.

    Extracts the station name from the request, fetches its METAR and wraps
    the text in a Slack-formatted JSON response.
    """
    global _baseUrl
    global _logger
    _logger.debug("Entered metar")
    station = _getStationName(input)
    metar_text = _requestData('METAR', station)
    headers = {'Content-type': 'application/json'}
    return (_buildSlackResponse(metar_text), 200, headers)
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """Residual block whose shortcut branch contains a convolution.

    # Arguments
        input_tensor: input tensor
        kernel_size: kernel size of the middle conv layer of the main path
        filters: list of 3 integers, the filter counts of the main-path convs
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: strides of the first main-path conv and of the shortcut conv

    # Returns
        Output tensor for the block.

    Note that from stage 3, the first conv layer of the main path uses
    strides=(2, 2), and so does the shortcut.
    """
    nb1, nb2, nb3 = filters
    bn_axis = 3 if IMAGE_ORDERING == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Main path: strided 1x1 -> kxk (same padding) -> 1x1, BN+ReLU between.
    y = Conv2D(nb1, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
               name=conv_name_base + '2a')(input_tensor)
    y = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(y)
    y = Activation('relu')(y)

    y = Conv2D(nb2, kernel_size, data_format=IMAGE_ORDERING, padding='same',
               name=conv_name_base + '2b')(y)
    y = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(y)
    y = Activation('relu')(y)

    y = Conv2D(nb3, (1, 1), data_format=IMAGE_ORDERING,
               name=conv_name_base + '2c')(y)
    y = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(y)

    # Shortcut path: strided 1x1 conv so shapes match for the addition.
    shortcut = Conv2D(nb3, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
                      name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    y = layers.add([y, shortcut])
    return Activation('relu')(y)
def _fake_before_lines(first_line: str) -> List[str]:
    """Construct the fake lines that should go before the text.

    Emits one synthetic ``if True:`` per indentation level of ``first_line``
    so the text parses at that depth, plus — when the line starts with
    else/elif (resp. except/finally) — the ``if``/``try`` opener and a
    ``pass`` body that such a clause requires.
    """
    depth = _indent_levels(first_line)
    fake_lines = [f"{SINGLE_INDENT * level}if True:\n" for level in range(depth)]

    # Inspect the first token to detect else/elif/except/finally clauses.
    try:
        token_stream = tokenize.generate_tokens(iter([first_line.lstrip()]).__next__)
        first_token: Optional[tokenize.TokenInfo] = next(token_stream)
    except tokenize.TokenError:
        first_token = None

    if first_token is not None and first_token.type == tokenize.NAME:
        prefix = SINGLE_INDENT * depth
        opener = None
        if first_token.string in {"else", "elif"}:
            opener = "if True:"
        elif first_token.string in {"except", "finally"}:
            opener = "try:"
        if opener is not None:
            fake_lines.append(f"{prefix}{opener}\n")
            fake_lines.append(f"{prefix}{SINGLE_INDENT}pass\n")
    return fake_lines
def normal_logpdf(x, mu, cov):
    """
    Log-density of a multivariate normal distribution at ``x`` (pure numpy).

    :param x: evaluation point, shape (d,)
    :param mu: mean vector, shape (d,)
    :param cov: covariance matrix, shape (d, d)
    :return: float log pdf value
    """
    dim = len(mu)
    # Normalisation constant (2*pi)^(d/2) * |cov|^(1/2).
    norm_const = ((2 * np.pi) ** (dim / 2)) * (np.linalg.det(cov) ** (1 / 2))
    residual = x - mu
    # Mahalanobis quadratic form (x-mu)' cov^{-1} (x-mu).
    quad_form = (residual.T.dot(np.linalg.inv(cov))).dot(residual)
    return float(np.log(1 / norm_const) + (-1 / 2) * quad_form)
def fork_workflow_step_context(
    workflow_id: Optional[str] = _sentinel,
    storage_url: Optional[str] = _sentinel,
    workflow_scope: Optional[List[str]] = _sentinel,
    outer_most_step_id: Optional[str] = _sentinel,
    last_step_of_workflow: Optional[bool] = _sentinel,
    checkpoint_context: CheckpointContext = _sentinel,
):
    """Fork the workflow step context.
    Inherits the original value if no value is provided.

    Temporarily replaces the module-level ``_context`` with a new
    WorkflowStepContext; every field left at the ``_sentinel`` default is
    copied from the current context. The original context is restored when
    the ``yield`` resumes.

    NOTE(review): this is a generator with try/finally around a single
    yield — presumably wrapped with ``contextlib.contextmanager`` at the
    call/definition site; confirm the decorator is applied.

    Args:
        workflow_id: The ID of the workflow.
        storage_url: The storage the workflow is using.
        workflow_scope: Scope stack of the workflow.
        outer_most_step_id: ID of the outermost step.
        last_step_of_workflow: Whether this is the workflow's last step.
        checkpoint_context: Checkpointing configuration.
    """
    global _context
    original_context = _context
    assert workflow_id is not None
    try:
        # Each field: keep the original value unless the caller overrode it
        # (the sentinel marks "not provided"; explicit None is a real value).
        _context = WorkflowStepContext(
            workflow_id=original_context.workflow_id
            if workflow_id is _sentinel
            else workflow_id,
            storage_url=original_context.storage_url
            if storage_url is _sentinel
            else storage_url,
            workflow_scope=original_context.workflow_scope
            if workflow_scope is _sentinel
            else workflow_scope,
            outer_most_step_id=original_context.outer_most_step_id
            if outer_most_step_id is _sentinel
            else outer_most_step_id,
            last_step_of_workflow=original_context.last_step_of_workflow
            if last_step_of_workflow is _sentinel
            else last_step_of_workflow,
            checkpoint_context=original_context.checkpoint_context
            if checkpoint_context is _sentinel
            else checkpoint_context,
        )
        yield
    finally:
        # Always restore the pre-fork context, even if the body raised.
        _context = original_context
def criteriarr(criteria):
    """
    Validate that the iterable only contains MIN (or any alias) and MAX
    (or any alias) values, and return an ndarray representation of it.

    Parameters
    ----------
    criteria : Array-like
        Iterable containing all the values to be validated.

    Returns
    -------
    numpy.ndarray :
        Criteria array.

    Raises
    ------
    DataValidationError :
        If some value of the criteria array is not MIN (-1) or MAX (1).
    """
    # ALIASES maps every accepted spelling to -1/1; unknown values become None.
    parsed = np.array([ALIASES.get(value) for value in criteria])
    if None in parsed:
        raise DataValidationError(
            "Criteria Array only accept minimize or maximize Values. "
            "Found {}".format(criteria))
    return parsed
def show_command(
    *, login_manager: LoginManager, index_id: uuid.UUID, subject: str
) -> None:
    """Show the data for a given subject in an index.

    Results are subject to the visible_to access control list on the entries
    for that subject: entries visible to the current user are displayed,
    and a NotFound error is raised when none are visible.
    """
    client = login_manager.get_search_client()
    subject_data = client.get_subject(index_id, subject)
    formatted_print(subject_data, text_format=_print_subject)
def integer_or_rational(entropy, signed, min_abs=0):
    """Returns a rational, with 50% probability of it being an integer."""
    # Draw once from the RNG to decide which kind of number to produce.
    make_integer = random.choice([False, True])
    if make_integer:
        return integer(entropy, signed, min_abs=min_abs)
    return non_integer_rational(entropy, signed)
def ignore_python_warnings(function):
    """
    Decorator for ignoring *Python* warnings.

    Parameters
    ----------
    function : object
        Function to decorate.

    Returns
    -------
    object

    Examples
    --------
    >>> @ignore_python_warnings
    ... def f():
    ...     warnings.warn('This is an ignored warning!')
    >>> f()
    """

    def _run_silenced(*args, **kwargs):
        # Suppress every warning only for the duration of this call; the
        # filter state is restored on exit by catch_warnings().
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return function(*args, **kwargs)

    # Copy name/docstring/module metadata from the wrapped function.
    return functools.wraps(function)(_run_silenced)
def runMetrics(
    initWorkingSetName,
    stepName,
    requestInfo,
    jobId,
    outputFolder,
    referenceFolder,
    referencePrefix,
    dtmFile,
    dsmFile,
    clsFile,
    mtlFile,
):
    """
    Run a Girder Worker job to compute metrics on output files.

    Requirements:
    - Danesfield Docker image is available on host

    :param initWorkingSetName: The name of the top-level working set.
    :type initWorkingSetName: str
    :param stepName: The name of the step.
    :type stepName: str (DanesfieldStep)
    :param requestInfo: HTTP request and authorization info.
    :type requestInfo: RequestInfo
    :param jobId: Job ID.
    :type jobId: str
    :param outputFolder: Output folder document.
    :type outputFolder: dict
    :param referenceFolder: Reference directory.
    :type referenceFolder: dict
    :param referencePrefix: Reference file prefix.
    :type referencePrefix: str
    :param dtmFile: DTM file document.
    :type dtmFile: dict
    :param dsmFile: DSM file document.
    :type dsmFile: dict
    :param clsFile: CLS file document.
    :type clsFile: dict
    :param mtlFile: MTL file document.
    :type mtlFile: dict
    :returns: Job document.
    """
    # Girder client authorized as the requesting user.
    gc = createGirderClient(requestInfo)
    if referencePrefix == "STANDARD":
        # We know that there's no reference data with this selection
        # NOTE(review): a no-op container is still launched, presumably so a
        # job record exists for this step and event listeners fire — confirm.
        containerArgs = ["echo", "No ground truth selected for scoring"]
        asyncResult = docker_run.delay(
            **createDockerRunArguments(
                image=DockerImage.DANESFIELD,
                containerArgs=containerArgs,
                jobTitle="[%s] Run metrics" % initWorkingSetName,
                jobType=stepName,
                user=requestInfo.user,
            )
        )
    else:
        # Otherwise we assume the reference data exists, and try to
        # run the metrics
        # Path inside the container where run_metrics.py writes its results.
        outputVolumePath = VolumePath("__output__")
        # Docker container arguments
        containerArgs = [
            "danesfield/tools/run_metrics.py",
            "--output-dir",
            outputVolumePath,
            "--ref-dir",
            GirderFolderIdToVolume(referenceFolder["_id"], gc=gc),
            "--ref-prefix",
            referencePrefix,
            "--dsm",
            GirderFileIdToVolume(dsmFile["_id"], gc=gc),
            "--cls",
            GirderFileIdToVolume(clsFile["_id"], gc=gc),
            "--mtl",
            GirderFileIdToVolume(mtlFile["_id"], gc=gc),
            "--dtm",
            GirderFileIdToVolume(dtmFile["_id"], gc=gc),
        ]
        # Result hooks
        # - Upload output files to output folder
        # - Provide upload metadata
        upload_kwargs = createUploadMetadata(jobId, stepName)
        resultHooks = [
            GirderUploadVolumePathToFolder(
                outputVolumePath,
                outputFolder["_id"],
                upload_kwargs=upload_kwargs,
                gc=gc,
            )
        ]
        # Launch the metrics container asynchronously via Celery.
        asyncResult = docker_run.delay(
            **createDockerRunArguments(
                image=DockerImage.DANESFIELD,
                containerArgs=containerArgs,
                jobTitle="[%s] Run metrics" % initWorkingSetName,
                jobType=stepName,
                user=requestInfo.user,
                resultHooks=resultHooks,
            )
        )
    # Add info for job event listeners
    job = asyncResult.job
    job = addJobInfo(job, jobId=jobId, stepName=stepName)
    return job | 5,327,387 |
def configure_logging(args):
    """Configures the default logger. This can be called only once and has to
    be called before any logging is done.
    """
    # Register a custom TIMING level just above ERROR and expose a
    # module-level convenience function for it.
    logging.TIMING = logging.ERROR + 5
    logging.addLevelName(logging.TIMING, 'TIMING')

    def timing(msg, *args, **kwargs):
        logging.log(logging.TIMING, msg, *args, **kwargs)

    logging.timing = timing
    if args.debug:
        # Debug mode logs everything to the console.
        logging.basicConfig(level=logging.DEBUG, format=DEBUG_FORMAT)
        return
    # Normal mode logs INFO and above to a file inside the output directory.
    log_path = os.path.join(args.infer_out, config.LOG_FILE)
    logging.basicConfig(
        level=logging.INFO, format=FORMAT, filename=log_path, filemode='w')
def terminal_bg():
    """Guess the terminal background colour from the COLORFGBG variable.

    Returns ``BGColor.LIGHT`` for a light background, ``BGColor.DARK`` for a
    dark background, and ``BGColor.UNKNOWN`` when it cannot be determined.
    (The previous docstring described positional arguments this function
    never took.)
    """
    # COLORFGBG is "<fg>;<bg>": "0;15" is black-on-white (light background),
    # "15;0" is white-on-black (dark background).
    known = {'0;15': BGColor.LIGHT, '15;0': BGColor.DARK}
    return known.get(os.environ.get('COLORFGBG', ''), BGColor.UNKNOWN)
def mkdir_p(path):
    """
    Equivalent of mkdir -p on the commandline; wrapper around os.makedirs
    @param path The path to create
    """
    # exist_ok makes the call idempotent; unlike the previous bare
    # ``except: pass`` it still surfaces real failures (e.g. permission
    # errors or a regular file occupying the path).
    os.makedirs(path, exist_ok=True)
def matplotlib_kwarg_dealiaser(args, kind):
    """De-aliase the kwargs passed to plots."""
    if args is None:
        return {}
    # Matplotlib artist class that defines the alias map for each plot kind.
    artist_by_kind = {
        "scatter": mpl.collections.PathCollection,
        "plot": mpl.lines.Line2D,
        "hist": mpl.patches.Patch,
        "bar": mpl.patches.Rectangle,
        "hexbin": mpl.collections.PolyCollection,
        "fill_between": mpl.collections.PolyCollection,
        "hlines": mpl.collections.LineCollection,
        "text": mpl.text.Text,
        "contour": mpl.contour.ContourSet,
        "pcolormesh": mpl.collections.QuadMesh,
    }
    alias_map = getattr(artist_by_kind[kind], "_alias_map", {})
    return normalize_kwargs(args, alias_map)
def volume():
    """Return the current volume number held by the Scheduler.

    :return: the scheduler's ``ret_volume`` value
    """
    current_volume = Scheduler.ret_volume
    return current_volume
def grid_grad(input, grid, interpolation='linear', bound='zero',
              extrapolate=False):
    """Sample spatial gradients of an image with respect to a deformation field.

    Notes
    -----
    {interpolation}

    {bound}

    Parameters
    ----------
    input : ([batch], [channel], *inshape) tensor
        Input image.
    grid : ([batch], *inshape, dim) tensor
        Transformation field.
    interpolation : int or sequence[int], default='linear'
        Interpolation order.
    bound : BoundType, or sequence[BoundType], default='zero'
        Boundary conditions.
    extrapolate : bool or int, default=False
        Extrapolate out-of-bound data.

    Returns
    -------
    output : ([batch], [channel], *inshape, dim) tensor
        Sampled gradients.
    """
    # Docstring fixes only: the old one documented a ``shape`` parameter that
    # does not exist in the signature and claimed extrapolate defaults to True.
    # Broadcast: add missing batch/channel axes so both tensors share a batch.
    dim = grid.shape[-1]
    input_no_batch = input.dim() == dim + 1
    input_no_channel = input.dim() == dim
    grid_no_batch = grid.dim() == dim + 1
    if input_no_channel:
        input = input[None, None]
    elif input_no_batch:
        input = input[None]
    if grid_no_batch:
        grid = grid[None]
    batch = max(input.shape[0], grid.shape[0])
    input = expand(input, [batch, *input.shape[1:]])
    grid = expand(grid, [batch, *grid.shape[1:]])
    out = GridGrad.apply(input, grid, interpolation, bound, extrapolate)
    # Strip back the axes that were only added for broadcasting.
    if input_no_channel:
        out = out[:, 0]
    if input_no_batch and grid_no_batch:
        out = out[0]
    return out
def delete_cart_item(quote_id, item_code):
    """Delete given item_codes from Quote if all deleted then delete Quote.

    ``item_code`` is a comma-separated string of item codes.  Returns a
    ``frappe._dict`` response; HTTP status codes are set on
    ``frappe.local.response``.
    """
    try:
        response = frappe._dict()
        # BUG FIX: the old code did ``item_code.encode('utf-8')`` and then
        # ``split(",")`` on the resulting bytes with a str separator, which
        # raises TypeError on Python 3 and sent every call to the error path.
        # The subsequent ``isinstance(item_code, list)`` wrapping was dead
        # code (the wrapped value was never used) and has been removed.
        item_list = [i.strip() for i in item_code.split(",")]
        if not frappe.db.exists("Quotation", quote_id):
            response["message"] = "Quotation not found"
            frappe.local.response['http_status_code'] = 404
        else:
            quote = frappe.get_doc("Quotation", quote_id)
            # Keep only the rows that were not requested for removal.
            quote.items = [row for row in quote.get("items")
                           if row.item_code not in item_list]
            quote.flags.ignore_mandatory = True
            quote.save()
            if not len(quote.get("items", [])):
                # Nothing left on the quote: remove the document entirely.
                frappe.delete_doc("Quotation", quote_id)
                response["message"] = "Deleted all items"
                frappe.local.response["http_status_code"] = 200
            else:
                response = get_cart_details(quote_id)
            frappe.db.commit()
    except Exception as e:
        http_status_code = getattr(e, "http_status_code", 500)
        frappe.local.response['http_status_code'] = http_status_code
        response["message"] = "Unable to Delete Quote Item"
        frappe.log_error(message=frappe.get_traceback() , title="Website API: delete_cart_item")
    finally:
        # NOTE: returning from ``finally`` intentionally guarantees callers
        # always get a response dict, never a propagated exception.
        return response
def list_methods(f):
    """Return a list of the multimethods currently registered to `f`.
    The multimethods are returned in the order they would be tested by the dispatcher
    when the generic function is called.
    The return value is a list, where each item is `(callable, type_signature)`.
    Each type signature is in the format returned by `typing.get_type_hints`.
    `f`: a callable that has been declared `@generic` or `@typed`.
    **Interaction with OOP**:
    Bound methods are resolved to the underlying function automatically.
    The `self`/`cls` argument is extracted from the `__self__` attribute of
    the bound method, enabling linked dispatcher lookups in the MRO.
    **CAUTION**:
    Recall that in Python, instance methods when accessed through the *class*
    are just raw functions; the method becomes bound, and thus `self` is set,
    when accessed through *an instance* of that class.
    Let `Cat` be a class with an OOP instance method `meow`, and `cat` an
    instance of that class. If you call `list_methods(cat.meow)`, you get the
    MRO lookup for linked dispatchers, as expected.
    But if you call `list_methods(Cat.meow)` instead, it won't see the MRO,
    because the value of the `self` argument isn't set for an unbound method
    (which is really just a raw function).
    If `Cat` has a `@classmethod` `iscute`, calling `list_methods(Cat.iscute)`
    performs the MRO lookup for linked dispatchers. This is because a class
    method is already bound (to the class, so the `cls` argument already has
    a value) when it is accessed through the class.
    Finally, note that while that is how `list_methods` works, it is not the
    mechanism actually used to determine `self`/`cls` when *calling* the
    generic function. There, the value of `self`/`cls` is extracted from the
    first positional argument of the call. This is because the dispatcher is
    actually installed on the underlying raw function, so it has no access to
    the metadata of the bound method (which, as seen from the dispatcher, is
    on the outside).
    """
    # Resolve bound methods and wrappers down to the underlying raw function.
    function, _ = getfunc(f)
    if not isgeneric(function):
        raise TypeError(f"{_function_fullname(function)} is not a generic function, it does not have multimethods.")
    # In case of a bound method (either `Foo.classmeth` or `foo.instmeth`),
    # we can get the value for `self`/`cls` argument from its `__self__` attribute.
    #
    # Otherwise we have a regular function, an unbound method, or a `@staticmethod`;
    # in those cases, there's no `self`/`cls`. (Technically, an unbound method has
    # a parameter to receive it, but no value has been set yet.)
    self_or_cls = f.__self__ if hasattr(f, "__self__") else None
    return _list_multimethods(function, self_or_cls) | 5,327,395 |
def load_CIFAR10(file_dir):
    """ load all of cifar """
    # Accumulate every training batch, then stack them into single arrays.
    data_blocks, label_blocks = [], []
    for batch_name in train_list:
        batch_data, batch_labels = load_CIFAR_batch(
            os.path.join(file_dir, batch_name))
        data_blocks.append(batch_data)
        label_blocks.append(batch_labels)
    x_train = np.concatenate(data_blocks)
    y_train = np.concatenate(label_blocks)
    x_test, y_test = load_CIFAR_batch(os.path.join(file_dir, test_batch))
    return x_train, y_train, x_test, y_test
def get_requirements(extra=None):
    """
    Load the requirements for the given extra from the appropriate
    requirements-extra.txt, or the main requirements.txt if no extra is
    specified.
    """
    req_file = f"requirements-{extra}.txt" if extra else "requirements.txt"
    with open(req_file) as handle:
        # One requirement per line; drop blanks and surrounding whitespace.
        stripped = (line.strip() for line in handle)
        return [line for line in stripped if line]
def add_account_details(config):
    """add account details to config"""
    # Get api client
    client = get_api_client()
    # Add accounts details
    # Only verified, non-"nutanix" accounts; page size capped at 250.
    payload = {"length": 250, "filter": "state==VERIFIED;type!=nutanix"}
    res, err = client.account.list(payload)
    if err:
        raise Exception("[{}] - {}".format(err["code"], err["error"]))
    response = res.json()
    a_entities = response.get("entities", None)
    # Maps upper-cased account type -> list of account data dicts.
    accounts = {}
    for a_entity in a_entities:
        account_type = a_entity["status"]["resources"]["type"].upper()
        account_name = a_entity["status"]["name"]
        if account_type not in accounts:
            accounts[account_type] = []
        account_data = {
            "NAME": account_name,
            "UUID": a_entity["metadata"]["uuid"],
        }
        if account_type == "NUTANIX_PC":
            # PC accounts are only kept if they are the local AZ account or
            # a multi-PC account; everything else is skipped entirely.
            if not (
                account_name == "NTNX_LOCAL_AZ"
                or account_name.startswith("multipc_account")
            ):
                continue
            account_data["SUBNETS"] = []
            Obj = get_resource_api("nutanix/v1/subnets", client.connection)
            payload = {"filter": "account_uuid=={}".format(account_data["UUID"])}
            result, er = Obj.list(payload)
            if er:
                # NOTE(review): subnet listing errors are silently ignored,
                # leaving SUBNETS empty — presumably best-effort; confirm.
                pass
            else:
                result = result.json()
                for entity in result["entities"]:
                    cluster_ref = entity["status"].get("cluster_reference", {})
                    if not cluster_ref:
                        # Skip subnets that have no cluster reference.
                        continue
                    cluster_name = cluster_ref.get("name", "")
                    account_data["SUBNETS"].append(
                        {
                            "NAME": entity["status"]["name"],
                            "CLUSTER": cluster_name,
                            "UUID": entity["metadata"]["uuid"],
                        }
                    )
        # If it is local nutanix account, assign it to local nutanix ACCOUNT
        # NOTE: the same dict is then aliased under both the "NTNX_LOCAL_AZ"
        # key and inside accounts[account_type] (appended below).
        if a_entity["status"]["resources"]["data"].get("host_pc", False):
            accounts["NTNX_LOCAL_AZ"] = account_data
        accounts[account_type].append(account_data)
    # fill accounts data
    config["ACCOUNTS"] = accounts | 5,327,398 |
def count_image_files(directory, montage_mode=False):
    """Counts all image files inside the directory.

    If montage_mode, counts 1 level deep and returns the minimum count.
    Else, counts all child images of directory.

    Args:
        directory (str): directory to look for child image files
        montage_mode (bool): whether or not to look in subdirs of directory

    Returns:
        int: the number of image files in the directory
    """
    valid_extensions = {'.tiff', '.tif', '.png', '.jpg', '.jpeg', '.bmp'}

    def count_images(d):
        # BUG FIX: previously listed ``directory`` (the closure variable)
        # instead of ``d``, so montage mode counted the parent directory
        # once per subdirectory instead of counting each subdirectory.
        count = 0
        for f in os.listdir(d):
            _, ext = os.path.splitext(f.lower())
            if ext in valid_extensions:
                count += 1
        return count

    if not montage_mode:
        return count_images(directory)
    return min(count_images(d) for d in get_immediate_subdirs(directory))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.