| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def _expand_and_tile(tensor, multiple, dim=0, name=None):
"""Slice `tensor` shape in 2, then tile along the sliced dimension.
A new dimension is inserted in the shape of `tensor` before `dim`, then values are
tiled `multiple` times along the new dimension.
Args:
tensor: Input `Tensor` or `SparseTensor`.
multiple: Integer, number of times to tile.
dim: Integer, dimension along which to tile.
name: Name of operation.
Returns:
`Tensor` result of expanding and tiling `tensor`.
Raises:
ValueError: if `multiple` is less than 1, or `dim` is not in
`[-rank(tensor), rank(tensor)]`.
"""
if multiple < 1:
raise ValueError(f'Invalid argument multiple={multiple} for '
'expand_and_tile call. `multiple` must be an integer > 0')
with ops.name_scope(name, 'expand_and_tile',
(tensor, multiple, dim)) as scope:
# Sparse.
tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
if dim < 0:
expand_dims = array_ops.reshape(
array_ops.size(tensor.dense_shape) + dim, [1])
else:
expand_dims = [dim]
expanded_shape = array_ops.concat(
(array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
0,
name='expanded_shape')
expanded = sparse_ops.sparse_reshape(
tensor, shape=expanded_shape, name='expand')
if multiple == 1:
return expanded
return sparse_ops.sparse_concat(
dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
# Dense.
expanded = array_ops.expand_dims(
tensor, dim if (dim >= 0) else (dim - 1), name='expand')
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
tile_multiples = array_ops.concat(
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
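# A minimal sketch of the dense path using public TensorFlow ops (the function above relies on
# TF-internal modules such as array_ops/sparse_ops, so this is only an illustrative analogue).
import tensorflow as tf
t = tf.constant([[1, 2], [3, 4]])        # shape (2, 2)
expanded = tf.expand_dims(t, 0)          # insert a new dimension before dim=0 -> shape (1, 2, 2)
tiled = tf.tile(expanded, [3, 1, 1])     # tile 3 times along the new dimension -> shape (3, 2, 2)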
| 11,600
|
def eval_model(opt, print_parser=None):
"""Evaluates a model.
:param opt: tells the evaluation function how to run
:param print_parser: if provided, prints the options that are set within the
model after loading the model
:return: the final result of calling report()
"""
random.seed(42)
# load model and possibly print opt
agent = create_agent(opt, requireModelExists=True)
if print_parser:
# show args after loading model
print_parser.opt = agent.opt
print_parser.print_args()
tasks = opt['task'].split(',')
reports = []
for task in tasks:
task_report = _eval_single_world(opt, agent, task)
reports.append(task_report)
report = aggregate_task_reports(
reports, tasks, micro=opt.get('aggregate_micro', True)
)
# print announcements and report
print_announcements(opt)
print(
'[ Finished evaluating tasks {} using datatype {} ]'.format(
tasks, opt.get('datatype', 'N/A')
)
)
print(report)
return report
| 11,601
|
def login(request):
"""
Login with Dummy Test Account.
"""
if 'user' in request.environ['beaker.session']:
return app.redirect('index')
users.store_to_session(request, users.create())
return app.redirect('index')
| 11,602
|
def safe_plus(x,y):
"""
Handle "x + y" where x and y could be some combination of ints and strs.
"""
# Handle Excel Cell objects. Grrr.
if excel.is_cell_dict(x):
x = x["value"]
if excel.is_cell_dict(y):
y = y["value"]
# Handle NULLs.
if (x == "NULL"):
x = 0
if (y == "NULL"):
y = 0
# Easy case first.
if ((isinstance(x, int) or isinstance(x, float)) and
(isinstance(y, int) or isinstance(y, float))):
return x + y
# Fix data types.
if (isinstance(y, str)):
# NULL string in VB.
if (x == 0):
x = ""
# String concat.
return str(x) + y
if (isinstance(x, str)):
# NULL string in VB.
if (y == 0):
y = ""
# String concat.
return x + str(y)
# Punt. We are not doing pure numeric addition and
# we have already handled string concatenation. Just
# convert things to strings and hope for the best.
return str(x) + str(y)
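# A few hypothetical calls illustrating the mixed-type behaviour of safe_plus
# (assumes excel.is_cell_dict returns False for plain values, so the cell branch is skipped).
print(safe_plus(2, 3))          # 5      pure numeric addition
print(safe_plus("NULL", 7))     # 7      "NULL" is treated as 0
print(safe_plus(0, "abc"))      # "abc"  0 acts as the VB empty string before concatenation
print(safe_plus("a", 5))        # "a5"   string concatenation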
| 11,603
|
def full_like(a, fill_value, dtype=types.float32, split=None, device=None, comm=None, order="C"):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : object
The shape and data-type of 'a' define these same attributes of the returned array.
fill_value : scalar
Fill value.
dtype : ht.dtype, optional
Overrides the data type of the result.
split: int, optional
The axis along which the array is split and distributed, defaults to None (no distribution).
device : str, ht.Device or None, optional
Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device).
comm: Communication, optional
Handle to the nodes holding distributed parts or copies of this tensor.
Returns
-------
out : ht.DNDarray
Array of fill_value with the same shape and type as a.
Examples
--------
>>> x = ht.zeros((2, 3,))
>>> x
tensor([[0., 0., 0.],
[0., 0., 0.]])
>>> ht.full_like(x, 1.0)
tensor([[1., 1., 1.],
[1., 1., 1.]])
"""
return __factory_like(a, dtype, split, full, device, comm, fill_value=fill_value, order=order)
| 11,604
|
def test_autoimport_list_empty(mocker, credentials):
"""Test user has no jobs."""
runner = CliRunner()
mocked_list_s3_autoimport_jobs = mocker.patch.object(
APIClient,
"list_s3_autoimport_jobs",
return_value=S3ProjectImport(results=[], meta=dict(next=None)),
)
res = runner.invoke(autoimport_list, credentials)
assert res.exit_code == 0
mocked_list_s3_autoimport_jobs.assert_called_once()
assert "" in res.output
| 11,605
|
def safedelete(file_path):
"""os.remove wrapped in minimal exception handling."""
try:
os.remove(file_path)
except Exception as ex:
print('Exception: ' + str(sys.exc_info()[0]))
print(ex)
| 11,606
|
def gather_pulled_downloads(input_dir, output_dir):
"""
Gather MPEG stream files from input_dir into a single MP4 file in output_dir
"""
dash_globstr = f"{input_dir.absolute() / '*.dash'}"
dash_glob = glob(dash_globstr)
if len(dash_glob) < 1:
raise ValueError(f"No dash file found in {input_dir}")
elif len(dash_glob) > 1:
raise ValueError(f"Multiple dash files found in {input_dir}")
else:
dash_file = dash_glob[0]
m4s_globstr = f"{input_dir.absolute() / '*.m4s'}"
m4s_files = sorted(glob(m4s_globstr))
output_mp4 = output_dir.absolute() / "output.mp4"
gather_m4s_to_mp4(dash_file, m4s_files, output_mp4)
return output_mp4
| 11,607
|
def Parallelize(ListIn, f, procs = -1, **kwargs):
"""This function packages the "starmap" function in multiprocessing, to allow multiple iterable inputs for the parallelized function.
Parameters
----------
ListIn: list
each item in the list is a tuple of non-keyworded arguments for f.
f : func
function to be parallelized. Signature must not contain any other non-keyworded arguments other than those passed as iterables.
Example:
.. highlight:: python
.. code-block:: python
def multiply(x, y, factor = 1.0):
return factor*x*y
X = np.linspace(0,1,1000)
Y = np.linspace(1,2,1000)
XY = [ (x, Y[i]) for i, x in enumerate(X)] # List of tuples
Z = Parallelize(XY, multiply, factor = 3.0, procs = 8)
Create as many positional arguments as required, but all must be packed into a list of tuples.
"""
if type(ListIn[0]) != tuple:
ListIn = [(ListIn[i],) for i in range(len(ListIn))]
reduced_argfunc = functools.partial(f, **kwargs)
if procs == -1:
opt_procs = int(np.interp(len(ListIn), [1,100,500,1000,3000,5000,10000] ,[1,2,4,8,12,36,48]))
procs = min(opt_procs, cpu_count())
if procs == 1:
OutList = [reduced_argfunc(*ListIn[iS]) for iS in range(len(ListIn))]
else:
p = Pool(processes = procs)
OutList = p.starmap(reduced_argfunc, ListIn)
p.close()
p.join()
return OutList
| 11,608
|
def s3_bucket_with_data(s3_client):
"""
Dummy s3 buckets with objects.
"""
bucket_name = 'test-orphan-project-objects'
bucket_tag = [{
'Key': 'Project',
'Value': 'test-objects'
}]
# Create bucket
s3_client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': 'eu-west-1'
}
)
# Add tags
s3_client.put_bucket_tagging(
Bucket=bucket_name,
Tagging={
'TagSet': bucket_tag
}
)
# Add objects to bucket
object_binary_data = b'The quick brown fox jumps over the lazy dog'
objects_keys = ['test/object-one', 'test/object-two']
for object_key in objects_keys:
s3_client.put_object(
Body=object_binary_data,
Bucket=bucket_name,
Key=object_key
)
yield bucket_name
| 11,609
|
def run_game(game):
"""Brain Games Engine.
Executing game process
Args:
game (function): create game question and answer
"""
print('Welcome to the Brain Games!\n{}\n'.format(game.GAME_DESCRIPTION))
user_name = prompt.string('May I have your name? ')
print('Hello, {}!\n'.format(user_name))
for step in range(NUMBER_OF_STEPS):
(game_question, game_answer) = game.get_challenge()
user_answer = prompt.string(
'Question: {}\nYour answer: '.format(game_question))
if user_answer != game_answer:
print("'{}' is wrong answer ;(. Correct answer was '{}'.".format(
user_answer, game_answer))
print("Let's try again, {}!".format(user_name))
return
print('Correct!')
print('Congratulations, {}'.format(user_name))
| 11,610
|
def to_terminal(series: Union[pd.Series, List[pd.Series]],
title: str = 'resource usage',
pu: str = 'cpu', labels: Optional[list] = None):
"""
Plot a datetime series (or a list of them) to terminal
Parameters
----------
series:
A datetime series or a list of series to be plotted
title:
Title for the plot
pu:
Processing unit (GPU or CPU) for the y axis
labels:
If multiple series, the labels of each one
"""
size = os.get_terminal_size()
colors = ['blue', 'yellow', 'magenta', 'cyan', 'white', 'green']
if not isinstance(series, list):
series = [series]
if labels is None:
labels = [None for _ in range(len(series))]
else:
if len(labels) != len(series):
raise Exception('Labels do not match inputs')
print(title.center(size.columns))
fig = plotille.Figure()
fig.color_mode = 'names'
fig.x_label = 'Dates'
fig.y_label = f'{pu} count'
for idx, s in enumerate(series):
s = s.map('{:.2f}'.format).astype(float)
x = s.index
y = s.values
q1 = s.quantile(0.1)
fig.set_x_limits(min_=min(x), max_=max(x))
fig.set_y_limits(min_=min(y) - q1, max_=max(y) + q1)
fig.plot(x, y, lc=colors[idx], label=labels[idx])
print(fig.show(legend=True))
| 11,611
|
def main():
"""
The main function of the puzzle
"""
print("The total number of trees on the first trip: %s" % count_trees(map_data))
print("The total number of trees on the second trip: %s" % multi_path(map_data))
| 11,612
|
def method_not_found(e):
""" Custom response for methods not allowed for the requested URLs :param e: Exception :return: """
return response('failed', 'The method is not allowed for the requested URL', 405)
| 11,613
|
def get_total_trainsets(df_anual_data, segments):
"""
Fill the training_sets dict.
:param df_anual_data: DataFrame with the annual data
:param segments: iterable of segment ids to partition by
:return: dict with ID_SEGMENT, MES, COD_LABORALIDAD and TRAINING_SET lists
"""
rows_per_day = int(((60 / 15) * 24))
training_sets = {'ID_SEGMENT': [], 'MES': [], 'COD_LABORALIDAD': [], 'TRAINING_SET': []}
for seg_id in segments: # 1) Particionar anual_data por segmento
df_seg = df_anual_data.loc[df_anual_data.ID_SEGMENT == seg_id]
for month_i in df_seg.FECHA.dt.month.unique(): # 2) Dividir mensual_data en 12 datasets
df_month_seg = df_seg.loc[df_seg.FECHA.dt.month == month_i]
for code_i in df_month_seg.COD_LABORALIDAD.unique(): # 3) Particionar por dias con mismo código de lab
df_month_seg_code = df_month_seg.loc[df_month_seg.COD_LABORALIDAD == code_i]
# Fill training_sets dictionary
training_sets['ID_SEGMENT'].append(seg_id)
training_sets['MES'].append(month_i)
training_sets['COD_LABORALIDAD'].append(code_i)
training_sets['TRAINING_SET'].append(df_month_seg_code)
return training_sets
| 11,614
|
def uni2diff(u):
"""Convert speed and angular rate to wheel speeds."""
v = u[0]
omega = u[1]
v_L = v - ELL / 2 * omega
v_R = v + ELL / 2 * omega
return np.array([v_L, v_R])
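# Hypothetical usage: ELL (the wheel separation) is assumed to be a module-level constant.
import numpy as np
ELL = 0.5                      # assumed track width in metres
u = np.array([1.0, 0.2])       # v = 1.0 m/s, omega = 0.2 rad/s
print(uni2diff(u))             # [0.95 1.05] -> left/right wheel speeds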
| 11,615
|
def extract_service_and_module(repo_url):
"""Extract service and module from repository url.
:param str repo_url: repository url
:return (service, module)
:rtype (str, str)
"""
m = re.match(r'.+[/@]([^\.]+\.[^\.]+)[:/]([^/]+/[^/]+)\.git/?$', repo_url)
if not m:
m = re.match(r'.+[/@]([^\.]+\.[^\.]+)[:/]([^/]+/[^/]+)/?$', repo_url)
if not m:
raise Exception(
'cannot detect service and module from {}'.format(repo_url))
service = m.group(1)
module = m.group(2)
if service not in _pull_request_url.keys():
raise Exception(
'service not supported: {}'.format(service))
return (service, module)
| 11,616
|
def triangular(left, mode, right, size=None):
"""Triangular distribution.
Draw samples from the triangular distribution over the interval
[left, right].
For full documentation refer to :obj:`numpy.random.triangular`.
Limitations
-----------
Parameters ``left``, ``mode`` and ``right`` are supported as scalars.
Otherwise, :obj:`numpy.random.triangular(left, mode, right, size)`
samples are drawn.
Output array data type is :obj:`dpnp.float64`.
Examples
--------
Draw samples from the distribution:
>>> s = dpnp.random.triangular(-3, 0, 8, 1000000)
"""
if not use_origin_backend(left):
# TODO:
# array_like of floats for `left`, `mode`, `right`.
if not dpnp.isscalar(left):
pass
elif not dpnp.isscalar(mode):
pass
elif not dpnp.isscalar(right):
pass
elif left > mode:
pass
elif mode > right:
pass
elif left == right:
pass
else:
return dpnp_rng_triangular(left, mode, right, size).get_pyobj()
return call_origin(numpy.random.triangular, left, mode, right, size)
| 11,617
|
def format_hexa(value: str) -> ColorBytes:
"""
Examples:
"bda" => (187, 221, 170, 255)
"4fcd" => (68, 255, 204, 221)
"60B0C4" => (96, 176, 196, 255)
"2BEA40D0" => (43, 234, 64, 208)
"""
if len(value) in {3, 4}:
expanded_color = ''.join(s * 2 for s in value)
else:
expanded_color = value
length = len(expanded_color)
if length in {6, 8}:
hex_parts = [expanded_color[i:(i + 2)] for i in range(0, length, 2)]
return format_color_bytes([int(v, 16) for v in hex_parts])
else:
raise ValueError(value)
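# Sketch of the short-form expansion step on its own (format_color_bytes is not shown above,
# so this only demonstrates how 3- or 4-digit colours are doubled before parsing).
def expand_short_hex(value: str) -> str: return ''.join(s * 2 for s in value) if len(value) in {3, 4} else value
assert expand_short_hex("bda") == "bbddaa"      # -> (0xbb, 0xdd, 0xaa) = (187, 221, 170)
assert expand_short_hex("4fcd") == "44ffccdd"   # -> (68, 255, 204, 221)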
| 11,618
|
def normalize_community_features(features):
"""
This performs TF-IDF-like normalization of community embedding features.
Introduced in: Tang, L., Wang, X., Liu, H., & Wang, L. (2010, July).
A multi-resolution approach to learning with overlapping communities.
In Proceedings of the First Workshop on Social Media Analytics (pp. 14-22). ACM.
Input: - X in R^(nxC_n): The community indicator matrix.
Output: - X_norm in R^(nxC_n): The tf-idf + row normalized community indicator matrix.
"""
# Calculate inverse document frequency.
features = normalize_columns(features)
# Normalize each row of term frequencies to 1
features = normalize_rows(features)
return features
| 11,619
|
def __extend_prefixed(pu):
"""
Expand a prefixed name such as 'prefix:local' into a URIRef using the known prefix map.
:param pu: prefixed name string
:return: URIRef on success, or a BNode when the prefix is unknown
"""
parts = pu.split(':')
if len(parts) == 1:
parts = ('', parts[0])
try:
return URIRef(_prefixes[parts[0]] + parts[1])
except KeyError:
return BNode(pu)
| 11,620
|
def update_plugins_json(plugin_name, plugin_version, assets):
"""
Update the plugins JSON in s3 with the new URLs for assets.
:param plugin_name: The plugin name
:param plugin_version: The plugin version.
:param assets: A list of local paths that will be changed to new URLs.
:return:
"""
logging.info(
'Updating {plugin_name} {plugin_version} in plugin JSON'.format(
plugin_name=plugin_name,
plugin_version=plugin_version))
# Convert the local paths to remote URLs in s3.
# For example /tmp/tmp000000/plugin.yaml
# will become: cloudify/wagons/{plugin_name}/{plugin_version}/plugin.yaml
assets = [ASSET_URL_TEMPLATE.format(
BUCKET_FOLDER,
plugin_name,
plugin_version,
os.path.basename(asset)) for asset in assets]
plugin_dict = get_plugin_new_json(
PLUGINS_JSON_PATH,
plugin_name,
plugin_version,
assets)
write_json_and_upload_to_s3(plugin_dict, PLUGINS_JSON_PATH, BUCKET_NAME)
| 11,621
|
async def test_browse_media(
hass,
hass_ws_client,
mock_plex_server,
requests_mock,
hubs,
hubs_music_library,
):
"""Test getting Plex clients from plex.tv."""
websocket_client = await hass_ws_client(hass)
media_players = hass.states.async_entity_ids("media_player")
msg_id = 1
# Browse base of Plex server
msg_id += 1
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
result = msg["result"]
assert result[ATTR_MEDIA_CONTENT_TYPE] == "server"
assert (
result[ATTR_MEDIA_CONTENT_ID]
== PLEX_URI_SCHEME + DEFAULT_DATA[CONF_SERVER_IDENTIFIER] + "/server"
)
# Library Sections + Recommended + Playlists
assert len(result["children"]) == len(mock_plex_server.library.sections()) + 2
music = next(iter(x for x in result["children"] if x["title"] == "Music"))
tvshows = next(iter(x for x in result["children"] if x["title"] == "TV Shows"))
playlists = next(iter(x for x in result["children"] if x["title"] == "Playlists"))
special_keys = ["Recommended"]
requests_mock.get(
f"{mock_plex_server.url_in_use}/hubs",
text=hubs,
)
# Browse into a special folder (server)
msg_id += 1
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: "server",
ATTR_MEDIA_CONTENT_ID: PLEX_URI_SCHEME
+ f"{DEFAULT_DATA[CONF_SERVER_IDENTIFIER]}/server/{special_keys[0]}",
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
result = msg["result"]
assert result[ATTR_MEDIA_CONTENT_TYPE] == "server"
assert (
result[ATTR_MEDIA_CONTENT_ID]
== PLEX_URI_SCHEME
+ f"{DEFAULT_DATA[CONF_SERVER_IDENTIFIER]}/server/{special_keys[0]}"
)
assert len(result["children"]) == 4 # Hardcoded in fixture
assert result["children"][0]["media_content_type"] == "hub"
assert result["children"][1]["media_content_type"] == "hub"
assert result["children"][2]["media_content_type"] == "hub"
assert result["children"][3]["media_content_type"] == "hub"
# Browse into a special folder (server): Continue Watching
msg_id += 1
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: result["children"][0][ATTR_MEDIA_CONTENT_TYPE],
ATTR_MEDIA_CONTENT_ID: result["children"][0][ATTR_MEDIA_CONTENT_ID],
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
result = msg["result"]
assert result[ATTR_MEDIA_CONTENT_TYPE] == "hub"
assert result["title"] == "Continue Watching"
assert result["children"][0]["media_content_id"].endswith("?resume=1")
requests_mock.get(
f"{mock_plex_server.url_in_use}/hubs/sections/3?includeStations=1",
text=hubs_music_library,
)
# Browse into a special folder (library)
msg_id += 1
library_section_id = 3
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: "library",
ATTR_MEDIA_CONTENT_ID: PLEX_URI_SCHEME
+ f"{DEFAULT_DATA[CONF_SERVER_IDENTIFIER]}/{library_section_id}/{special_keys[0]}",
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
result = msg["result"]
assert result[ATTR_MEDIA_CONTENT_TYPE] == "library"
assert (
result[ATTR_MEDIA_CONTENT_ID]
== PLEX_URI_SCHEME
+ f"{DEFAULT_DATA[CONF_SERVER_IDENTIFIER]}/{library_section_id}/{special_keys[0]}"
)
assert len(result["children"]) == 1
# Browse into a library radio station hub
msg_id += 1
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: result["children"][0][ATTR_MEDIA_CONTENT_TYPE],
ATTR_MEDIA_CONTENT_ID: result["children"][0][ATTR_MEDIA_CONTENT_ID],
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
result = msg["result"]
assert result[ATTR_MEDIA_CONTENT_TYPE] == "hub"
assert len(result["children"]) == 3
assert result["children"][0]["title"] == "Library Radio"
# Browse into a Plex TV show library
msg_id += 1
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: tvshows[ATTR_MEDIA_CONTENT_TYPE],
ATTR_MEDIA_CONTENT_ID: str(tvshows[ATTR_MEDIA_CONTENT_ID]),
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
result = msg["result"]
assert result[ATTR_MEDIA_CONTENT_TYPE] == "library"
result_id = int(URL(result[ATTR_MEDIA_CONTENT_ID]).name)
# All items in section + Hubs
assert (
len(result["children"])
== len(mock_plex_server.library.sectionByID(result_id).all()) + 1
)
# Browse into a Plex TV show
msg_id += 1
mock_show = MockPlexShow()
mock_season = next(iter(mock_show))
with patch.object(
mock_plex_server, "fetch_item", return_value=mock_show
) as mock_fetch:
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: result["children"][-1][
ATTR_MEDIA_CONTENT_TYPE
],
ATTR_MEDIA_CONTENT_ID: str(
result["children"][-1][ATTR_MEDIA_CONTENT_ID]
),
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
result = msg["result"]
assert result[ATTR_MEDIA_CONTENT_TYPE] == "show"
result_id = int(URL(result[ATTR_MEDIA_CONTENT_ID]).name)
assert result["title"] == mock_plex_server.fetch_item(result_id).title
assert result["children"][0]["title"] == f"{mock_season.title} ({mock_season.year})"
# Browse into a Plex TV show season
msg_id += 1
mock_episode = next(iter(mock_season))
with patch.object(
mock_plex_server, "fetch_item", return_value=mock_season
) as mock_fetch:
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: result["children"][0][ATTR_MEDIA_CONTENT_TYPE],
ATTR_MEDIA_CONTENT_ID: str(
result["children"][0][ATTR_MEDIA_CONTENT_ID]
),
}
)
msg = await websocket_client.receive_json()
assert mock_fetch.called
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
result = msg["result"]
assert result[ATTR_MEDIA_CONTENT_TYPE] == "season"
result_id = int(URL(result[ATTR_MEDIA_CONTENT_ID]).name)
assert (
result["title"]
== f"{mock_season.parentTitle} - {mock_season.title} ({mock_season.year})"
)
assert (
result["children"][0]["title"]
== f"{mock_episode.seasonEpisode.upper()} - {mock_episode.title}"
)
# Browse into a Plex music library
msg_id += 1
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: music[ATTR_MEDIA_CONTENT_TYPE],
ATTR_MEDIA_CONTENT_ID: str(music[ATTR_MEDIA_CONTENT_ID]),
}
)
msg = await websocket_client.receive_json()
assert msg["success"]
result = msg["result"]
result_id = int(URL(result[ATTR_MEDIA_CONTENT_ID]).name)
assert result[ATTR_MEDIA_CONTENT_TYPE] == "library"
assert result["title"] == "Music"
# Browse into a Plex artist
msg_id += 1
mock_artist = MockPlexArtist()
mock_album = next(iter(MockPlexArtist()))
mock_track = next(iter(MockPlexAlbum()))
with patch.object(
mock_plex_server, "fetch_item", return_value=mock_artist
) as mock_fetch:
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: result["children"][-1][
ATTR_MEDIA_CONTENT_TYPE
],
ATTR_MEDIA_CONTENT_ID: str(
result["children"][-1][ATTR_MEDIA_CONTENT_ID]
),
}
)
msg = await websocket_client.receive_json()
assert mock_fetch.called
assert msg["success"]
result = msg["result"]
result_id = int(URL(result[ATTR_MEDIA_CONTENT_ID]).name)
assert result[ATTR_MEDIA_CONTENT_TYPE] == "artist"
assert result["title"] == mock_artist.title
assert result["children"][0]["title"] == "Radio Station"
assert result["children"][1]["title"] == f"{mock_album.title} ({mock_album.year})"
# Browse into a Plex album
msg_id += 1
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: result["children"][-1][ATTR_MEDIA_CONTENT_TYPE],
ATTR_MEDIA_CONTENT_ID: str(result["children"][-1][ATTR_MEDIA_CONTENT_ID]),
}
)
msg = await websocket_client.receive_json()
assert msg["success"]
result = msg["result"]
result_id = int(URL(result[ATTR_MEDIA_CONTENT_ID]).name)
assert result[ATTR_MEDIA_CONTENT_TYPE] == "album"
assert (
result["title"]
== f"{mock_artist.title} - {mock_album.title} ({mock_album.year})"
)
assert result["children"][0]["title"] == f"{mock_track.index}. {mock_track.title}"
# Browse into a non-existent TV season
unknown_key = 99999999999999
requests_mock.get(
f"{mock_plex_server.url_in_use}/library/metadata/{unknown_key}",
status_code=HTTPStatus.NOT_FOUND,
)
msg_id += 1
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: result["children"][0][ATTR_MEDIA_CONTENT_TYPE],
ATTR_MEDIA_CONTENT_ID: str(unknown_key),
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == ERR_UNKNOWN_ERROR
# Browse Plex playlists
msg_id += 1
await websocket_client.send_json(
{
"id": msg_id,
"type": "media_player/browse_media",
"entity_id": media_players[0],
ATTR_MEDIA_CONTENT_TYPE: playlists[ATTR_MEDIA_CONTENT_TYPE],
ATTR_MEDIA_CONTENT_ID: str(playlists[ATTR_MEDIA_CONTENT_ID]),
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == msg_id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
result = msg["result"]
assert result[ATTR_MEDIA_CONTENT_TYPE] == "playlists"
result_id = result[ATTR_MEDIA_CONTENT_ID]
| 11,622
|
def angle(u: Vec, v: Vec) -> float:
"""
Returns the cosine of the angle between two vectors u and v
:param u: (Vec) vector u
:param v: (Vec) vector v
:return: The scaled dot product, cosine of u and v's angle
"""
if u.is_zero or v.is_zero:
raise ValueError("Angle with lower dimensional 0 vector cannot be determined")
l_u = u.length
l_v = v.length
return u.dot(v) / (l_u * l_v)
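# NumPy analogue of angle() for two concrete vectors (the Vec class itself is not shown above).
import numpy as np
u, v = np.array([1.0, 0.0]), np.array([1.0, 1.0])
cos_uv = u.dot(v) / (np.linalg.norm(u) * np.linalg.norm(v))
print(cos_uv)   # ~0.7071, i.e. the cosine of 45 degrees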
| 11,623
|
def find_most_similar(top_k, probs, cache_dict, num=10):
"""返回最相似的num张照片的文件名,如果找到相似的,
则返回一个包括匹配元组的列表,否则返回一个空列表
top_k : 包含最佳分类的索引的列表
probs : 包含最佳分类索引对应的概率
cache_dict: 缓存中的索引和概率
num : 返回最近匹配的数目
"""
similar = []
for filename in cache_dict:
score = 0
count = 0
other_top_k, other_probs = cache_dict[filename]
for i, t in enumerate(top_k):
if t in other_top_k:
prob = probs[i]
other_prob = other_probs[other_top_k.tolist().index(t)]
score += abs(prob-other_prob)
count += 1
if count > 0:
score = score / count
similar.append((filename, score))
if similar:
similar.sort(key=lambda item: item[1]) # sort by score in ascending order
return similar[:num]
return similar
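# Hypothetical usage with a two-entry cache; only "a.jpg" shares classes with top_k,
# so it is the only candidate returned (score = mean absolute probability difference).
import numpy as np
top_k = np.array([3, 7, 9])
probs = np.array([0.5, 0.3, 0.2])
cache = {"a.jpg": (np.array([3, 7, 2]), np.array([0.45, 0.35, 0.10])), "b.jpg": (np.array([1, 2, 4]), np.array([0.60, 0.20, 0.10]))}
print(find_most_similar(top_k, probs, cache, num=1))   # [('a.jpg', ~0.05)]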
| 11,624
|
def load(url_or_handle, allow_unsafe_formats=False, cache=None, **kwargs):
"""Load a file.
File format is inferred from url. File retrieval strategy is inferred from
URL. Returned object type is inferred from url extension.
Args:
url_or_handle: a (reachable) URL, or an already open file handle
allow_unsafe_formats: set to True to allow loading unsafe formats (e.g. pickles)
cache: whether to attempt caching the resource. Defaults to True only if
the given URL specifies a remote resource.
Raises:
RuntimeError: If file extension or URL is not supported.
"""
# handle lists of URLs in a performant manner
if isinstance(url_or_handle, (list, tuple)):
return _load_urls(url_or_handle, cache=cache, **kwargs)
ext, decompressor_ext = _get_extension(url_or_handle)
try:
ext = ext.lower()
if ext in loaders:
loader = loaders[ext]
elif ext in unsafe_loaders:
if not allow_unsafe_formats:
raise ValueError(f"{ext} is considered unsafe, you must explicitly allow its use by passing allow_unsafe_formats=True")
loader = unsafe_loaders[ext]
else:
raise KeyError(f'no loader found for {ext}')
decompressor = decompressors[decompressor_ext] if decompressor_ext is not None else nullcontext
message = "Using inferred loader '%s' due to passed file extension '%s'."
log.debug(message, loader.__name__[6:], ext)
return load_using_loader(url_or_handle, decompressor, loader, cache, **kwargs)
except KeyError:
log.warning("Unknown extension '%s', attempting to load as image.", ext)
try:
with read_handle(url_or_handle, cache=cache) as handle:
result = _load_img(handle)
except Exception as e:
message = "Could not load resource %s as image. Supported extensions: %s"
log.error(message, url_or_handle, list(loaders))
raise RuntimeError(message % (url_or_handle, list(loaders))) from e
else:
log.info("Unknown extension '%s' successfully loaded as image.", ext)
return result
| 11,625
|
def evaluate_full_batch(model, minibatch, mode='val'):
"""
Full batch evaluation: for validation and test sets only.
When calculating the F1 score, we will mask the relevant root nodes.
"""
time_s = time.time()
loss, preds, labels = model.eval_step(*minibatch.one_batch(mode=mode))
torch.cuda.synchronize()
time_e = time.time()
node_val_test = minibatch.node_val if mode == 'val' else minibatch.node_test
f1_scores = calc_f1(to_numpy(labels[node_val_test]),
to_numpy(preds[node_val_test]), model.sigmoid_loss)
# node_test=minibatch.node_test
# f1_test=calc_f1(to_numpy(labels[node_test]),to_numpy(preds[node_test]),model.sigmoid_loss)
# printf(' ******TEST: loss = {:.4f}\tmic = {:.4f}\tmac = {:.4f}'.format(loss,f1_test[0],f1_test[1]),style='yellow')
del labels
del preds
return loss, f1_scores[0], f1_scores[1], time_e - time_s
| 11,626
|
def berlekamp_massey(s):
"""Given a sequence of LFSR outputs, find the coefficients of the LFSR."""
C, B, L, m, b = [1], [1], 0, 1, 1
for n in range(len(s)):
d = s[n]
for i in range(1, L + 1):
d ^= mul(C[i], s[n - i])
if d == 0:
m += 1
else:
T = list(C)
while len(C) <= len(B) + m:
C += [0]
t = mul(d, inv(b))
for i in range(len(B)):
C[i + m] ^= mul(t, B[i])
if 2 * L <= n:
L, B, b, m = n + 1 - L, T, d, 1
else:
m += 1
return C[0:L + 1]
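# Worked GF(2) example, assuming mul and inv live in the same module as berlekamp_massey:
# with mul = AND and inv(1) = 1, the recurrence s[n] = s[n-1] ^ s[n-2] is recovered as
# C(x) = 1 + x + x^2, i.e. the coefficient list [1, 1, 1].
def mul(a, b): return a & b
def inv(b): return b   # only 1 is invertible in GF(2)
s = [1, 0, 1, 1, 0, 1, 1, 0]
print(berlekamp_massey(s))   # [1, 1, 1]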
| 11,627
|
def vocab_from_file(vocabfile):
"""
Generates vocabulary from a vocabulary file in JSON
Outputs vocabulary and inverted vocabulary
"""
with smart_open(vocabfile, 'r') as f:
inv_vocab = json.loads(f.read())
vocabulary = {}
for no, word in enumerate(inv_vocab):
vocabulary[word] = no
print('Vocabulary size = %d' % len(vocabulary), file=sys.stderr)
return vocabulary, inv_vocab
| 11,628
|
def verify_pool_type(pooltype):
"""
:type pooltype: str
:param pooltype: Pool type
:raises: ValueError: Invalid Pool type
"""
valid_pool_types = ['Transactional', 'Repository', 'Archival', 'Iops-Optimized', 'Balanced', 'Throughput-Optimized',
'Depot']
if pooltype not in valid_pool_types:
raise ValueError('"{0}" is not a valid pool type. Allowed values are: {1}'.format(pooltype,
str(valid_pool_types)))
| 11,629
|
def sample(image: type_alias.TensorLike,
warp: type_alias.TensorLike,
resampling_type: ResamplingType = ResamplingType.BILINEAR,
border_type: BorderType = BorderType.ZERO,
pixel_type: PixelType = PixelType.HALF_INTEGER,
name: Optional[str] = None) -> tf.Tensor:
"""Samples an image at user defined coordinates.
Note:
The warp maps target to source. In the following, A1 to An are optional
batch dimensions.
Args:
image: A tensor of shape `[B, H_i, W_i, C]`, where `B` is the batch size,
`H_i` the height of the image, `W_i` the width of the image, and `C` the
number of channels of the image.
warp: A tensor of shape `[B, A_1, ..., A_n, 2]` containing the x and y
coordinates at which sampling will be performed. The last dimension must
be 2, representing the (x, y) coordinate where x is the index for width
and y is the index for height.
resampling_type: Resampling mode. Supported values are
`ResamplingType.NEAREST` and `ResamplingType.BILINEAR`.
border_type: Border mode. Supported values are `BorderType.ZERO` and
`BorderType.DUPLICATE`.
pixel_type: Pixel mode. Supported values are `PixelType.INTEGER` and
`PixelType.HALF_INTEGER`.
name: A name for this op. Defaults to "sample".
Returns:
Tensor of sampled values from `image`. The output tensor shape
is `[B, A_1, ..., A_n, C]`.
Raises:
ValueError: If `image` has rank != 4. If `warp` has rank < 2 or its last
dimension is not 2. If `image` and `warp` batch dimension does not match.
"""
with tf.name_scope(name or "sample"):
image = tf.convert_to_tensor(image, name="image")
warp = tf.convert_to_tensor(warp, name="warp")
shape.check_static(image, tensor_name="image", has_rank=4)
shape.check_static(
warp,
tensor_name="warp",
has_rank_greater_than=1,
has_dim_equals=(-1, 2))
shape.compare_batch_dimensions(
tensors=(image, warp), last_axes=0, broadcast_compatible=False)
if pixel_type == PixelType.HALF_INTEGER:
warp -= 0.5
if resampling_type == ResamplingType.NEAREST:
warp = tf.math.round(warp)
if border_type == BorderType.DUPLICATE:
image_size = tf.cast(tf.shape(image)[1:3], dtype=warp.dtype)
height, width = tf.unstack(image_size, axis=-1)
warp_x, warp_y = tf.unstack(warp, axis=-1)
warp_x = tf.clip_by_value(warp_x, 0.0, width - 1.0)
warp_y = tf.clip_by_value(warp_y, 0.0, height - 1.0)
warp = tf.stack((warp_x, warp_y), axis=-1)
return tfa_image.resampler(image, warp)
| 11,630
|
def show_record(record, show_logs=True, truncate_logs=None, truncate_result=10000, header_width=100, show_result ='deep', hang=True):
"""
Show the results of an experiment record.
:param record:
:param show_logs:
:param truncate_logs:
:param truncate_result:
:param header_width:
:param show_result:
:param hang:
:return:
"""
string = get_record_full_string(record, show_logs=show_logs, show_result=show_result, truncate_logs=truncate_logs,
truncate_result=truncate_result, header_width=header_width, include_bottom_border=False)
has_matplotlib_figures = any(loc.endswith('.pkl') for loc in record.get_figure_locs())
if has_matplotlib_figures:
record.show_figures(hang=hang)
print(string)
| 11,631
|
def normalize_parameter(kv):
"""
Translate a parameter into standard form.
"""
(k, v) = kv
if k[0] == 'requiressl' and v in ('1', True):
k[0] = 'sslmode'
v = 'require'
elif k[0] == 'dbname':
k[0] = 'database'
elif k[0] == 'sslmode':
v = v.lower()
return (tuple(k),v)
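# Hypothetical calls; keys appear to be passed as single-element lists so k[0] can be reassigned.
assert normalize_parameter((['requiressl'], '1')) == (('sslmode',), 'require')
assert normalize_parameter((['dbname'], 'mydb')) == (('database',), 'mydb')
assert normalize_parameter((['sslmode'], 'REQUIRE')) == (('sslmode',), 'require')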
| 11,632
|
def pulse_broadening(DM, f_ctr):
"""
pulse_broadening(DM, f_ctr):
Return the approximate pulse broadening (tau) in ms due to scattering
based on the rough relation in Cordes' 'Pulsar Observations I' paper.
'f_ctr' should be in MHz. The approximate error is 0.65 in log(tau).
"""
logDM = Num.log10(DM)
return 10.0**(-3.59 + 0.129*logDM + 1.02*logDM**2.0 -
4.4*Num.log10(f_ctr/1000.0))/1000.0
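# Standalone numpy check of the same scaling relation (Num is presumably numpy above):
# DM = 100 pc/cm^3 at f_ctr = 1400 MHz gives a broadening of roughly 1.3e-3 ms.
import numpy as np
logDM = np.log10(100.0)
tau_ms = 10.0**(-3.59 + 0.129*logDM + 1.02*logDM**2 - 4.4*np.log10(1400.0/1000.0)) / 1000.0
print(tau_ms)   # ~0.0013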
| 11,633
|
def generate_wavefunction_coefficients(dir_name: str):
"""
Generate wavefunction.h5 file using amset.
Parameters
----------
dir_name : str
Directory containing WAVECAR and vasprun.xml files (can be gzipped).
Returns
-------
dict
A dictionary with the keys:
- "dir_name" (str): containing the directory where the wavefunction.h5 file was
generated.
- "log" (str): The output log from ``amset wave``.
- "ibands" (Tuple[List[int], ...]): The bands included in the wavefunction.h5
file. Given as a tuple of one or two lists (one for each spin channel).
The bands indices are zero indexed.
"""
from amset.tools.wavefunction import wave
dir_name = strip_hostname(dir_name) # TODO: Handle hostnames properly.
fc = FileClient()
files = fc.listdir(dir_name)
vasprun_file = Path(dir_name) / get_zfile(files, "vasprun.xml")
wavecar_file = Path(dir_name) / get_zfile(files, "WAVECAR")
# wavecar can't be gzipped, so copy it to current directory and unzip it
fc.copy(wavecar_file, wavecar_file.name)
fc.gunzip(wavecar_file.name)
args = ["--wavecar=WAVECAR", f"--vasprun={vasprun_file}"]
runner = CliRunner()
result = runner.invoke(wave, args, catch_exceptions=False)
ibands = _extract_ibands(result.output)
# remove WAVECAR from current directory
fc.remove("WAVECAR")
return {"dir_name": str(Path.cwd()), "log": result.output, "ibands": ibands}
| 11,634
|
def logged_in_student(browser, override_allowed_hosts, base_test_data):
"""
Fixture for a logged-in student user
Returns:
User: User object
"""
return LoginPage(browser).log_in_via_admin(base_test_data.student_user, DEFAULT_PASSWORD)
| 11,635
|
def test_encoding_and_decoding():
"""Tests the encoding and decoding of components."""
test_components: Dict[ValidatorName, List[Validator]] = {
ValidatorName.SCRIPT: [
ScriptValidator(script="black", args=["-l", "100"]),
ScriptValidator(
script="black", args=["-l", "100"], failure_level=ValidationResultLevel.WARNING
),
ScriptValidator(script="black", args=["-l", "100"], per_item=True),
ScriptValidator(script="black", args=["-l", "100"], run_on_changes=True),
ScriptValidator(
script="black",
args=["-l", "100"],
failure_level=ValidationResultLevel.WARNING,
per_item=True,
),
ScriptValidator(
script="black",
args=["-l", "100"],
failure_level=ValidationResultLevel.WARNING,
run_on_changes=True,
),
ScriptValidator(script="black", args=["-l", "100"], per_item=True, run_on_changes=True),
ScriptValidator(
script="black",
args=["-l", "100"],
failure_level=ValidationResultLevel.WARNING,
per_item=True,
run_on_changes=True,
),
],
}
for name in ValidatorName:
assert name in test_components, f"No test components for Validator {name}"
for name, components in test_components.items():
assert name in ValidatorName, f"{name} is not a valid ValidatorName"
for component in components:
assert (
component.name == name
), f"Testing validator of name {component.name} for name {name}"
assert (
FACTORY.get_instance(json.loads(json.dumps(component.bundle()))) == component
), f"Component {component} does not bundle and unbundle correctly"
| 11,636
|
def test_model(data_set=None, langider=None, lang_to_idx=None, ) -> np.ndarray:
"""
Tests a given langid.py model on the given data set.
:param data_set: data set to test on
:param langider: model to test
:param lang_to_idx: mapping of languages to ids
"""
import numpy as np
langs = data_set.get_tag_set()
pred_prob = np.zeros((len(data_set), len(langs) + 1))
dataloader = DataLoader(data_set)
for i, elem in enumerate(tqdm(dataloader)):
text = elem['text'][0]
label = elem['label'][0]
ranking = langider.rank(text)
for lang, prob in ranking:
pred_prob[i, lang_to_idx[lang]] = prob
pred_prob[i, len(langs)] = lang_to_idx[label]
return pred_prob
| 11,637
|
def test_copy_axis():
"""Sometimes we remove an axis. So when we copy it, we need to make sure
that the new dataset doesn't have the removed axis.
"""
# remove one axis
data = create_data()
data = math(data, axis='chan', operator_name='mean')
assert len(data.axis) == 1
output = data._copy(axis=True)
assert len(data.axis) == len(output.axis)
output = data._copy(axis=False)
assert len(data.axis) == len(output.axis)
| 11,638
|
def mapquest_search(text):
"""
Search on Mapquest (https://www.mapquest.com)
Parameters
-----------
text: The query you want to search for (str)
"""
mapquest=f"https://www.mapquest.com/search/results?query={text}"
open(mapquest)
| 11,639
|
def search(querry, lim=5):
"""Search the querry in youtube and return lim number of results.
Querry is the keyword, i:e name of the song
lim is the number of songs that will be added to video array and returned
"""
# Replace all the spaces with +
querry = querry.replace(' ', '+')
url = "https://www.youtube.com/results?search_query={}".format(querry)
response = urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
count = 0
for vid in soup.findAll(attrs={'class': 'yt-uix-tile-link'}):
if lim == count:
break
url = vid['href']
data = scan_video(url)
if not data:
break
video.append(data)
urls.append(url)
count += 1
return (video, urls)
| 11,640
|
def calculate_center_of_mass(symbols, coordinates):
"""Calculate the center of mass of a molecule.
The center of mass is weighted by each atom's weight.
Parameters
----------
symbols : list
A list of elements for the molecule
coordinates : np.ndarray
The coordinates of the molecule.
Returns
-------
center_of_mass: np.ndarray
The center of mass of the molecule.
Notes
-----
The center of mass is calculated with the formula
.. math:: \\vec{R}=\\frac{1}{M} \\sum_{i=1}^{n} m_{i}\\vec{r_{}i}
"""
total_mass = calculate_molecular_mass(symbols)
center_of_mass = np.array([0.0, 0.0, 0.0])
for atom_number in range(len(symbols)):
atom_type = symbols[atom_number]
mass_of_atom = atomic_weights[atom_type]
atom_position = coordinates[atom_number]
center_of_mass += mass_of_atom * atom_position
center_of_mass = center_of_mass / total_mass
return center_of_mass
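# Self-contained numerical check for water (atomic_weights / calculate_molecular_mass are not
# shown above, so approximate masses are hard-coded here as an assumption).
import numpy as np
masses = {"O": 15.999, "H": 1.008}
symbols = ["O", "H", "H"]
coordinates = np.array([[0.0, 0.0, 0.0], [0.76, 0.59, 0.0], [-0.76, 0.59, 0.0]])
total_mass = sum(masses[s] for s in symbols)
com = sum(masses[s] * c for s, c in zip(symbols, coordinates)) / total_mass
print(com)   # close to [0, 0.066, 0]: the centre of mass sits near the heavy oxygen atom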
| 11,641
|
def test_tool_exec_command_dash_help_reverse(help_option):
"""Execute a command (that needs no params) asking for help."""
cmd = create_command('somecommand', 'This command does that.')
command_groups = COMMAND_GROUPS + [('group', 'help text', [cmd])]
dispatcher = Dispatcher([help_option, 'somecommand'], command_groups)
with patch('charmcraft.helptexts.get_command_help') as mock:
mock.return_value = 'test help'
with pytest.raises(CommandError) as cm:
dispatcher.run()
error = cm.value
# check the given information to the builder
args = mock.call_args[0]
assert args[0] == COMMAND_GROUPS
assert args[1].__class__ == cmd
assert sorted(x[0] for x in args[2]) == [
'-h, --help', '-p, --project-dir', '-q, --quiet', '-v, --verbose']
# check the result of the full help builder is what is shown
assert error.argsparsing
assert str(error) == "test help"
assert error.retcode == 0
| 11,642
|
def __iadd__(self, other):
"""Pythonic use of concat
Example:
xs += ys
Returns self.concat(self, other)"""
return self.concat(self, other)
| 11,643
|
def AddSqlServerAuditBucketPath(parser):
"""Adds the `--audit-bucket-path` flag to the parser."""
parser.add_argument(
'--audit-bucket-path',
required=False,
hidden=True,
help=('Path in Google Cloud Storage to upload generated audit files. '
'The URI is in the form gs://bucketName/folderName. '
'Only available for SQL Server instances.'))
| 11,644
|
def remove_stopwords(texts, stop_words):
"""
Define functions for stopwords
:param texts: Processed texts from main module
:return: Texts that already removed a stopwords
"""
return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
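# Hypothetical usage; assumes the module already imports simple_preprocess from gensim,
# which lowercases and tokenises each document before the stop words are filtered out.
from gensim.utils import simple_preprocess
stop_words = {"the", "is", "a"}
texts = ["The cat is on a mat"]
print(remove_stopwords(texts, stop_words))   # [['cat', 'on', 'mat']]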
| 11,645
|
def patch_aggregated_metadata(context):
"""
Patches context in aggregated pages
"""
for metadata in context["posts"]:
metadata.body = patch_links(
metadata.body,
metadata.link[:11], # first 11 characters is path (YYYY/MM/DD/)
metadata.link[11:], # following characters represent filename
True) # hyperlink title to post
metadata.body = strip_xml_declaration(metadata.body)
| 11,646
|
def dumps(obj):
"""Output json with formatting edits + object handling."""
return json.dumps(obj, indent=4, sort_keys=True,
cls=CustomEncoder)
| 11,647
|
def find_asg_using_amis(ami_ids):
"""
take a list of ami ids and return a dictionary of
launch configs that use them
"""
# return shape: {ami_id: [launch configuration ARNs]}
ami_ids = listify(ami_ids)
result = {id: [] for id in ami_ids}
client_asg = boto3.client('autoscaling')
lc = client_asg.describe_launch_configurations()
for a_lc in lc['LaunchConfigurations']:
if a_lc['ImageId'] in ami_ids:
result[a_lc['ImageId']].append(a_lc['LaunchConfigurationARN'])
return result
| 11,648
|
def tokenize_nmt(text, num_examples=None):
"""Tokenize the English-French dataset."""
source, target = [], []
for i, line in enumerate(text.split('\n')):
if num_examples and i > num_examples:
break
parts = line.split('\t')
if len(parts) == 2:
source.append(parts[0].split(' '))
target.append(parts[1].split(' '))
return source, target
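# Minimal usage with an inline tab-separated snippet in the English-French data format.
text = "go .\tva !\nhi .\tsalut !"
source, target = tokenize_nmt(text)
print(source)   # [['go', '.'], ['hi', '.']]
print(target)   # [['va', '!'], ['salut', '!']]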
| 11,649
|
def op_habit(queryUserId):
"""
Program operation-habit recommendation module.
"""
print("****** Start processing op_habit Module! ******\n", file=sys.stderr)
tmpStr = ""
tmpCount = []
playLog = PlayLog.query.filter_by(user=queryUserId).all() # Find the play-log list for the given user
for i in range(len(playLog)): # Traverse all the playlog
# Use the `play_log_id` to find the corresponding opLog list
opLog = OperationLog.query.filter(OperationLog.play_log == playLog[i].play_log_id).all()
#Save the play_log and corresponding count
tmpCount.append({
'play_log' : playLog[i].play_log_id,
'count' : len(opLog)
})
for i in range(0, len(tmpCount)-1):
for j in range(0, len(tmpCount)-1-i):
if tmpCount[j]['count'] < tmpCount[j+1]['count']:
tmp = tmpCount[j]
tmpCount[j] = tmpCount[j+1]
tmpCount[j+1] = tmp
# print(tmpCount)
# for i in range(0, 4):
# # print(tmpCount[i]['play_log'])
# playLog_back = PlayLog.query.filter_by(play_log_id=tmpCount[i]['play_log']).first()
# tmpStr += str(playLog_back.audio) + ","
# playLog_back = PlayLog.query.filter_by(play_log_id=tmpCount[4]['play_log']).first()
# tmpStr += str(playLog_back.audio)
# user = User.query.filter_by(id=queryUserId).first()
# user.operation_ranks = tmpStr
# db.session.commit()
print("****** Processing hot_play Module done! ******\n", file=sys.stderr)
| 11,650
|
def setup_snicar(input_file):
"""Builds impurity array and instances of all classes according to config in yaml file.
Args:
input_file: path to the yaml configuration file
Returns:
ice: instance of Ice class
illumination: instance of Illumination class
rt_config: instance of RTConfig class
model_config: instance of ModelConfig class
plot_config: instance of PlotConfig class
impurities: array of impurity instances
"""
impurities = build_impurities_array(input_file)
(
ice,
illumination,
rt_config,
model_config,
plot_config,
) = build_classes(input_file)
return (
ice,
illumination,
rt_config,
model_config,
plot_config,
impurities,
)
| 11,651
|
def plot_history(history, figsize=(8, 5)):
"""Plot metric values through training
Args:
history: object returned by model.fit(..., callbacks=[History(), ...])
figsize: base (width, height) for each metric subplot
"""
# Setup the figure object
figsize=(figsize[0]*len(history.model.metrics_names), figsize[1])
fig = plt.figure(figsize=figsize)
for i, metric in enumerate(history.model.metrics_names):
plt.subplot(1, len(history.model.metrics_names), i+1)
plt.plot(history.epoch, history.history[metric], label=f'train')
plt.plot(history.epoch, history.history['val_'+metric], label=f'valid')
plt.legend()
plt.xlabel("epoch")
plt.ylabel(metric)
plt.title(metric)
plt.tight_layout()
| 11,652
|
async def list_model_config(model_name: str):
"""
Lists all the model's configuration.
:param model_name: Model name
:return: List of model's configuration
"""
try:
return ApiResponse(data=dl_service.get_config(model_name))
except ApplicationError as e:
raise e
except Exception:
raise ApplicationError('unexpected server error')
| 11,653
|
def _save_video(f, filename, directory):
"""
The uploaded video file is saved.
"""
destination_file = open(os.path.join(directory, filename), 'wb+')
for chunk in f.chunks():
destination_file.write(chunk)
destination_file.close()
| 11,654
|
def signup_post(request):
"""Получаем данные пользователя с формы и заносим их в базу проверяя,
если удачно то предоставляем вход в базу потом дополнительные идентификац"""
mess = "login please"
data = get_post( request )
mail = data.POST['mail']
name = data.POST['name']
captcha = data.POST['captcha']
hash = data.POST['hash']
password = data.POST['password']
address = data.POST['addres']
phone = data.POST['phone']
# date = time.strftime("%H:%M:%S %d.%m.%Y")
# date=time.time()
passw = getmd5(password)
if not check_captcha(hash, captcha):
hash, raw = get_captcha(hash)
mess="Неправильно введен проверочный код"
return templ('app.auth:signup', request, dict(name=name, mail=mail, address=address, phone=phone, hash = hash, mess = mess.decode('UTF-8')) )
# check whether such a user already exists in the database; if not, register them
if not request.db.doc.find_one({'_id':'user:'+name}):
doc = {'_id': 'user:'+name, 'name': name, 'password': passw, 'mail': mail, "type": "table_row", "rate":0,
"doc_type": "des:users", "doc": {"user":"user:"+name, "name": {'ru':name, 'en':name}, "old": "33", "phone":phone, "address": address,
'date': create_date(), "home": "false" } }
request.db.doc.save(doc)
request.db.doc.update({'_id':'role:simple_users'}, {'$set':{'users.user:'+name:'true'}} )
mess = "Поздравляем, можете войти."
else:
mess = "Такой логин уже есть выберите другой"
return templ('libs.auth:login', request, dict(mess = mess.decode('UTF-8')) )
| 11,655
|
def make_onehot(label, num_classes, axis=-1):
"""
Create one hot tensor based on the input tensor
Args:
label: input tensor; values must be non-negative integers less than num_classes
num_classes: the number of class in one hot tensor
axis: The axis to fill (default: -1, a new inner-most axis).
Returns:
:onehot tensor
Examples:
>>> make_onehot(np.array([[1, 2], [1, 3]]), 4, axis=-1)
array([[[0., 1., 0., 0.],
[0., 0., 1., 0.]],
<BLANKLINE>
[[0., 1., 0., 0.],
[0., 0., 0., 1.]]])
"""
shp = label.shape
flatten_label = label.reshape(-1)
result = np.eye(num_classes)
result = result[flatten_label.astype(np.int64)]
result = result.reshape(*shp, num_classes)
if axis != -1 and axis != ndim(label) - 1:
result = np.swapaxes(result, axis, -1)
return result
| 11,656
|
def test_random_fourier_feature_layer_compute_covariance_of_inducing_variables(
basis_func_cls, batch_size
):
"""
Ensure that the random fourier feature map can be used to approximate the covariance matrix
between the inducing point vectors of a sparse GP, with the condition that the number of latent
GP models is greater than one.
"""
n_components = 10000
kernel = gpflow.kernels.SquaredExponential()
fourier_features = basis_func_cls(kernel, n_components, dtype=tf.float64)
x_new = tf.ones(shape=(2 * batch_size + 1, 1), dtype=tf.float64)
u = fourier_features(x_new)
approx_kernel_matrix = inner_product(u, u)
actual_kernel_matrix = kernel.K(x_new, x_new)
np.testing.assert_allclose(approx_kernel_matrix, actual_kernel_matrix, atol=5e-2)
| 11,657
|
def kohn_sham_iteration(
state,
num_electrons,
xc_energy_density_fn,
interaction_fn,
enforce_reflection_symmetry):
"""One iteration of Kohn-Sham calculation.
Note xc_energy_density_fn must be wrapped by jax.tree_util.Partial so this
function can take a callable. When the arguments of this callable changes,
e.g. the parameters of the neural network, kohn_sham_iteration() will not be
recompiled.
Args:
state: KohnShamState.
num_electrons: Integer, the number of electrons in the system. The first
num_electrons states are occupied.
xc_energy_density_fn: function takes density (num_grids,) and returns
the energy density (num_grids,).
interaction_fn: function takes displacements and returns
float numpy array with the same shape of displacements.
enforce_reflection_symmetry: Boolean, whether to enforce reflection
symmetry. If True, the system are symmetric respecting to the center.
Returns:
KohnShamState, the next state of Kohn-Sham iteration.
"""
if enforce_reflection_symmetry:
xc_energy_density_fn = _flip_and_average_fn(
xc_energy_density_fn, locations=state.locations, grids=state.grids)
hartree_potential = get_hartree_potential(
density=state.density,
grids=state.grids,
interaction_fn=interaction_fn)
xc_potential = get_xc_potential(
density=state.density,
xc_energy_density_fn=xc_energy_density_fn,
grids=state.grids)
ks_potential = hartree_potential + xc_potential + state.external_potential
xc_energy_density = xc_energy_density_fn(state.density)
# Solve Kohn-Sham equation.
density, total_eigen_energies, gap = solve_noninteracting_system(
external_potential=ks_potential,
num_electrons=num_electrons,
grids=state.grids)
total_energy = (
# kinetic energy = total_eigen_energies - external_potential_energy
total_eigen_energies
- get_external_potential_energy(
external_potential=ks_potential,
density=density,
grids=state.grids)
# Hartree energy
+ get_hartree_energy(
density=density,
grids=state.grids,
interaction_fn=interaction_fn)
# xc energy
+ get_xc_energy(
density=density,
xc_energy_density_fn=xc_energy_density_fn,
grids=state.grids)
# external energy
+ get_external_potential_energy(
external_potential=state.external_potential,
density=density,
grids=state.grids)
)
if enforce_reflection_symmetry:
density = utils.flip_and_average(
locations=state.locations, grids=state.grids, array=density)
return state._replace(
density=density,
total_energy=total_energy,
hartree_potential=hartree_potential,
xc_potential=xc_potential,
xc_energy_density=xc_energy_density,
gap=gap)
| 11,658
|
def calculate_reliability(data):
""" Calculates the reliability rating of the smartcab during testing. """
success_ratio = data['success'].sum() * 1.0 / len(data)
if success_ratio == 1: # Always meets deadline
return ("A+", "green")
else:
if success_ratio >= 0.90:
return ("A", "green")
elif success_ratio >= 0.80:
return ("B", "green")
elif success_ratio >= 0.70:
return ("C", "#EEC700")
elif success_ratio >= 0.60:
return ("D", "#EEC700")
else:
return ("F", "red")
| 11,659
|
def sort_and_index(file_name, sorted_prefix=None):
""" Sorts and indexes the bam file given by file_name.
"""
if sorted_prefix is None:
sorted_prefix = file_name.replace('.bam', '') + '_sorted'
sorted_name = sorted_prefix + '.bam'
log_subprocess.check_call(['samtools','sort', '-o', sorted_name, file_name])
pysam.index(sorted_name)
| 11,660
|
def cache_mat_calc(dra, ddc, dra_err, ddc_err, ra_rad, dc_rad, ra_dc_cor=None,
l_max=1, fit_type="full", num_iter=None):
"""Calculate cache matrix for future use
Parameters
----------
dra/ddc : array of float
R.A.(*cos(Dec.))/Dec. differences
dra_err/ddc_err : array of float
formal uncertainty of dra(*cos(dc_rad))/ddc
ra_rad/dc_rad : array of float
Right ascension/Declination in radian
ra_dc_cov/ra_dc_cor : array of float
covariance/correlation coefficient between dra and ddc, default is None
fit_type : string
flag to determine which parameters to be fitted
'full' for T- and S-vectors both
'T' for T-vectors only
'S' for S-vectors only
Returns
----------
suffix_array : list of str
suffixes identifying the cached normal-matrix blocks written for later use
"""
# Maximum number of sources processed at a time
# According to my test, 100 should be a good choice
if num_iter is None:
num_iter = 100
div = dra.size // num_iter
rem = dra.size % num_iter
suffix_array = []
if rem:
suffix_array.append("{:05d}".format(0))
if not ra_dc_cor is None:
nor_mat_calc_for_cache(dra_err[: rem], ddc_err[: rem],
ra_rad[: rem], dc_rad[: rem],
ra_dc_cor=ra_dc_cor[: rem],
l_max=l_max, fit_type=fit_type,
suffix=suffix_array[0])
else:
nor_mat_calc_for_cache(dra_err[: rem], ddc_err[: rem],
ra_rad[: rem], dc_rad[: rem],
l_max=l_max, fit_type=fit_type,
suffix=suffix_array[0])
for i in range(div):
sta = rem + i * num_iter
end = sta + num_iter
suffix_array.append("{:05d}".format(i+1))
if not ra_dc_cor is None:
nor_mat_calc_for_cache(dra_err[sta: end], ddc_err[sta: end],
ra_rad[sta: end], dc_rad[sta: end],
ra_dc_cor=ra_dc_cor[sta: end],
l_max=l_max, fit_type=fit_type,
suffix=suffix_array[-1])
else:
nor_mat_calc_for_cache(dra_err[sta: end], ddc_err[sta: end],
ra_rad[sta: end], dc_rad[sta: end],
l_max=l_max, fit_type=fit_type,
suffix=suffix_array[-1])
return suffix_array
| 11,661
|
def get_android_replacements():
"""Gets a dictionary of all android-specific replacements to be made."""
replacements = {}
compileSdk = 'compileSdkVersion {}'.format(COMPILE_SDK_VERSION)
targetSdk = 'targetSdkVersion {}'.format(TARGET_SDK_VERSION)
buildToolsVersion = 'buildToolsVersion \'{}\''.format(BUILD_TOOLS_VERSION)
replacements[COMPILE_SDK_RE] = compileSdk
replacements[TARGET_SDK_RE] = targetSdk
replacements[BUILD_TOOLS_RE] = buildToolsVersion
return replacements
| 11,662
|
def process_long_term_idle_users(
slack_data_dir: str,
users: List[ZerverFieldsT],
slack_user_id_to_zulip_user_id: SlackToZulipUserIDT,
added_channels: AddedChannelsT,
added_mpims: AddedMPIMsT,
dm_members: DMMembersT,
zerver_userprofile: List[ZerverFieldsT],
) -> Set[int]:
"""Algorithmically, we treat users who have sent at least 10 messages
or have sent a message within the last 60 days as active.
Everyone else is treated as long-term idle, which means they will
have a slightly slower first page load when coming back to
Zulip.
"""
all_messages = get_messages_iterator(slack_data_dir, added_channels, added_mpims, dm_members)
sender_counts: Dict[str, int] = defaultdict(int)
recent_senders: Set[str] = set()
NOW = float(timezone_now().timestamp())
for message in all_messages:
timestamp = float(message["ts"])
slack_user_id = get_message_sending_user(message)
if not slack_user_id:
continue
if slack_user_id in recent_senders:
continue
if NOW - timestamp < 60:
recent_senders.add(slack_user_id)
sender_counts[slack_user_id] += 1
for (slack_sender_id, count) in sender_counts.items():
if count > 10:
recent_senders.add(slack_sender_id)
long_term_idle = set()
for slack_user in users:
if slack_user["id"] in recent_senders:
continue
zulip_user_id = slack_user_id_to_zulip_user_id[slack_user["id"]]
long_term_idle.add(zulip_user_id)
for user_profile_row in zerver_userprofile:
if user_profile_row["id"] in long_term_idle:
user_profile_row["long_term_idle"] = True
# Setting last_active_message_id to 1 means the user, if
# imported, will get the full message history for the
# streams they were on.
user_profile_row["last_active_message_id"] = 1
return long_term_idle
| 11,663
|
def tb_view(model, logdir=None, cmd=None):
"""Visualises a :model: in TensorBoard. (That is, everything in the model's Graph, which may actually be much larger
than the model itself.)
    TensorBoard should open automatically; whether the browser window is brought to the front is platform-dependent.
Extra arguments:
:logdir: is the directory to save the model to prior to opening it in TensorBoard. Defaults to a randomly-named
temporary directory.
:cmd: is any command to call before launching TensorBoard, for example to open a virtual environment. This can
be arbitrary shell code.
"""
if logdir is None:
logdir = f'/tmp/{tools.uuid2()}'
inp = model.input
if isinstance(inp, (tuple, list)):
inp = inp[0]
graph = inp.graph
tf.summary.FileWriter(logdir=logdir, graph=graph).flush()
def run_tensorboard():
if cmd:
tools.shell(f'{cmd}; tensorboard --logdir {logdir}')
else:
tools.shell(f'tensorboard --logdir {logdir}')
thread = threading.Thread(target=run_tensorboard)
thread.start()
time.sleep(2) # todo: actually detect when tensorboard is ready and open then. But this is almost always right.
webbrowser.open_new_tab('http://localhost:6006')
thread.join()
| 11,664
|
def clear_cache():
"""
Clears internal cache. Returns
something that can be given back to restore_cache.
"""
global FS_CACHE
old = FS_CACHE
FS_CACHE = {}
return old
| 11,665
|
def extend_gdf(gdf_disjoint, id_col):
"""
Add duplicates of intersecting geometries to be able to add the constants.
This function adds rows with duplicate geometries and creates the new `id`
column for each of the new rows. This function is called by another function
`complete_disjoint_geoms`.
"""
    tqdm_max = len(gdf_disjoint)
    # Collect the per-row frames and concatenate once: DataFrame.append was
    # removed in pandas 2.0, and repeated appends in a loop are quadratic.
    ext_parts = []
    for _, row in tqdm(gdf_disjoint.iterrows(), total=tqdm_max):
        num = len(row[id_col])
        data = np.array([list(row[id_col]), [row["geometry"]] * num]).T
        ext_new = pd.DataFrame(data, columns=gdf_disjoint.columns)
        ext_new[id_col + "_set"] = [row[id_col]] * num
        ext_parts.append(ext_new)
    if not ext_parts:
        return pd.DataFrame(columns=list(gdf_disjoint.columns) + [id_col + "_set"])
    return pd.concat(ext_parts, ignore_index=True)
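# A minimal usage sketch (hypothetical data, not from the original project):
# each row of the disjoint frame carries the set of overlapping ids plus one
# geometry, and extend_gdf unrolls it to one row per id. Geometries are plain
# strings here just to keep the example dependency-free.
gdf_demo = pd.DataFrame({"plant_id": [{1, 2}, {3}],
                         "geometry": ["POLY_A", "POLY_B"]})
# -> three rows: ids 1 and 2 share POLY_A, id 3 keeps POLY_B, and the
#    original id set is preserved in the new plant_id_set column.
print(extend_gdf(gdf_demo, "plant_id"))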
| 11,666
|
def schedule_news_updates(update_interval:int, update_name:str)->dict:
"""
Functionality:
---------------
Schedules a new news data update
Parameters:
---------------
update_interval: int
The time until the scheduler should update the news data
update_name: str
The name of the update that has caused the scheduling of the news data update
Returns:
---------------
a key-value pair: dict
Returns a dictionary with the key being the update name and the
        value being the scheduler event returned by news_scheduler.enter
"""
    return {update_name: news_scheduler.enter(update_interval, 1, update_news, ())}
| 11,667
|
def test_basic_greetings():
"""The bot should respond sensibly to common greeting words"""
sent = "hello"
resp = broback(sent)
assert resp == GREETING_RESPONSES[1]
| 11,668
|
def ValidateResponse(data):
"""Validate a JSON-RPC response object.
Args:
data: The JSON-RPC object (dict).
Raises:
ProtocolError: if the object format is not correct.
"""
ValidateBasicJSONRPCData(data)
if 'response' not in data:
raise ProtocolError('JSON is not a valid response')
| 11,669
|
def get_digits(text):
"""
Returns all numeric characters (digits) in string *text* in a new (concatenated) **string**.
Example:
>>> get_digits('Test123')
'123'
>>> int(get_digits('The answer is 42'))
42
:param text: The string to search.
:type text: str, unicode
:rtype: str, unicode
"""
_vld.pass_if(_vld.is_text(text), TypeError, "'text' attribute must be a string (got {!r})".format(text))
return EMPTY_STR.join(s for s in text if s.isdigit())
| 11,670
|
def make():
""" генератор 1 расстановки = 1 горизонтали"""
# ставим 1 слона
to = random.choice((1, 3, 5, 7))
f1[to-1] = "B"
fp.remove(to)
# ставим 2 слона
to = random.choice((2, 4, 6, 8))
f1[to-1] = "B"
fp.remove(to)
# ставим ферзя
to = random.choice(fp)
f1[to-1] = "Q"
fp.remove(to)
# ставим 1 коня
to = random.choice(fp)
f1[to-1] = "N"
fp.remove(to)
# ставим 2 коня
to = random.choice(fp)
f1[to-1] = "N"
fp.remove(to)
# ставим 1 ладью
to = fp.pop(0)
f1[to-1] = "R"
# ставим 2 ладью
to = fp.pop()
f1[to-1] = "R"
# ставим короля
to = fp.pop()
f1[to-1] = "K"
| 11,671
|
def test_all(args):
"""
Measure FPS of all configurations.
"""
agent_types = [
"Baxter_ik",
"Baxter_impedance",
"Sawyer_ik",
"Sawyer_impedance",
"Cursor_ik",
]
rendering_qualities = ["no_200", "low_200", "high_200", "low_500", "high_500"]
furniture_ids = [0, 9, 6]
results = {}
for agent in agent_types:
results[agent] = {}
for rendering in rendering_qualities:
results[agent][rendering] = {}
for furniture_id in furniture_ids:
if rendering.startswith("no"):
args.unity = False
args.visual_ob = False
elif rendering.startswith("low"):
args.unity = True
args.quality = 0
args.visual_ob = True
elif rendering.startswith("high"):
args.unity = True
args.quality = 4
args.visual_ob = True
if "200" in rendering:
args.screen_width = 200
args.screen_height = 200
else:
args.screen_width = 500
args.screen_height = 500
background_name = "Simple"
# set parameters for the environment (env, furniture_id, background)
env_name = "Furniture{}Env".format(agent.split("_")[0])
args.env = env_name
args.control_type = agent.split("_")[1]
args.furniture_id = furniture_id
args.background = background_name
print()
print(
"Creating environment (robot: {}, furniture: {}, background: {})".format(
env_name, furniture_names[furniture_id], background_name
)
)
FPS = []
for i in range(2):
# make environment following arguments
env = make_env(env_name, args)
# reset the environment with new furniture and background
env.reset(furniture_id, background_name)
# measure FPS of simulation and rendering
done = False
st = time.time()
step = 0
while not done and step < 500:
step += 1
ob, rew, done, info = env.step(env.action_space.sample())
FPS.append(step / (time.time() - st))
# close the environment instance
env.close()
print("fps = {:.2f}".format(np.mean(FPS)))
results[agent][rendering][furniture_id] = np.mean(FPS)
# output summary
for agent in agent_types:
print(agent)
for rendering in rendering_qualities:
output = "\t".join(
[
"{:.2f}".format(results[agent][rendering][furniture_id])
for furniture_id in furniture_ids
]
)
print(rendering, output)
| 11,672
|
def embed_tiles_in_json_sprite(tile_list, as_bytes=True, out_file=None):
"""Make a big rectangle containing the images for a brainsprite.
Parameters:
-----------
tile_list : list
List of 2d square numpy arrays to stick in a mosaic
Returns:
--------
mosaic : np.ndarray
Mosaic of tile images
"""
# Tiles are squares
tile_size = tile_list[0].shape[0]
num_tiles = len(tile_list)
num_tile_rows = nearest_square(num_tiles)
num_tile_cols = int(np.ceil(num_tiles/num_tile_rows))
mosaic = np.zeros((num_tile_rows * tile_size,
num_tile_cols * tile_size))
i_indices, j_indices = np.unravel_index(np.arange(num_tiles),
(num_tile_rows, num_tile_cols))
i_tile_offsets = tile_size * i_indices
j_tile_offsets = tile_size * j_indices
for tile, i_offset, j_offset in zip(tile_list, i_tile_offsets,
j_tile_offsets):
mosaic[i_offset:(i_offset + tile_size),
j_offset:(j_offset + tile_size)] = tile
if as_bytes:
img = mplfig(mosaic, out_file, as_bytes=as_bytes)
return dict(img=img, N=num_tile_rows, M=num_tile_cols,
pix=tile_size, num_slices=num_tiles)
return dict(mosaic=mosaic, N=num_tile_rows, M=num_tile_cols,
pix=tile_size, num_slices=num_tiles)
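# A minimal usage sketch (not from the original source), assuming numpy is
# imported as np and the module-level helpers nearest_square/mplfig exist.
# With as_bytes=False nothing is rendered, so only numpy is exercised.
demo_tiles = [np.random.rand(32, 32) for _ in range(7)]
demo_sprite = embed_tiles_in_json_sprite(demo_tiles, as_bytes=False)
print(demo_sprite["mosaic"].shape, demo_sprite["N"], demo_sprite["M"])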
| 11,673
|
def test_compare_eq3():
""" not not (two == 2), 20x
"""
n = 20
while n:
not not (two == 2)
n -= 1
| 11,674
|
def removeUnicode(text):
""" Removes unicode strings like "\u002c" and "x96" """
text = re.sub(r'(\\u[0-9A-Fa-f]+)',r'', text)
text = re.sub(r'[^\x00-\x7f]',r'',text)
return text
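# Quick illustrative example (hypothetical input): both literal "\uXXXX"
# escape sequences and non-ASCII characters are stripped.
print(removeUnicode("Caf\u00e9 costs \\u0024 5"))  # -> "Caf costs  5"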
| 11,675
|
def mark_word(page, text):
"""
    Highlight each word that contains 'text'.
"""
wlist = page.getText("words") # make the word list
for w in wlist: # scan through all words on page
if text in w[4]: # w[4] is the word's string
r = fitz.Rect(w[:4]) # make rect from word bbox
highlight = page.addHighlightAnnot(r)
highlight.setColors({"stroke":(0, 0, 1), "fill":(0.75, 0.8, 0.95)})
            highlight.update()  # apply the highlight annotation
return
| 11,676
|
def mfcc(tf, n_mfcc, fs, fmin=0.0, fmax=None):
"""
Extract MFCC vectors
Args:
tf : single-channel time-frequency domain signal,
indexed by 'tf'
n_mfcc : number of coefficients
fs : sample rate
fmin : (default 0) minimal frequency in Hz
fmax : (default fs/2) maximal frequency in Hz
Returns:
mfcc : MFCC
"""
if fmax is None:
fmax = fs / 2.0
n_frame, n_fbin = tf.shape
# get filter weights
freq = np.fft.fftfreq(n_fbin)
fbw = mel_freq_fbank_weight(n_mfcc, freq, fs, fmin=fmin, fmax=fmax)
# get log power
sigpow = np.real(tf * tf.conj())
logfpow = np.log(np.einsum('bf,tf->tb', fbw, sigpow) + 1e-20)
# DCT
mfcc = scipy.fft.dct(logfpow)
return mfcc
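# A minimal usage sketch (assumed context): frame a test tone, take a
# two-sided FFT per frame, and feed the (n_frame, n_fbin) array to mfcc.
# Assumes numpy/scipy are imported as in this module and that the
# module-level mel_freq_fbank_weight helper is available.
fs_demo = 16000
sig_demo = np.sin(2 * np.pi * 440 * np.arange(fs_demo) / fs_demo)
frames_demo = np.lib.stride_tricks.sliding_window_view(sig_demo, 512)[::256]
tf_demo = np.fft.fft(frames_demo * np.hanning(512), axis=-1)
print(mfcc(tf_demo, n_mfcc=20, fs=fs_demo).shape)  # (n_frame, 20)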
| 11,677
|
def transform(fname, metadata=False):
"""
This function reads a Mission Analysis Orbit file and performs a matrix
transformation on it. Currently only from the Mercury Equatorial frame to
the Earth Equatorial frame.
:param fname: The path to the orbit file.
:type fname: str.
    :param metadata: Flag to return the metadata dictionary
    :type metadata: bool.
    :returns: dict -- the transformed data (and the metadata dict when metadata is True).
"""
furnsh("/Users/jmcaulif/Code/naif/generic/lsk/naif0010.tls")
logging.basicConfig(level=logging.INFO)
mdata = {}
data = {}
    # The rotation matrix is constant, so build it (and its inverse) once
    # rather than on every line of the file.
    T = np.array([[0.98159386604468, 0.19098031873327, 0.0],
                  [-0.16775718426422, 0.86223242348167,
                   0.47792549108063],
                  [0.09127436261733, -0.46912873047114,
                   0.87840037851502]])
    Tinv = linalg.inv(T)
    with open(fname, 'r') as fh:
        for line in fh:
            t, x, y, z, vx, vy, vz = [float(x) for x in line.split()]
            r = np.array([[x, y, z]])
            v = np.array([[vx, vy, vz]])
            r_new = Tinv.dot(r.T).T
            v_new = Tinv.dot(v.T).T
            x, y, z = r_new[0]
            vx, vy, vz = v_new[0]
            t = et2utc(t * 86400, 'isoc', 2)
            print("{} {:9.2f} {:9.2f} {:9.2f} {:9.6f} {:9.6f} {:9.6f}".
                  format(t, x, y, z, vx, vy, vz))
if metadata:
return data, mdata
else:
return data
| 11,678
|
def query_repository(repo_name, index_uuid, token, display_results=False):
"""
Display the ids ('subjects') of all items indexed in a repository.
:param repo_name: Textual name of repository to query, corresponds to 'name' field in conf file.
    :param index_uuid: UUID of the search index to query.
    :param token: Bearer token used to authorize the search requests.
:param display_results: Print ids to standard output
:return: List of result ids
"""
LOGGER.info("Querying index %s for repository %s" % (index_uuid, repo_name))
querylimit = 20
headers = {'Authorization' : ('Bearer ' + token), 'Content-Type' : 'application/json'}
queryobj = {"@datatype": "GSearchRequest", "@version": "2016-11-09", "advanced": True, "offset": 0,
"limit": querylimit, "q": "*", "filters": [
{"@datatype": "GFilter", "@version": "2016-11-09", "type": "match_any",
"field_name": "https://frdr\\.ca/schema/1\\.0#origin\\.id", "values": [""]}]}
result_ids = []
queryobj["filters"][0]["values"][0] = repo_name
offset = 0
while True:
r = requests.post('https://' + _api_host + '/v1/index/' + index_uuid + '/search', headers=headers, json=queryobj)
search_results = json.loads(r.text)
results_count = search_results['count']
LOGGER.info("Got %i results" % (results_count))
if results_count == 0:
break
for result in search_results['gmeta']:
result_ids.append(result['subject'])
offset = offset + querylimit
queryobj["offset"] = offset
if display_results:
print('\n'.join(result_ids))
return result_ids
| 11,679
|
def clean_weight_func(value, kpi, scorecard):
"""
ensure that the total weight values are not more than 100%
"""
if scorecard:
try:
scorecard_kpis = ScorecardKPI.objects.filter(scorecard=scorecard)
except ScorecardKPI.DoesNotExist:
pass
else:
if kpi and kpi.id:
# this kpi already exists
# we remove it from the queryset so that its weight
# is not summed up below
scorecard_kpis = scorecard_kpis.exclude(kpi=kpi)
sum_dict = scorecard_kpis.aggregate(
weight_sum=Coalesce(Sum('kpi__weight'), Value(0)))
# sum of all weights in the scorecard, excluding the
# current one
weight_sum = sum_dict['weight_sum']
# ensure that the sums does not go above 100%
if (value + weight_sum) > Decimal(100):
raise forms.ValidationError(
_('The sum of the weights in a Scorecard cannot exceed'
' 100%. Please reduce the value of this weight, or'
' of other weights in this scorecard.'))
| 11,680
|
def power(x1, x2, out=None, where=True, dtype=None):
"""
First array elements raised to powers from second array, element-wise.
Raises each base in `x1` to the positionally-corresponding power in `x2`.
Note:
Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
not supported.
When `where` is provided, `out` must have a tensor value. `out` is not supported
for storing the result, however it can be used in combination with `where` to set
the value at indices for which `where` is set to False.
On GPU, the supported dtypes are np.float16, and np.float32.
Args:
x1 (Tensor): the bases.
x2 (Tensor): the exponents.
out (Tensor or None, optional): defaults to None.
where (Tensor or None, optional): For any non-default value of type other
than :class:`Tensor` or :class:`None`, the output retains its original value.
This condition is broadcasted over the input. At locations where the
condition is `True`, the out array will be set to the ufunc result.
Elsewhere, the out array will retain its original value. Note that
if an uninitialized out array is created via the default ``out=None``,
locations within it where the condition is `False` will remain
uninitialized.
dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
output Tensor.
Returns:
Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
is a scalar if both `x1` and `x2` are scalars.
Raises:
TypeError: if the input is not a tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> x1 = np.full((3, 2), [1, 2]).astype('float32')
>>> x2 = np.full((3, 2), [3, 4]).astype('float32')
>>> output = np.power(x1, x2)
>>> print(output)
[[ 1, 16],
[ 1, 16],
[ 1, 16]]
"""
return _apply_tensor_op(F.tensor_pow, x1, x2, out=out, where=where, dtype=dtype)
| 11,681
|
def _str2bool(s):
"""文字列からboolへの変換"""
return s.lower() in ["true", "t", "yes", "1"]
| 11,682
|
def _generate_nonce(length=42):
"""
Generate an alpha numeric string that is unique for each request.
Twitter used a 42 character alpha-numeric (case-sensitive) string in the
API documentation. However, they note "any approach which produces a
relatively random alphanumeric string should be OK here." I opted not to
use a cryptographically secure source of entropy. `SystemRandom` is
convenient, but it uses file IO to connect to `/dev/urandom`. Adding
`async` machinery here seems like expensive complexity.
"""
return "".join(random.choice(ALPHANUMERIC) for _ in range(length))
| 11,683
|
def env_vars_delete(env_var_id, deployment_name, version_name, assume_yes, quiet):
"""
Delete an environment variable.
\b
- When deployment_name and version_name are provided: the environment variable will be deleted on deployment
version level.
- When a deployment name is provided, but not a version name: the environment variable will be deleted on
deployment level.
- When no deployment_name nor a version name is provided: the environment variable will be deleted on
project level.
"""
project_name = get_current_project(error=True)
if version_name and not deployment_name:
raise Exception("Missing option <deployment_name>")
client = init_client()
confirm_message = "Are you sure you want to delete environment variable "
try:
if version_name:
response = client.deployment_version_environment_variables_get(
project_name=project_name, deployment_name=deployment_name, version=version_name, id=env_var_id
)
if assume_yes or click.confirm(confirm_message + "<%s> of deployment <%s> version <%s> in project <%s>?"
% (response.name, deployment_name, version_name, project_name)):
client.deployment_version_environment_variables_delete(
project_name=project_name, deployment_name=deployment_name, version=version_name, id=env_var_id
)
elif deployment_name:
response = client.deployment_environment_variables_get(
project_name=project_name, deployment_name=deployment_name, id=env_var_id
)
if assume_yes or click.confirm(confirm_message + "<%s> of deployment <%s> in project <%s>?"
% (response.name, deployment_name, project_name)):
client.deployment_environment_variables_delete(
project_name=project_name, deployment_name=deployment_name, id=env_var_id
)
else:
response = client.project_environment_variables_get(project_name=project_name, id=env_var_id)
if assume_yes or click.confirm(confirm_message + "<%s> of project <%s>?" % (response.name, project_name)):
client.project_environment_variables_delete(project_name=project_name, id=env_var_id)
except api.exceptions.ApiException as e:
if hasattr(e, "status") and e.status == 404:
click.echo("%s %s" % (click.style('Warning:', fg='yellow'), WARNING_MSG))
raise e
client.api_client.close()
if not quiet:
click.echo("Environment variable was successfully deleted")
| 11,684
|
def is_expired(image_id: str) -> bool:
"""
Check whether entry is expired (based on timestamp encoded in the image_id)
"""
ts = expiration(image_id)
now = datetime.now(timezone.utc)
if ts is None:
log.debug("Invalid cache entry ID: %s", image_id)
return True
log.debug("entry: %s (expiration ts: %s)", image_id, ts.isoformat())
return ts < now
| 11,685
|
def int_(value):
"""Validate that the config option is an integer.
Automatically also converts strings to ints.
"""
check_not_templatable(value)
if isinstance(value, int):
return value
if isinstance(value, float):
if int(value) == value:
return int(value)
raise Invalid(
f"This option only accepts integers with no fractional part. Please remove the fractional part from {value}"
)
value = string_strict(value).lower()
base = 10
if value.startswith("0x"):
base = 16
try:
return int(value, base)
except ValueError:
# pylint: disable=raise-missing-from
raise Invalid(f"Expected integer, but cannot parse {value} as an integer")
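# A brief usage sketch (not part of the original module), assuming the
# check_not_templatable/string_strict helpers behave as their names suggest:
# ints, whole-number floats, and decimal/hex strings are all accepted.
print(int_(7), int_(3.0), int_("42"), int_("0x1A"))  # 7 3 42 26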
| 11,686
|
def explode(screen):
"""Convert a string representing a screen
display into a list of lists."""
return [list(row) for row in screen.split('\n')]
| 11,687
|
def covert_to_bin():
"""
save dataset with bin format
:return: no return
"""
dataset = lstm_create_dataset(config.preprocess_path, config.batch_size, training=False)
img_path = os.path.join(config.result_path, "00_data")
os.makedirs(img_path)
label_list = []
for i, data in enumerate(dataset.create_dict_iterator(output_numpy=True)):
file_name = "LSTM_data_bs" + str(config.batch_size) + "_" + str(i) + ".bin"
file_path = img_path + "/" + file_name
data['feature'].tofile(file_path)
label_list.append(data['label'])
print('processed {}.'.format(file_name))
# save as npy
np.save(config.result_path + "label_ids.npy", label_list)
# save as txt
sentence_labels = np.array(label_list)
sentence_labels = sentence_labels.reshape(-1, 1).astype(np.int32)
np.savetxt(config.result_path + "labels.txt", sentence_labels)
print("=" * 20, "export bin files finished", "=" * 20)
| 11,688
|
def merge_csv_files(directory, out):
"""\
Merges the CSV files in the provided `directory` into one CSV file.
    :param str directory: Path where to find the CSV files (must end with a path separator, since the glob pattern is directory + '*.csv')
:param str out: Resulting file name.
"""
f = open(out, 'w', encoding='utf-8')
writer = csv.writer(f)
writerow = writer.writerow
writerow(['URL', 'Draft Date', 'Document Number', 'Film Number', 'From', 'Subject', 'TAGS', 'To'])
cnt = 0
for fn in sorted(glob.glob(directory + '*.csv'), key=lambda fn: int(os.path.basename(fn).split('.')[0])):
with open(fn, 'r', encoding='utf-8') as inputfile:
reader = csv.reader(inputfile)
for row in reader:
cnt += 1
writerow(row)
f.close()
return cnt
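# A usage sketch with hypothetical paths: the input files must be named by an
# integer index ("0.csv", "1.csv", ...) to satisfy the sort key above, and the
# directory string must end with a separator.
total_rows = merge_csv_files("cables/parts/", "cables/merged.csv")
print("merged", total_rows, "rows")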
| 11,689
|
def test_get_system_service_sdp(sros_parser, parsed_system_sdp):
"""
Test extracting system SDPs
"""
result = sros_parser.get_system_service_sdp()
    assert result == parsed_system_sdp
| 11,690
|
def basic_info(user, keys):
"""Prints a table of basic user information"""
table = formatting.KeyValueTable(['Title', 'Basic Information'])
table.align['Title'] = 'r'
table.align['Basic Information'] = 'l'
table.add_row(['Id', user.get('id', '-')])
table.add_row(['Username', user.get('username', '-')])
if keys:
for key in user.get('apiAuthenticationKeys'):
table.add_row(['APIKEY', key.get('authenticationKey')])
table.add_row(['Name', "%s %s" % (user.get('firstName', '-'), user.get('lastName', '-'))])
table.add_row(['Email', user.get('email')])
table.add_row(['OpenID', user.get('openIdConnectUserName')])
address = "%s %s %s %s %s %s" % (
user.get('address1'), user.get('address2'), user.get('city'), user.get('state'),
user.get('country'), user.get('postalCode'))
table.add_row(['Address', address])
table.add_row(['Company', user.get('companyName')])
table.add_row(['Created', user.get('createDate')])
table.add_row(['Phone Number', user.get('officePhone')])
if user.get('parentId', False):
table.add_row(['Parent User', utils.lookup(user, 'parent', 'username')])
table.add_row(['Status', utils.lookup(user, 'userStatus', 'name')])
table.add_row(['PPTP VPN', user.get('pptpVpnAllowedFlag', 'No')])
table.add_row(['SSL VPN', user.get('sslVpnAllowedFlag', 'No')])
for login in user.get('unsuccessfulLogins', {}):
login_string = "%s From: %s" % (login.get('createDate'), login.get('ipAddress'))
table.add_row(['Last Failed Login', login_string])
break
for login in user.get('successfulLogins', {}):
login_string = "%s From: %s" % (login.get('createDate'), login.get('ipAddress'))
table.add_row(['Last Login', login_string])
break
return table
| 11,691
|
def iobes_iob(tags):
"""
IOBES -> IOB
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'rel':
new_tags.append(tag)
elif tag.split('-')[0] == 'B':
new_tags.append(tag)
elif tag.split('-')[0] == 'I':
new_tags.append(tag)
elif tag.split('-')[0] == 'S':
new_tags.append(tag.replace('S-', 'B-'))
elif tag.split('-')[0] == 'E':
new_tags.append(tag.replace('E-', 'I-'))
elif tag.split('-')[0] == 'O':
new_tags.append(tag)
else:
raise Exception('Invalid format!')
return new_tags
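# Quick illustrative round-trip (hypothetical tags): S-* becomes B-*, E-*
# becomes I-*, and B-*/I-*/O/'rel' pass through unchanged.
print(iobes_iob(["B-PER", "E-PER", "S-LOC", "O", "rel"]))
# -> ['B-PER', 'I-PER', 'B-LOC', 'O', 'rel']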
| 11,692
|
def draw_handler(canvas):
"""
Event handler that is responsible for all drawing. It
receives "canvas" object and draws the "Pong" table,
the "moving" ball and the scores of each "Player".
It is also responsible for testing whether the ball
touches/collides with the "gutters" or the "paddles".
"""
# These are (global) numbers; vertical "position" of
# each "paddle".
global paddle1_pos, paddle2_pos
# These are (global) numbers; "score" of each "Player".
global score1, score2
# These are vectors stored as (global) "[x,y]" lists;
# ball "position" and "velocity".
global ball_pos, ball_vel
# This is (global) number; keeps track of the time in
# "seconds".
global seconds
# Draw middle line and "gutters" of "Pong" table.
canvas.draw_line([WIDTH / 2, 0], [WIDTH / 2, HEIGHT], LINE_WIDTH, COLOR)
canvas.draw_line([PAD_WIDTH, 0], [PAD_WIDTH, HEIGHT], LINE_WIDTH, COLOR)
canvas.draw_line([WIDTH - PAD_WIDTH, 0], [WIDTH - PAD_WIDTH, HEIGHT], LINE_WIDTH, COLOR)
# "Postpone" the beginning of new game if "Timer" is
# already running by "reseting" ball "position" at the
# middle of the table.
if timer.is_running():
ball_pos = [WIDTH / 2, HEIGHT / 2]
# Print message about the remaining time until the
# beginning of the new game by referencing the
# global "seconds" counter.
canvas.draw_text("new game will start in " +
str(NEW_GAME_DELAY - seconds) +
" seconds" +
("." * (NEW_GAME_DELAY - seconds)),
[WIDTH // 12, 3 * HEIGHT // 4], 3 * FONT_SIZE // 10, COLOR, FONT_FACE)
else:
# "Timer" has expired; update ball "position" for
# the new game.
ball_pos[0] += ball_vel[0]
ball_pos[1] += ball_vel[1]
# Test whether the ball touches/collides with the left
# "gutter" (offset from the left edge of the "canvas"
# by the width of the "paddle").
if ball_pos[0] <= (BALL_RADIUS + PAD_WIDTH):
# Check whether the ball is actually striking left
# "paddle" when it touches left "gutter". If so,
# reflect the ball back into play; ball's "velocity"
# increased by the "acceleration" configured.
if ((paddle1_pos - HALF_PAD_HEIGHT) <= ball_pos[1] <= (paddle1_pos + HALF_PAD_HEIGHT)):
ball_vel[0] = -ball_vel[0] * BALL_VELOCITY_ACCELERATION
else:
# Ball touched "gutter". Respawn the ball in
# the center of the table headed towards the
# opposite "gutter" and of course update score
# of "Player" 2 (right) by the "points"
# configured.
score2 += POINTS
# Start a game of "Pong". Start also a "Timer"
# to "postpone" the beginning of the new game.
if not timer.is_running():
timer.start()
spawn_ball(RIGHT)
# Test whether the ball touches/collides with the right
# "gutter" (offset from the right edge of the "canvas"
# by the width of the "paddle").
elif ball_pos[0] >= ((WIDTH - 1) - BALL_RADIUS - PAD_WIDTH):
# Check whether the ball is actually striking right
# "paddle" when it touches right "gutter". If so,
# reflect the ball back into play; ball's "velocity"
# increased by the "acceleration" configured.
if ((paddle2_pos - HALF_PAD_HEIGHT) <= ball_pos[1] <= (paddle2_pos + HALF_PAD_HEIGHT)):
ball_vel[0] = -ball_vel[0] * BALL_VELOCITY_ACCELERATION
else:
# Ball touched "gutter". Respawn the ball in
# the center of the table headed towards the
# opposite "gutter" and of course update score
# of "Player" 1 (left) by the "points"
# configured.
score1 += POINTS
# Start a game of "Pong". Start also a "Timer"
# to "postpone" the beginning of the new game.
if not timer.is_running():
timer.start()
spawn_ball(LEFT)
# Collide and reflect off of top side of the "canvas".
elif ball_pos[1] <= BALL_RADIUS:
ball_vel[1] = -ball_vel[1]
# Collide and reflect off of bottom side of the "canvas".
elif ball_pos[1] >= ((HEIGHT - 1) - BALL_RADIUS):
ball_vel[1] = -ball_vel[1]
# Draw a ball moving across the "Pong" table.
canvas.draw_circle(ball_pos, BALL_RADIUS, 2 * LINE_WIDTH, COLOR, COLOR)
# Update paddle's vertical "position", by
# referencing the two global variables that contain the
# vertical "velocities" of the "paddle". Keep "paddle"
# on the screen by calling the proper "helper" function.
if keep_paddle_on_screen(paddle1_pos, paddle1_vel):
paddle1_pos += paddle1_vel
if keep_paddle_on_screen(paddle2_pos, paddle2_vel):
paddle2_pos += paddle2_vel
# Draw left and right "paddles" in their respective
# "gutters".
canvas.draw_polygon([[0, paddle1_pos - HALF_PAD_HEIGHT],
[PAD_WIDTH, paddle1_pos - HALF_PAD_HEIGHT],
[PAD_WIDTH, paddle1_pos + HALF_PAD_HEIGHT],
[0, paddle1_pos + HALF_PAD_HEIGHT]],
LINE_WIDTH, COLOR, COLOR)
canvas.draw_polygon([[WIDTH - PAD_WIDTH, paddle2_pos - HALF_PAD_HEIGHT],
[WIDTH , paddle2_pos - HALF_PAD_HEIGHT],
[WIDTH, paddle2_pos + HALF_PAD_HEIGHT],
[WIDTH - PAD_WIDTH, paddle2_pos + HALF_PAD_HEIGHT]],
LINE_WIDTH, COLOR, COLOR)
# Draw scores;
# but first get the width of the "score" text in pixels
# for each "Player"; useful in (later) computing the
# position to draw the "score" text - centered justified
# on the "canvas field" of each player.
score_textwidth_in_px = frame.get_canvas_textwidth(str(score1), FONT_SIZE, FONT_FACE)
score_point_x = (WIDTH // 4) - (score_textwidth_in_px // 2)
score_point_y = (HEIGHT // 4)
canvas.draw_text(str(score1), [score_point_x, score_point_y], FONT_SIZE, COLOR, FONT_FACE)
score_textwidth_in_px = frame.get_canvas_textwidth(str(score2), FONT_SIZE, FONT_FACE)
score_point_x = (3 * WIDTH // 4) - (score_textwidth_in_px // 2)
score_point_y = (HEIGHT // 4)
canvas.draw_text(str(score2), [score_point_x, score_point_y], FONT_SIZE, COLOR, FONT_FACE)
return None
| 11,693
|
def tf_idf(df, vocab):
"""[summary]
https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
Args:
docs ([type]): [description]
"""
docs = []
for text in df['text'].tolist():
docs += [text]
vectorizer = TfidfVectorizer(tokenizer=token_list, lowercase=False, vocabulary=vocab)
vectors = vectorizer.fit_transform(docs)
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
tfidf_matrix = pd.DataFrame(denselist, columns=feature_names)
return tfidf_matrix
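# A minimal usage sketch (hypothetical data), assuming pandas is imported as pd
# and that the module-level token_list tokenizer splits documents into tokens;
# the exact scores depend on that tokenizer.
df_demo = pd.DataFrame({"text": ["the cat sat", "the dog sat down"]})
print(tf_idf(df_demo, ["cat", "dog", "sat"]))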
| 11,694
|
def runtest_teardown(item: IPyNbFile, nextitem: typing.Optional[IPyNbFile]) -> None:
"""Propagate cell failure."""
# Do the normal teardown
item.teardown()
# Inform next cell of the notebook of failure of any previous cells
if hasattr(item, "_force_skip"):
if nextitem is not None and nextitem.name != "Cell 0":
nextitem._force_skip = True
| 11,695
|
def test_tree_2_nodes_left_unbalanced(one_t):
"""Tree with 2 nodes and left sided should return balance of 1."""
one_t.insert(9)
assert one_t.balance() == 1
| 11,696
|
def setup_checkpoint_dir(cfg, args, phase):
"""Create checkpoint directory
# ROUND2-TODO: let's make this checkpoint director way more involved. Specific to user, to model, to config name, etc.
"""
root_dir = os.path.join(
cfg["checkpoints_dir"], cfg["model"]["name"], args.config_name
)
ckpt_dir = os.path.join(root_dir, "checkpoints")
if not os.path.exists(ckpt_dir):
if phase == "train":
os.makedirs(ckpt_dir)
else:
raise FileNotFoundError("Checkpoint directory doesn't exist!")
return ckpt_dir, root_dir
| 11,697
|
def get_activation_func(activation):
"""Turns a string activation function name into a function.
"""
if isinstance(activation, string_types):
# Get the activation function.
activation = activation.lower()
if activation == "tanh":
activation_func = tanh
elif activation == "abstanh":
activation_func = abs_tanh
elif activation in ["sig", "sigmoid"]:
activation_func = sigmoid
elif activation in ["rec", "relu"]:
activation_func = rectify
elif activation in ["prelu_shelf"]:
activation_func = parametric_flat_relu
elif activation == "relu_max":
activation_func = rectify_max # For performance comparisons with abs version of rectify
elif activation in ["rec_para", "prelu"]:
activation_func = parametric_rectifier
elif activation == "maxout":
activation_func = maxout
elif activation == "linear":
activation_func = linear
else:
raise ValueError("Unrecognized activation: {}".format(activation))
else:
activation_func = activation
return activation_func
| 11,698
|
def number_of_cores():
"""
number_of_cores()
Detect the number of cores in this system.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
        else:  # OSX:
            # os.popen2 was removed in Python 3; os.popen works on 2 and 3.
            return int(os.popen("sysctl -n hw.ncpu").read())
# Windows:
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
return 1
| 11,699