| content | id |
|---|---|
def _load_from_url(url: str,
chinese_only=False) -> Mapping[str, DictionaryEntry]:
"""Reads the dictionary from a local file
"""
logging.info('Opening the dictionary remotely')
with urllib.request.urlopen(url) as dict_file:
data = dict_file.read().decode('utf-8')
return _load_dictionary(data.splitlines(), chinese_only)
| 5,342,100
|
def get_images():
"""
Canned response for glance images list call
"""
return images
| 5,342,101
|
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
letters = 'abcdefghijklmnopqrstuvwxyz'
index = [s.index(l) for l in letters if s.count(l) == 1]
return min(index) if len(index) > 0 else -1
| 5,342,102
|
def get_form_class_for_class(klass):
"""
A helper function for creating a model form class for a model on the fly. This is used with models (usually
part of an inheritance hierarchy) which define a function **get_editable_fields** which returns an iterable
of the field names which should be placed in the form.
"""
meta_dict = dict(model=klass)
if hasattr(klass, 'get_editable_fields'):
meta_dict['fields'] = klass.get_editable_fields()
    meta = type('Meta', (), meta_dict)
modelform_class = type('modelform', (forms.ModelForm,), {"Meta": meta})
return modelform_class
| 5,342,103
|
def dict_remove_key(d, key, default=None):
"""
    Removes a key from a dict __WITH__ side effects.
Returns the found value if it was there (default=None). It also modifies the original dict.
"""
return d.pop(key, default)
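# Brief usage sketch for dict_remove_key above, illustrating the side effect
# described in the docstring (the names below are only for demonstration):
d = {"a": 1, "b": 2}
print(dict_remove_key(d, "a"), d)      # -> 1 {'b': 2}
print(dict_remove_key(d, "missing"))   # -> None (dict left unchanged)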
| 5,342,104
|
def write_pickle(obj, path, makedir=True):
"""Write object in Python pickle format."""
# TODO: use normal pickling by implementing pickling protocol for Network
# class http://docs.python.org/library/pickle.html#the-pickle-protocol
# TODO: find out origin of maximum recursion depth problem, hack solution:
sys.setrecursionlimit(6000)
try:
os.makedirs(os.path.split(path)[0])
    except OSError as e:
        if e.errno != errno.EEXIST and e.filename != '':
raise
fh = _get_fh(str(path), mode='wb')
pickle.dump(obj, fh, pickle.HIGHEST_PROTOCOL)
fh.close()
logger.info('instance of %s saved in %s' % (str(obj.__class__), path))
| 5,342,105
|
def momentum(snap: Snap, mask: Optional[ndarray] = None) -> ndarray:
"""Calculate the total momentum vector on a snapshot.
Parameters
----------
snap
The Snap object.
mask
Mask the particle arrays. Default is None.
Returns
-------
ndarray
The total momentum as a vector (px, py, pz).
"""
mass: ndarray = snap['mass']
vel: ndarray = snap['velocity']
if mask is None:
return (mass * vel).sum(axis=0)
return (mass * vel)[mask].sum(axis=0)
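# A minimal numpy sketch of the same computation, without a Snap object
# (values are illustrative): total momentum is the mass-weighted sum of the
# per-particle velocity vectors.
import numpy as np

mass = np.array([[1.0], [2.0]])               # shape (N, 1)
vel = np.array([[0.0, 1.0, 0.0],
                [1.0, 0.0, 0.0]])             # shape (N, 3)
print((mass * vel).sum(axis=0))               # -> [2. 1. 0.]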
| 5,342,106
|
def limit_epochs(tensor, num_epochs=None, name=None):
"""Returns tensor num_epochs times and then raises an OutOfRange error.
Args:
tensor: Any Tensor.
num_epochs: An integer (optional). If specified, limits the number
of steps the output tensor may be evaluated.
name: A name for the operations (optional).
Returns:
tensor or OutOfRange.
"""
if num_epochs is None:
return tensor
if num_epochs <= 0:
raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
with ops.op_scope([tensor], name, "limit_epochs") as name:
zero64 = constant_op.constant(0, dtype=types.int64)
epochs = variables.Variable(zero64, name="epochs")
counter = epochs.count_up_to(num_epochs)
with ops.control_dependencies([counter]):
return array_ops.identity(tensor, name=name)
| 5,342,107
|
def init_app(app):
"""Register database functions with app."""
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
app.cli.add_command(mock_db_command)
| 5,342,108
|
def hex_machine(emit):
"""
State machine for hex escaped characters in strings
Args:
emit (callable): callback for parsed value (number)
Returns:
callable: hex-parsing state machine
"""
left = 4
num = 0
def _hex(byte_data):
nonlocal num, left
if 0x30 <= byte_data <= 0x39: # 0-9
i = byte_data - 0x30
elif 0x61 <= byte_data <= 0x66: # a-f
i = byte_data - 0x57
elif 0x41 <= byte_data <= 0x46: # A-F
i = byte_data - 0x37
else:
raise Exception(
"Invalid hex char in string hex escape: " + hex(byte_data))
left -= 1
num |= i << (left * 4)
if left:
return _hex
return emit(num)
return _hex
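# A small usage sketch for hex_machine above: feed the four ASCII bytes of the
# escape "0041" one at a time; the call on the final byte invokes emit with the
# parsed value (0x41, i.e. 'A').
state = hex_machine(lambda value: print(hex(value)))
for b in b"0041":
    state = state(b)  # prints "0x41" after the final byte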
| 5,342,109
|
def write_json_to_file(file: str, data: Union[dict, list]):
"""
    Writes the given data to the given file as JSON
    :param file: File to write to
    :param data: Dict or list to write
:return:
"""
with open(file, "w", encoding="utf-8") as file_handle:
json.dump(data, file_handle, ensure_ascii=False)
| 5,342,110
|
def isbns(self, key, value):
"""Translates isbns fields."""
_isbns = self.get("identifiers", [])
for v in force_list(value):
subfield_u = clean_val("u", v, str)
isbn = {
"value": clean_val("a", v, str) or clean_val("z", v, str),
"scheme": "ISBN",
}
if not isbn["value"]:
raise IgnoreKey("identifiers")
if subfield_u:
volume = re.search(r"(\(*v[.| ]*\d+.*\)*)", subfield_u)
if volume:
volume = volume.group(1)
subfield_u = subfield_u.replace(volume, "").strip()
existing_volume = self.get("volume")
if existing_volume:
raise ManualImportRequired(subfield="u")
self["volume"] = volume
# WARNING! vocabulary document_identifiers_materials
material = mapping(
IDENTIFIERS_MEDIUM_TYPES,
subfield_u,
field=key, subfield="u"
)
if material:
isbn.update({"material": material})
if isbn not in _isbns:
_isbns.append(isbn)
return _isbns
| 5,342,111
|
def create_user(name, age, occupation):
"""
Function to post a new user.
Parameters
----------
name : str
Name of the user.
age : int
Age of the user.
occupation : str
Occupation of the user.
Returns
-------
message : str
request_status : int
HTTP response status code.
`400` "User already exists"
`201` "Created User `name`"
Examples
--------
>>> create_user(name = "micha", age= 28, occupation = 'PhD Student')
"Created User micha", 201
"""
# create a user
user = dict(
name = name,
age = age,
occupation = occupation,
)
# post it (as shortcut)
resp = requests.post("{}/user/{}".format(server,name), json=user)
if resp.status_code == 400:
return "User already exists", resp.status_code
elif resp.status_code == 201:
return "Created User {}".format(name), resp.status_code
else:
raise ApiError("Some unexpected ERROR code: {}".format(resp.status_code))
| 5,342,112
|
def gauss_dataset(dim, size=1e6):
"""
    Creates a dataset of randomly sampled Gaussian noise.
    The returned SampleDataset yields bsize-sized batches of dim-dimensional vectors.
"""
def samplef(bsize):
return torch.randn(bsize, dim)
ret = SampleDataset(samplef, size=size)
return ret
| 5,342,113
|
def nightwatch_environment(request): # convenience spelling
"""Run tests against this environment (staging, production, etc.)"""
return request.config.getoption('--nightwatch-environment')
| 5,342,114
|
def get_hrs(pid_arg):
"""
Pulls all recorded heart rate data for a patient from the database
Args:
pid_arg: patient_id to pull heart rate data for
Returns:
list: containing all recorded heart rates
"""
u5 = User.objects.raw({"_id": pid_arg}).first()
return u5.heart_rate
| 5,342,115
|
def resolve(match, *objects):
"""Given an array of objects and a regex match, this function returns the first
matched group if it exists in one of the objects, otherwise returns the orginial
fully matches string by the regex.
Example: if regex = \\\.([a-z]) and string = test\.abc, then
the match = {group0: \.abc, group1: abc}. Assuimg one object:
- obj = {abc: def}, then we return 'def'
- obj = {test: value}, then we return \.abc
Args:
objects (array[dict]): the array of objects we use to look up the key in match.group(1)
match: the regex match object
Returns:
        str: the value of the matched group(1) in the first object found, if it exists; otherwise
        returns the fully matched string.
"""
for obj in objects:
if obj is not None and match.group(1) in obj:
return str(obj[match.group(1)])
return match.group(0)
| 5,342,116
|
def Collider_CollisionGroupsWorkflow():
# type: () -> None
"""
Summary:
Runs an automated test to ensure PhysX collision groups dictate whether collisions happen or not.
The test has two phases (A and B) for testing collision groups under different circumstances. Phase A
is run first and upon success Phase B starts.
Level Description:
    Entities can be divided into 2 groups for the two phases, A and B. Each phase has identical entities with the exception
    of Terrain, where Terrain_A has a collision group/layer set for demo_group1/demo1 and Terrain_B has a collision
    group/layer set for demo_group2/demo2.
    Each Phase has two boxes, Box_1 and Box_2, where each box has its collision group/layer set to its number
    (1 or 2). Each box is positioned just above the Terrain with gravity enabled.
    All entities for Phase B are deactivated by default. If Phase A is set up and executed successfully, its
entities are deactivated and Phase B's entities are activated and validated before running the Phase B test.
Expected behavior:
    When Phase A starts, its two boxes should fall toward the terrain. Once the boxes' behavior is validated, the
    entities from Phase A are deactivated and Phase B's entities are activated. Like in Phase A, the boxes in Phase B
    should fall towards the terrain. If all goes as expected, Box_1_A and Box_2_B should collide with the terrain, and
    Box_2_A and Box_1_B should fall through the terrain.
Test Steps:
0) [Define helper classes and functions]
1) Load the level
2) Enter game mode
3) Retrieve and validate entities
4) Phase A
a) set up
b) execute test
c) log results (deactivate Phase A entities)
5) Phase B
a) set up (activate Phase B entities)
b) execute test
c) log results
6) close editor
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
- The level for this test uses two PhysX Terrains and must be run with cmdline argument "-autotest_mode"
to suppress the warning for having multiple terrains.
:return: None
"""
import os
import sys
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
import azlmbr.legacy.general as general
import azlmbr.bus
import azlmbr
# ******* Helper Classes ********
# Phase A's test results
class PhaseATestData:
total_results = 2
box_1_collided = False
box_1_fell_through = True
box_2_collided = False
box_2_fell_through = False
box_1 = None
box_2 = None
terrain = None
box_1_pos = None
box_2_pos = None
terrain_pos = None
@staticmethod
# Quick check for validating results for Phase A
def valid():
return (
PhaseATestData.box_1_collided
and PhaseATestData.box_2_fell_through
and not PhaseATestData.box_1_fell_through
and not PhaseATestData.box_2_collided
)
# Phase B's test results
class PhaseBTestData:
total_results = 2
box_1_collided = False
box_1_fell_through = False
box_2_collided = False
box_2_fell_through = True
box_1 = None
box_2 = None
terrain = None
box_1_pos = None
box_2_pos = None
terrain_pos = None
@staticmethod
# Quick check for validating results for Phase B
def valid():
return (
not PhaseBTestData.box_1_collided
and not PhaseBTestData.box_2_fell_through
and PhaseBTestData.box_1_fell_through
and PhaseBTestData.box_2_collided
)
# **** Helper Functions ****
# ** Validation helpers **
# Attempts to validate an entity based on the name parameter
def validate_entity(entity_name, msg_tuple):
# type: (str, (str, str)) -> EntityId
entity_id = general.find_game_entity(entity_name)
Report.critical_result(msg_tuple, entity_id.IsValid())
return entity_id
# Attempts to retrieve an entity's initial position and logs result
def validate_initial_position(entity_id, msg_tuple):
# type: (EntityId, (str, str)) -> azlmbr.math.Vector3
# Attempts to validate and return the entity's initial position.
# logs the result to Report.result() using the tuple parameter
pos = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldTranslation", entity_id)
valid = not (pos is None or pos.IsZero())
entity_name = azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "GetEntityName", entity_id)
Report.critical_result(msg_tuple, valid)
Report.info_vector3(pos, "{} initial position:".format(entity_name))
return pos
    # ** Phase completion checks **
# Checks if we are done collecting data for phase A
def done_collecting_results_a():
# type: () -> bool
# Update positions
PhaseATestData.box_1_pos = azlmbr.components.TransformBus(
azlmbr.bus.Event, "GetWorldTranslation", PhaseATestData.box_1
)
PhaseATestData.box_2_pos = azlmbr.components.TransformBus(
azlmbr.bus.Event, "GetWorldTranslation", PhaseATestData.box_2
)
# Check for boxes to fall through terrain
if PhaseATestData.box_1_pos.z < PhaseATestData.terrain_pos.z:
PhaseATestData.box_1_fell_through = True
else:
PhaseATestData.box_1_fell_through = False
if PhaseATestData.box_2_pos.z < PhaseATestData.terrain_pos.z:
PhaseATestData.box_2_fell_through = True
else:
PhaseATestData.box_2_fell_through = False
results = 0
if PhaseATestData.box_1_collided or PhaseATestData.box_1_fell_through:
results += 1
if PhaseATestData.box_2_collided or PhaseATestData.box_2_fell_through:
results += 1
return results == PhaseATestData.total_results
# Checks if we are done collecting data for phase B
def done_collecting_results_b():
# type: () -> bool
# Update positions
PhaseBTestData.box_1_pos = azlmbr.components.TransformBus(
azlmbr.bus.Event, "GetWorldTranslation", PhaseBTestData.box_1
)
PhaseBTestData.box_2_pos = azlmbr.components.TransformBus(
azlmbr.bus.Event, "GetWorldTranslation", PhaseBTestData.box_2
)
# Check for boxes to fall through terrain
if PhaseBTestData.box_1_pos.z < PhaseBTestData.terrain_pos.z:
PhaseBTestData.box_1_fell_through = True
else:
PhaseBTestData.box_1_fell_through = False
if PhaseBTestData.box_2_pos.z < PhaseBTestData.terrain_pos.z:
PhaseBTestData.box_2_fell_through = True
else:
PhaseBTestData.box_2_fell_through = False
results = 0
if PhaseBTestData.box_1_collided or PhaseBTestData.box_1_fell_through:
results += 1
if PhaseBTestData.box_2_collided or PhaseBTestData.box_2_fell_through:
results += 1
return results == PhaseBTestData.total_results
# **** Event Handlers ****
    # Collision event handler for Phase A
def on_collision_begin_a(args):
# type: ([EntityId]) -> None
collider_id = args[0]
if (not PhaseATestData.box_1_collided) and PhaseATestData.box_1.Equal(collider_id):
Report.info("Box_1_A / Terrain_A collision detected")
PhaseATestData.box_1_collided = True
if (not PhaseATestData.box_2_collided) and PhaseATestData.box_2.Equal(collider_id):
Report.info("Box_2_A / Terrain_A collision detected")
PhaseATestData.box_2_collided = True
# Collision event handler for Phase B
def on_collision_begin_b(args):
# type: ([EntityId]) -> None
collider_id = args[0]
if (not PhaseBTestData.box_1_collided) and PhaseBTestData.box_1.Equal(collider_id):
Report.info("Box_1_B / Terrain_B collision detected")
PhaseBTestData.box_1_collided = True
if (not PhaseBTestData.box_2_collided) and PhaseBTestData.box_2.Equal(collider_id):
Report.info("Box_2_B / Terrain_B collision detected")
PhaseBTestData.box_2_collided = True
TIME_OUT = 1.5
# 1) Open level
helper.init_idle()
helper.open_level("Physics", "Collider_CollisionGroupsWorkflow")
# 2) Enter game mode
helper.enter_game_mode(Tests.enter_game_mode)
# 3) Retrieve and validate entities
PhaseATestData.box_1 = validate_entity("Box_1_A", Tests.box_1_a_valid)
PhaseATestData.box_2 = validate_entity("Box_2_A", Tests.box_2_a_valid)
PhaseATestData.terrain = validate_entity("Terrain_Entity_A", Tests.terrain_a_valid)
PhaseBTestData.box_1 = validate_entity("Box_1_B", Tests.box_1_b_valid)
PhaseBTestData.box_2 = validate_entity("Box_2_B", Tests.box_2_b_valid)
PhaseBTestData.terrain = validate_entity("Terrain_Entity_B", Tests.terrain_b_valid)
# Make sure Phase B objects are disabled
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseBTestData.box_1)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseBTestData.box_2)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseBTestData.terrain)
# 4) *********** Phase A *****************
# 4.a) ** Set Up **
Report.info(" **** Beginning Phase A **** ")
# Locate Phase A entities
PhaseATestData.box_1_pos = validate_initial_position(PhaseATestData.box_1, Tests.box_1_a_pos_found)
PhaseATestData.box_2_pos = validate_initial_position(PhaseATestData.box_2, Tests.box_2_a_pos_found)
PhaseATestData.terrain_pos = validate_initial_position(PhaseATestData.terrain, Tests.terrain_a_pos_found)
# Assign Phase A event handler
handler_a = azlmbr.physics.CollisionNotificationBusHandler()
handler_a.connect(PhaseATestData.terrain)
handler_a.add_callback("OnCollisionBegin", on_collision_begin_a)
# 4.b) Execute Phase A
if not helper.wait_for_condition(done_collecting_results_a, TIME_OUT):
Report.info("Phase A timed out: make sure the level is set up properly or adjust time out threshold")
# 4.c) Log results for Phase A
Report.result(Tests.box_1_a_did_collide_with_terrain, PhaseATestData.box_1_collided)
Report.result(Tests.box_1_a_did_not_pass_through_terrain, not PhaseATestData.box_1_fell_through)
Report.info_vector3(PhaseATestData.box_1_pos, "Box_1_A's final position:")
Report.result(Tests.box_2_a_did_pass_through_terrain, PhaseATestData.box_2_fell_through)
Report.result(Tests.box_2_a_did_not_collide_with_terrain, not PhaseATestData.box_2_collided)
Report.info_vector3(PhaseATestData.box_2_pos, "Box_2_A's final position:")
if not PhaseATestData.valid():
Report.info("Phase A failed test")
# Deactivate entities for Phase A
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseATestData.box_1)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseATestData.box_2)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseATestData.terrain)
# 5) *********** Phase B *****************
# 5.a) ** Set Up **
Report.info(" *** Beginning Phase B *** ")
# Activate entities for Phase B
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "ActivateGameEntity", PhaseBTestData.box_1)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "ActivateGameEntity", PhaseBTestData.box_2)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "ActivateGameEntity", PhaseBTestData.terrain)
# Initialize positions for Phase B
PhaseBTestData.box_1_pos = validate_initial_position(PhaseBTestData.box_1, Tests.box_1_b_pos_found)
PhaseBTestData.box_2_pos = validate_initial_position(PhaseBTestData.box_2, Tests.box_2_b_pos_found)
PhaseBTestData.terrain_pos = validate_initial_position(PhaseBTestData.terrain, Tests.terrain_b_pos_found)
# Assign Phase B event handler
handler_b = azlmbr.physics.CollisionNotificationBusHandler()
handler_b.connect(PhaseBTestData.terrain)
handler_b.add_callback("OnCollisionBegin", on_collision_begin_b)
# 5.b) Execute Phase B
if not helper.wait_for_condition(done_collecting_results_b, TIME_OUT):
Report.info("Phase B timed out: make sure the level is set up properly or adjust time out threshold")
# 5.c) Log results for Phase B
Report.result(Tests.box_1_b_did_not_collide_with_terrain, not PhaseBTestData.box_1_collided)
Report.result(Tests.box_1_b_did_pass_through_terrain, PhaseBTestData.box_1_fell_through)
Report.info_vector3(PhaseBTestData.box_1_pos, "Box_1_B's final position:")
Report.result(Tests.box_2_b_did_not_pass_through_terrain, not PhaseBTestData.box_2_fell_through)
Report.result(Tests.box_2_b_did_collide_with_terrain, PhaseBTestData.box_2_collided)
Report.info_vector3(PhaseBTestData.box_2_pos, "Box_2_B's final position:")
if not PhaseBTestData.valid():
Report.info("Phase B failed test")
# 6) Exit Game mode
helper.exit_game_mode(Tests.exit_game_mode)
Report.info(" **** TEST FINISHED ****")
| 5,342,117
|
def _timestamp(zone="Europe/Istanbul") -> int:
"""Return timestamp of now."""
return int(time.mktime(datetime.now(timezone(zone)).timetuple()))
| 5,342,118
|
def sumReplacements(tex, functionName):
"""
Search tex file for the keyString "\\apisummary{" and its matching
parenthesis. All text between will be processed such that there are no
consecutive spaces, no tabs, and unnecessary "\\n". The text will then
have all the macros replaced and put back into its corresponding place
in the text file.
The strings "./ sectionStart" and "./ sectionEnd" are appending at the
beginning and end of the processed text, respectively, for differenti-
ation between the text with sections and "dangling text".
These strings will not appear in the manpage as any line that begins
with a period will be treated as a comment.
"""
startOfText = tex.find("\\apisummary{")
endOfText = findMatchingBrace(tex, tex.find("{", startOfText))
sectionText = cleanText(tex[startOfText:endOfText])
tex = tex[:startOfText] + \
"./ sectionStart\n" + \
".SH NAME\n" + functionName + " \- " \
+ sectionText + "\n" + \
"./ sectionEnd\n" + tex[endOfText+1:]
tex = tex.replace("\\apisummary{", "")
return tex
| 5,342,119
|
def parse_args(args):
"""
Parse command line parameters
Parameters
----------
args : list
command line parameters as list of strings
Returns
-------
    argparse.Namespace
        command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Create mesh and linear system of a PDE via Galerkins method."
)
parser.add_argument(
"-f",
"--file",
dest="data_path",
help="filepath to save data at",
default="../../data/Galerkins_method/",
type=str,
)
parser.add_argument(
"-r",
"--resolutions",
dest="resolutions",
help="Mesh resolutions.",
default=[6, 128],
        nargs="+",
        type=int,
)
return parser.parse_args(args)
| 5,342,120
|
def rle(seq, keyfunc=None):
"""
Run-length encode a sequence of values.
This implements a generator which yields tuples `(symbol, begin,
end)`.
`begin` and `end` are indices into the passed sequence, begin
inclusive and end exclusive.
:param iterable seq:
:param function keyfunc: optional, a function to specify how
values should be grouped. Defaults to lambda x: x.
"""
idx = None
for idx, symbol in enumerate(seq):
if idx == 0:
begin_symbol = symbol
begin_idx = idx
else:
if keyfunc is None:
unequal = symbol != begin_symbol
else:
unequal = keyfunc(symbol) != keyfunc(begin_symbol)
if unequal:
yield (begin_symbol, begin_idx, idx)
begin_symbol = symbol
begin_idx = idx
if idx is not None:
yield (begin_symbol, begin_idx, idx + 1)
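# A short usage sketch for rle above: encode a string directly, then group a
# list case-insensitively via keyfunc.
print(list(rle("aaabbc")))
# -> [('a', 0, 3), ('b', 3, 5), ('c', 5, 6)]
print(list(rle(["A", "a", "b"], keyfunc=str.lower)))
# -> [('A', 0, 2), ('b', 2, 3)]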
| 5,342,121
|
def path(path: Union[str, List[str]], *, disable_stage_removal: Optional[bool] = False):
"""Validate the path in the event against the given path(s).
The following APIErrorResponse subclasses are used:
PathNotFoundError: When the path doesn't match.
Args:
path: A path literal or list of path literals to validate against.
disable_stage_removal (bool): preserve the original path with stage.
"""
return _get_decorator(
validate_path, path=path, disable_stage_removal=disable_stage_removal
)
| 5,342,122
|
def get_last_id(statefile):
"""Retrieve last status ID from a file"""
debug_print('Getting last ID from %s' % (statefile,))
try:
f = open(statefile,'r')
id = int(f.read())
f.close()
except IOError:
debug_print('IOError raised, returning zero (0)')
return 0
debug_print('Got %d' % (id,))
return id
| 5,342,123
|
def inference_fn(trained_model,
remove,
fixed_params,
overwrite_fixed_params=False,
days_of_purchases=710,
days_of_clicks=710,
lifespan_of_items=710,
**params):
"""
Function to run inference inside the hyperparameter loop and calculate metrics.
Parameters
----------
trained_model:
Model trained during training of hyperparameter loop.
remove:
Percentage of data removed. See src.utils_data for more details.
fixed_params:
All parameters used during training of hyperparameter loop. See src.utils_data for more details.
overwrite_fixed_params:
        If true, training parameters will be overwritten by the parameters below. Can be useful if you need to test the
        model on different parameters, e.g. ones that include older clicks or purchases.
days_of_purchases, days_of_clicks, lifespan_of_items:
All parameters that can overwrite the training parameters. Only useful if overwrite_fixed_params is True.
params:
All other parameters used during training.
Returns
-------
recall:
Recall on the test set. Relevant to compare with recall computed on hyperparametrization test set (since
parameters like 'remove' and all overwritable parameters are different)
Saves to file
-------------
Metrics computed on the test set.
"""
# Import parameters
if isinstance(fixed_params, str):
path = fixed_params
fixed_params = read_data(path)
class objectview(object):
def __init__(self, d):
self.__dict__ = d
fixed_params = objectview(fixed_params)
if 'params' in params.keys():
# if isinstance(params['params'], str):
path = params['params']
params = read_data(path)
# Initialize data
data_paths = DataPaths()
fixed_params.remove = remove
if overwrite_fixed_params:
fixed_params.days_of_purchases = days_of_purchases
fixed_params.days_of_clicks = days_of_clicks
fixed_params.lifespan_of_items = lifespan_of_items
data = DataLoader(data_paths, fixed_params)
# Get graph
valid_graph = create_graph(
data.graph_schema,
)
valid_graph = assign_graph_features(valid_graph,
fixed_params,
data,
**params,
)
dim_dict = {'user': valid_graph.nodes['user'].data['features'].shape[1],
'item': valid_graph.nodes['item'].data['features'].shape[1],
'out': params['out_dim'],
'hidden': params['hidden_dim']}
all_sids = None
if 'sport' in valid_graph.ntypes:
dim_dict['sport'] = valid_graph.nodes['sport'].data['features'].shape[1]
all_sids = np.arange(valid_graph.num_nodes('sport'))
# get training and test ids
(
train_graph,
train_eids_dict,
valid_eids_dict,
subtrain_uids,
valid_uids,
test_uids,
all_iids,
ground_truth_subtrain,
ground_truth_valid,
all_eids_dict
) = train_valid_split(
valid_graph,
data.ground_truth_test,
fixed_params.etype,
fixed_params.subtrain_size,
fixed_params.valid_size,
fixed_params.reverse_etype,
fixed_params.train_on_clicks,
fixed_params.remove_train_eids,
params['clicks_sample'],
params['purchases_sample'],
)
(
edgeloader_train,
edgeloader_valid,
nodeloader_subtrain,
nodeloader_valid,
nodeloader_test
) = generate_dataloaders(valid_graph,
train_graph,
train_eids_dict,
valid_eids_dict,
subtrain_uids,
valid_uids,
test_uids,
all_iids,
fixed_params,
num_workers,
all_sids,
embedding_layer=params['embedding_layer'],
n_layers=params['n_layers'],
neg_sample_size=params['neg_sample_size'],
)
num_batches_test = math.ceil((len(test_uids) + len(all_iids)) / fixed_params.node_batch_size)
# Import model
if isinstance(trained_model, str):
path = trained_model
trained_model = ConvModel(valid_graph,
params['n_layers'],
dim_dict,
params['norm'],
params['dropout'],
params['aggregator_type'],
fixed_params.pred,
params['aggregator_hetero'],
params['embedding_layer'],
)
trained_model.load_state_dict(torch.load(path, map_location=device))
if cuda:
trained_model = trained_model.to(device)
trained_model.eval()
with torch.no_grad():
embeddings = get_embeddings(valid_graph,
params['out_dim'],
trained_model,
nodeloader_test,
num_batches_test,
cuda,
device,
params['embedding_layer'],
)
for ground_truth in [data.ground_truth_purchase_test, data.ground_truth_test]:
precision, recall, coverage = get_metrics_at_k(
embeddings,
valid_graph,
trained_model,
params['out_dim'],
ground_truth,
all_eids_dict[('user', 'buys', 'item')],
fixed_params.k,
True, # Remove already bought
cuda,
device,
fixed_params.pred,
params['use_popularity'],
params['weight_popularity'],
)
sentence = ("TEST Precision "
"{:.3f}% | Recall {:.3f}% | Coverage {:.2f}%"
.format(precision * 100,
recall * 100,
coverage * 100))
print(sentence)
save_txt(sentence, data_paths.result_filepath, mode='a')
return recall
| 5,342,124
|
def main():
"""Gitlab."""
| 5,342,125
|
def initialize_mean_variance(args):
"""Initialize the current mean and variance values semi-intelligently.
Inspired by the kmeans++ algorithm: iteratively choose new centers from the data
by weighted sampling, favoring points that are distant from those already chosen
"""
X = args.X.reshape(args.X.shape[0] * args.X.shape[1], args.X.shape[2])
# kmeans++ inspired choice
centers = [random.choice(X)]
min_dists = scipy.array([distance(centers[-1], x) for x in X])
for l in range(1, args.K):
weights = min_dists * min_dists
new_center = weighted_sample(zip(weights, X), 1).next()
centers.append(new_center)
min_dists = scipy.fmin(min_dists, scipy.array([distance(centers[-1], x) for x in X]))
means = scipy.array(centers)
# for the variance, get the variance of the data in this cluster
variances = []
for c in centers:
idxs = tuple(i for i, (x, m) in enumerate(zip(X, min_dists)) if distance(c, x) == m)
v = scipy.var(X[idxs, :], axis=0)
variances.append(v)
variances = scipy.array(variances) + args.pseudocount
#import pdb; pdb.set_trace()
#for k in range(args.K):
# print sp.sqrt(variances[k,:])
variances[variances < .1] = .1
return means, variances
| 5,342,126
|
def verify_file_checksum(path, expected_checksum):
"""Verifies the sha256 checksum of a file."""
actual_checksum = calculate_file_checksum(path)
return actual_checksum == expected_checksum
| 5,342,127
|
def float_to_str(f, p=20):
""" 将给定的float转换为字符串,而无需借助科学计数法。
@param f 浮点数参数
@param p 精读
"""
if type(f) == str:
f = float(f)
ctx = decimal.Context(p)
d1 = ctx.create_decimal(repr(f))
return format(d1, 'f')
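# Usage sketch for float_to_str above (assumes the decimal module is imported,
# as the function requires): small floats are expanded instead of being shown
# in scientific notation.
print(float_to_str(1e-07))      # -> 0.0000001
print(float_to_str("2.5e3"))    # -> 2500.0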
| 5,342,128
|
def get_classes(parsed) -> List[ClassDef]:
"""Returns classes identified in parsed Python code."""
return [
element
for element in parsed.body
if isinstance(element, ClassDef)
]
| 5,342,129
|
def get_steps(r):
"""Clone OSA."""
nextsteps = []
nextsteps.append(
steps.SimpleCommandStep(
'git-clone-osa',
('git clone %s/openstack/openstack-ansible '
'/opt/openstack-ansible'
% r.complete['git-mirror-openstack']),
**r.kwargs
)
)
nextsteps.append(
steps.KwargsStep(
'kwargs-osa',
r,
{
'cwd': '/opt/openstack-ansible',
'env': {
'ANSIBLE_ROLE_FETCH_MODE': 'git-clone',
'ANSIBLE_DEBUG': _ansible_debug(r),
'ANSIBLE_KEEP_REMOTE_FILES': '1'
}
},
**r.kwargs
)
)
if utils.is_ironic(r):
nextsteps.append(
steps.KwargsStep(
'kwargs-ironic',
r,
{
'env': {
'BOOTSTRAP_OPTS': 'nova_virt_type=ironic'
}
},
**r.kwargs
)
)
if r.complete['enable-ceph'] == 'yes':
if r.complete['osa-branch'] in ['stable/mitaka',
'stable/newton']:
# This isn't implemented for these releases
pass
else:
nextsteps.append(
steps.KwargsStep(
'kwargs-ceph',
r,
{
'env': {
'SCENARIO': 'ceph'
}
},
**r.kwargs
)
)
return nextsteps
| 5,342,130
|
def check_java(interface):
"""
Checks for the presence of a minimally useful java on the user's system.
"""
interface.info("""\
I'm compiling a short test program, to see if you have a working JDK on your
system.
""")
SOURCE = """\
class test {
public static void main(String args[]) {
}
}
"""
f = file("test.java", "w")
f.write(SOURCE)
f.close()
if not run(plat.javac, "test.java"):
interface.info("""\
I was unable to use javac to compile a test file. If you haven't installed
the JDK yet, please download it from:
http://www.oracle.com/technetwork/java/javase/downloads/index.html
The JDK is different from the JRE, so it's possible you have Java
without having the JDK.""")
interface.fail("""\
Without a working JDK, I can't continue.
""")
interface.success("The JDK is present and working. Good!")
os.unlink("test.java")
os.unlink("test.class")
| 5,342,131
|
def test_list_nmtoken_max_length_1_nistxml_sv_iv_list_nmtoken_max_length_2_2(mode, save_output, output_format):
"""
Type list/NMTOKEN is restricted by facet maxLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-2.xsd",
instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-2-2.xml",
class_name="NistschemaSvIvListNmtokenMaxLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,342,132
|
def create_index(
corpus_f: str,
model_name_or_path: str,
output_f: str,
mode: str = "sent2vec",
batch_size: int = 64,
use_cuda: bool = False,
):
"""Given a corpus file `corpus_f` and a sent2vec model `sent2vec_f`, convert the sentences in
the corpus (line-by-line) to vector representations, normalise them (L2norm), and add them
to a Flat FAISS index. Finally, save the index to `output_f`.
:param corpus_f: path to the corpus file, with one sentence per line
:param model_name_or_path: path to the binary sent2vec model (when mode=="sent2vec") or model name of the stransformer to use
:param output_f: path to save the FAISS index to
:param mode: whether to use "sent2vec" or "stransformers" (sentence-transformers)
:param batch_size: batch_size to use to create sent2vec embeddings or sentence-transformers embeddings
:param use_cuda: whether to use GPU when using sentence-transformers
:return: the created FAISS index
"""
if not FAISS_AVAILABLE:
raise ImportError(
"Faiss not installed. Please install the right version before continuing. If you have a "
"CUDA-enabled device and want to use GPU acceleration, you can `pip install faiss-gpu`."
" Otherwise, install faiss-cpu. For more, see https://github.com/facebookresearch/faiss"
)
if mode == "sent2vec":
if not SENT2VEC_AVAILABLE:
raise ImportError(
"Requested 'sent2vec', but module not installed. Install the right version from"
" https://github.com/epfml/sent2vec"
)
try:
model = sent2vec.Sent2vecModel()
except AttributeError as exc:
raise AttributeError(
"'sent2vec' does not have attribute Sent2vecModel. You may have uninstalled an"
" incorrect version of sent2vec. The correct version can be found here:"
" https://github.com/epfml/sent2vec"
) from exc
logger.info(f"Loading sent2vec model of {model_name_or_path}")
model.load_model(model_name_or_path, inference_mode=True)
hidden_size = model.get_emb_size()
elif mode == "stransformers":
if not STRANSFORMERS_AVAILABLE:
raise ImportError(
"Requested 'stransformers', but module not installed. Please install the library"
" before continuing. https://github.com/UKPLab/sentence-transformers#installation"
)
logger.info(f"Loading SentenceTransformer model {model_name_or_path}")
model = SentenceTransformer(model_name_or_path, device="cuda" if use_cuda else "cpu")
hidden_size = model.encode(["This is a test ."]).shape[1]
else:
raise ValueError("'mode' must be 'sent2vec' or 'stransformers'")
logger.info(f"Creating empty index with hidden_size {hidden_size:,}...")
# We want to do cosine similarity search, so we use inner product as suggested here:
# https://github.com/facebookresearch/faiss/wiki/MetricType-and-distances#how-can-i-index-vectors-for-cosine-similarity
index = faiss.index_factory(hidden_size, "Flat", faiss.METRIC_INNER_PRODUCT)
vecs = []
n_lines = get_n_lines(corpus_f)
logger.info("Converting corpus into vectors. This can take a while...")
batch = []
with open(corpus_f, encoding="utf-8") as fhin:
for line_idx, line in tqdm(enumerate(fhin, 1), total=n_lines, unit="line"):
line = line.rstrip()
if line:
batch.append(line)
if len(batch) == batch_size or line_idx == n_lines:
if mode == "sent2vec":
# Normalize vectors for cosine distance as suggested here:
# https://github.com/facebookresearch/faiss/wiki/MetricType-and-distances#how-can-i-index-vectors-for-cosine-similarity
vecs.extend(model.embed_sentences(batch))
else:
vecs.extend(model.encode(batch, batch_size=batch_size, show_progress_bar=False))
batch = []
logger.info(f"Number of entries: {len(vecs)}")
logger.info("Normalizing vectors...")
sent_vecs = np.array(vecs)
# normalize_L2 works in-place so do not assign
faiss.normalize_L2(sent_vecs)
logger.info("Adding vectors to index...")
index.add(sent_vecs)
logger.info(f"Saving index to {output_f}...")
faiss.write_index(index, output_f)
return index
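# A hypothetical invocation of create_index above; the corpus path, model name,
# and output path are placeholders, not taken from the original source.
# index = create_index(
#     corpus_f="corpus.txt",
#     model_name_or_path="all-MiniLM-L6-v2",
#     output_f="corpus.faiss",
#     mode="stransformers",
#     batch_size=32,
# )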
| 5,342,133
|
def _convert_and_call(function, *args, **kwargs):
"""
Use annotation to convert args and kwargs to the correct type before calling function
If __annotations__ is not present (py2k) or empty, do not perform any conversion.
This tries to perform the conversion by calling the type (works for int,str).
If calling the type results in an error, no conversion is performed.
"""
args = list(args)
if PY3K:
argspec = inspect.getfullargspec(function)
annot = argspec.annotations
log.debug("Function's annotations are: %s", annot)
for i, arg in enumerate(argspec.args):
i=i-1 # cls/ self does not count
if arg in annot:
log.debug("For arg %s: i=%s, args=%s", arg, i, args)
if i<len(args):
args[i]=_try_convert(args[i], annot[arg])
elif arg in kwargs:
kwargs[arg]=_try_convert(kwargs[arg], annot[arg])
else:
log.debug("No annotation present for %s", arg)
log.debug("Calling %s with args=%s, kwargs=%s", function.__name__, args, kwargs)
return function(*args, **kwargs)
| 5,342,134
|
def import_module(name, path):
"""
correct way of importing a module dynamically in python 3.
:param name: name given to module instance.
:param path: path to module.
:return: module: returned module instance.
"""
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
| 5,342,135
|
def response_ssml_text_and_prompt(output, endsession, reprompt_text):
""" create a Ssml response with prompt """
return {
'outputSpeech': {
'type': 'SSML',
'ssml': "<speak>" + output + "</speak>"
},
'reprompt': {
'outputSpeech': {
'type': 'SSML',
'ssml': "<speak>" + reprompt_text + "</speak>"
}
},
'shouldEndSession': endsession
}
| 5,342,136
|
def test_queryset_serialization():
"""Verify that querysets may be serialized."""
articles = Article.objects.all()
request = factory.get('/')
assert Serializer(articles).serialize(request) == [
{
'id': 1,
'title': 'Title',
'content': 'Content',
'is_published': False,
'created_at': '1970-01-01T00:00:00',
'author': {
'id': 1,
'name': 'John Doe'
},
'tags': [{
'id': 1,
'name': 'sports'
}]
},
{
'id': 2,
'title': 'Another title',
'content': 'Another content',
'is_published': False,
'created_at': '1970-01-01T00:00:00',
'author': {
'id': 1,
'name': 'John Doe'
},
'tags': [{
'id': 1,
'name': 'sports'
}]
}
]
assert JSONSerializer(articles).serialize(request)
assert JSONPSerializer(articles).serialize(request)
assert XMLSerializer(articles).serialize(request)
| 5,342,137
|
def getAreaQuantityQuantUnit(words):
"""
from training data:
count perc cum_sum cum_perc
kind_c
hectare 7 58.333333 7 58.333333
acre 2 16.666667 9 75.000000
meter 1 8.333333 10 83.333333
square-foot 1 8.333333 11 91.666667
square-meter 1 8.333333 12 100.000000
"""
allWords = ' '.join(words)
unitKind = 'hectare'
quant = None
units = ['hectare', 'acre', 'euro', 'meter', 'square-foot', 'square-meter' ]
for u in units:
if u in allWords : unitKind=u; break
if 'square foot' in allWords : unitKind='square-foot'
if 'square feet' in allWords : unitKind='square-foot'
if 'square meter' in allWords : unitKind='square-meter'
m = re.search(r'([0-9,\.]+)', allWords.lower())
if m:
quant = m.group(1)
quant = quant.replace(",", "")
quant = float(quant)
if quant=='.':
quant=None
if not quant:
q = text2int(allWords)
if q:
quant = q
else:
m = text2int(allWords)
if m:
quant *= m
if not quant:
quant = 1
quant = ('%f' % quant).rstrip('0').rstrip('.')
return quant, unitKind
#subGraph['attrDict_p'] = attrDict_p
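# Illustrative call for getAreaQuantityQuantUnit above (assumes `re` is
# imported at module level, as the function requires):
print(getAreaQuantityQuantUnit(["2.5", "hectares", "of", "land"]))
# -> ('2.5', 'hectare')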
| 5,342,138
|
def add_user(
username: str,
password: Optional[str] = None,
shell: str = "/bin/bash",
system_user: bool = False,
primary_group: str = None,
secondary_groups: List[str] = None,
uid: int = None,
home_dir: str = None,
) -> str:
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
Arguments:
username: Username to create
password: Password for user; if ``None``, create a system user
shell: The default shell for the user
system_user: Whether to create a login or system user
primary_group: Primary group for user; defaults to username
secondary_groups: Optional list of additional groups
uid: UID for user being created
home_dir: Home directory for user
Returns:
The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
if uid:
user_info = pwd.getpwuid(int(uid))
logger.info("user '%d' already exists", uid)
return user_info
user_info = pwd.getpwnam(username)
logger.info("user with uid '%s' already exists", username)
return user_info
except KeyError:
logger.info("creating user '%s'", username)
cmd = ["useradd", "--shell", shell]
if uid:
cmd.extend(["--uid", str(uid)])
if home_dir:
cmd.extend(["--home", str(home_dir)])
if password:
cmd.extend(["--password", password, "--create-home"])
if system_user or password is None:
cmd.append("--system")
if not primary_group:
try:
grp.getgrnam(username)
primary_group = username # avoid "group exists" error
except KeyError:
pass
if primary_group:
cmd.extend(["-g", primary_group])
if secondary_groups:
cmd.extend(["-G", ",".join(secondary_groups)])
cmd.append(username)
check_output(cmd, stderr=STDOUT)
user_info = pwd.getpwnam(username)
return user_info
| 5,342,139
|
def xyz_insert(weeded_list):
"""Inserts geometries into both orca and gaussian input files"""
extension = ".inp" if preferences.comp_software == "orca" else preferences.gauss_ext
sub_folder = os.path.join(os.getcwd(),"inserted_input_files")
if os.path.exists(sub_folder):
print("'inserted_input_files' directory already exists in current directory!")
print("Please remove it and try again!")
return
os.mkdir(sub_folder)
for i in weeded_list:
try:
xyz = XyzFile(read_item(i))
if preferences.comp_software == "orca": comp_input = InpFile(read_item(i.replace(".xyz",extension)))
else: comp_input = GjfFile(read_item(i.replace(".xyz",extension)))
comp_input = comp_input.replace_cord(xyz)
with open(os.path.join(sub_folder,i.replace(".xyz",extension)),"w") as file:
file.write(comp_input.return_print)
except FileNotFoundError: print("file " + i.replace(".xyz",extension) + " could not be found!")
print("\nJob done!\nPlease lookup the inserted_input_files directory\n")
return
| 5,342,140
|
def main():
"""Do nothing."""
pass
| 5,342,141
|
def tokenize_and_align(tokenizer, words, cased=False):
"""Splits up words into subword-level tokens."""
words = ["[CLS]"] + list(words) + ["[SEP]"]
basic_tokenizer = tokenizer.basic_tokenizer
tokenized_words = []
for word in words:
word = tokenization.convert_to_unicode(word)
word = basic_tokenizer._clean_text(word)
if word == "[CLS]" or word == "[SEP]":
word_toks = [word]
else:
if not cased:
word = word.lower()
word = basic_tokenizer._run_strip_accents(word)
word_toks = basic_tokenizer._run_split_on_punc(word)
tokenized_word = []
for word_tok in word_toks:
tokenized_word += tokenizer.wordpiece_tokenizer.tokenize(word_tok)
tokenized_words.append(tokenized_word)
assert len(tokenized_words) == len(words)
return tokenized_words
| 5,342,142
|
def _vagrant_format_results(line):
"""Extract fields from vm status line.
:param line: Status line for a running vm
:type line: str
:return: (<vm directory path>, <vm status>)
:rtype: tuple of strings
"""
line_split = line.split()
return (line_split[-1], line_split[-2],)
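# Example with an illustrative `vagrant global-status`-style line
# (id, name, provider, state, directory):
print(_vagrant_format_results("a1b2c3  default  virtualbox  running  /home/user/vm"))
# -> ('/home/user/vm', 'running')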
| 5,342,143
|
def rect_to_xys(rect, image_shape):
"""Convert rect to xys, i.e., eight points
    The `image_shape` is used to make sure all points returned are valid, i.e., within the image area
"""
h, w = image_shape[0:2]
def get_valid_x(x):
if x < 0:
return 0
if x >= w:
return w - 1
return x
def get_valid_y(y):
if y < 0:
return 0
if y >= h:
return h - 1
return y
rect = ((rect[0], rect[1]), (rect[2], rect[3]), rect[4])
points = cv2.cv.BoxPoints(rect)
points = np.int0(points)
for i_xy, (x, y) in enumerate(points):
x = get_valid_x(x)
y = get_valid_y(y)
points[i_xy, :] = [x, y]
points = np.reshape(points, -1)
return points
| 5,342,144
|
def determine_aws_service_name(
request: Request, services: ServiceCatalog = get_service_catalog()
) -> Optional[str]:
"""
Tries to determine the name of the AWS service an incoming request is targeting.
:param request: to determine the target service name of
:param services: service catalog (can be handed in for caching purposes)
    :return: service name string (or None if the target service could not be determined exactly)
"""
signing_name, target_prefix, operation, host, path = _extract_service_indicators(request)
candidates = set()
# 1. check the signing names
if signing_name:
signing_name_candidates = services.by_signing_name(signing_name)
if len(signing_name_candidates) == 1:
# a unique signing-name -> service name mapping is the case for ~75% of service operations
return signing_name_candidates[0]
# try to find a match with the custom signing name rules
custom_match = custom_signing_name_rules(signing_name, path)
if custom_match:
return custom_match
# still ambiguous - add the services to the list of candidates
candidates.update(signing_name_candidates)
# 2. check the target prefix
if target_prefix and operation:
target_candidates = services.by_target_prefix(target_prefix)
if len(target_candidates) == 1:
# a unique target prefix
return target_candidates[0]
# still ambiguous - add the services to the list of candidates
candidates.update(target_candidates)
# exclude services where the operation is not contained in the service spec
for service_name in list(candidates):
service = services.get(service_name)
if operation not in service.operation_names:
candidates.remove(service_name)
else:
# exclude services which have a target prefix (the current request does not have one)
for service_name in list(candidates):
service = services.get(service_name)
if service.metadata.get("targetPrefix") is not None:
candidates.remove(service_name)
if len(candidates) == 1:
return candidates.pop()
# 3. check the path
if path:
# iterate over the service spec's endpoint prefix
for prefix, services_per_prefix in services.endpoint_prefix_index.items():
if path.startswith(prefix):
if len(services_per_prefix) == 1:
return services_per_prefix[0]
candidates.update(services_per_prefix)
# try to find a match with the custom path rules
custom_path_match = custom_path_addressing_rules(path)
if custom_path_match:
return custom_path_match
# 4. check the host (custom host addressing rules)
if host:
custom_host_match = custom_host_addressing_rules(host)
if custom_host_match:
return custom_host_match
# 5. check the query / form-data
values = request.values
if "Action" in values and "Version" in values:
# query / ec2 protocol requests always have an action and a version (the action is more significant)
query_candidates = services.by_operation(values["Action"])
if len(query_candidates) == 1:
return query_candidates[0]
for service in list(query_candidates):
service_model = services.get(service)
if values["Version"] != service_model.api_version:
# the combination of Version and Action is not unique, add matches to the candidates
query_candidates.remove(service)
if len(query_candidates) == 1:
return query_candidates[0]
candidates.update(query_candidates)
# 6. check the legacy rules in the end
legacy_match = legacy_rules(request)
if legacy_match:
return legacy_match
LOG.warning("could not uniquely determine service from request, candidates=%s", candidates)
if signing_name:
return signing_name
if candidates:
return candidates.pop()
return None
| 5,342,145
|
def comorder(node):
"""层序遍历, 广度优先搜索bfs
通过队列来实现
"""
q = Queue()
q.put(node)
    if node.data is not None:
        # pop nodes from the queue in a loop
        while not q.empty():
node = q.get(0)
print(node.data, end='')
if node.node_left:
q.put(node.node_left)
if node.node_right:
q.put(node.node_right)
| 5,342,146
|
def file_senzing_info():
"""#!/usr/bin/env bash
# --- Main --------------------------------------------------------------------
SCRIPT_DIR="$( cd "$( dirname "${{BASH_SOURCE[0]}}" )" >/dev/null 2>&1 && pwd )"
PROJECT_DIR="$(dirname ${{SCRIPT_DIR}})"
source ${{SCRIPT_DIR}}/docker-environment-vars.sh
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
COLUMN_WIDTH_1=${{#SENZING_PROJECT_NAME}}
COLUMN_WIDTH=$((${{COLUMN_WIDTH_1}}+16))
DOCKER_CONTAINERS=(
"${{SENZING_DOCKER_CONTAINER_NAME_SENZING_API_SERVER}};${{SENZING_DOCKER_PORT_SENZING_API_SERVER}};senzing/senzing-api-server:${{SENZING_DOCKER_IMAGE_VERSION_SENZING_API_SERVER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_SENZING_DEBUG}};----;senzing/senzing-debug:${{SENZING_DOCKER_IMAGE_VERSION_SENZING_DEBUG}}"
"${{SENZING_DOCKER_CONTAINER_NAME_JUPYTER}};${{SENZING_DOCKER_PORT_JUPYTER}};senzing/jupyter:${{SENZING_DOCKER_IMAGE_VERSION_JUPYTER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_PHPPGADMIN}};${{SENZING_DOCKER_PORT_PHPPGADMIN_HTTP}};senzing/phppgadmin:${{SENZING_DOCKER_IMAGE_VERSION_PHPPGADMIN}}"
"${{SENZING_DOCKER_CONTAINER_NAME_PORTAINER}};${{SENZING_DOCKER_PORT_PORTAINER}};portainer/portainer:${{SENZING_DOCKER_IMAGE_VERSION_PORTAINER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_POSTGRES}};${{SENZING_DOCKER_PORT_POSTGRES}};postgres:${{SENZING_DOCKER_IMAGE_VERSION_POSTGRES}}"
"${{SENZING_DOCKER_CONTAINER_NAME_QUICKSTART}};${{SENZING_DOCKER_PORT_ENTITY_SEARCH_WEB_APP}};senzing/web-app-demo:${{SENZING_DOCKER_IMAGE_VERSION_WEB_APP_DEMO}}"
"${{SENZING_DOCKER_CONTAINER_NAME_SQLITE_WEB}};${{SENZING_DOCKER_PORT_SENZING_SQLITE_WEB}};coleifer/sqlite-web:${{SENZING_DOCKER_IMAGE_VERSION_SQLITE_WEB}}"
"${{SENZING_DOCKER_CONTAINER_NAME_SSHD}};${{SENZING_DOCKER_PORT_SSHD}};senzing/sshd:${{SENZING_DOCKER_IMAGE_VERSION_SSHD}}"
"${{SENZING_DOCKER_CONTAINER_NAME_STREAM_LOADER}};----;senzing/stream-loader:${{SENZING_DOCKER_IMAGE_VERSION_STREAM_LOADER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_STREAM_PRODUCER}};----;senzing/stream-producer:${{SENZING_DOCKER_IMAGE_VERSION_STREAM_PRODUCER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_SWAGGERAPI_SWAGGER_UI}};${{SENZING_DOCKER_PORT_SENZING_SWAGGERAPI_SWAGGER_UI}};swaggerapi/swagger-ui:${{SENZING_DOCKER_IMAGE_VERSION_SWAGGERAPI_SWAGGER_UI}}"
"${{SENZING_DOCKER_CONTAINER_NAME_ENTITY_SEARCH_WEB_APP}};${{SENZING_DOCKER_PORT_ENTITY_SEARCH_WEB_APP}};senzing/entity-search-web-app:${{SENZING_DOCKER_IMAGE_VERSION_ENTITY_SEARCH_WEB_APP}}"
"${{SENZING_DOCKER_CONTAINER_NAME_WEB_APP_DEMO}};${{SENZING_DOCKER_PORT_ENTITY_SEARCH_WEB_APP}};senzing/web-app-demo:${{SENZING_DOCKER_IMAGE_VERSION_WEB_APP_DEMO}}"
"${{SENZING_DOCKER_CONTAINER_NAME_XTERM}};${{SENZING_DOCKER_PORT_XTERM}};senzing/xterm:${{SENZING_DOCKER_IMAGE_VERSION_XTERM}}"
"${{SENZING_DOCKER_CONTAINER_NAME_RABBITMQ}};${{SENZING_DOCKER_PORT_RABBITMQ_UI}};bitnami/rabbitmq:${{SENZING_DOCKER_IMAGE_VERSION_RABBITMQ}}"
)
echo "${{SENZING_HORIZONTAL_RULE}}"
echo "${{SENZING_HORIZONTAL_RULE:0:2}} senzing-info.sh {environment_version} ({environment_updated})"
if [[ ( -n "$(command -v jq)" ) ]]; then
G2_BUILD_VERSION_FILE=${{PROJECT_DIR}}/g2BuildVersion.json
if [ -f "${{PROJECT_DIR}}/g2/g2BuildVersion.json" ]; then
G2_BUILD_VERSION_FILE=${{PROJECT_DIR}}/g2/g2BuildVersion.json
fi
SENZING_VERSION_API=$(jq --raw-output ".VERSION" ${{G2_BUILD_VERSION_FILE}})
SENZING_VERSION_DATA=$(jq --raw-output ".DATA_VERSION" ${{G2_BUILD_VERSION_FILE}})
echo "${{SENZING_HORIZONTAL_RULE:0:2}} senzing api: ${{SENZING_VERSION_API}} data: ${{SENZING_VERSION_DATA}}"
fi
echo "${{SENZING_HORIZONTAL_RULE:0:2}}"
for DOCKER_CONTAINER in ${{DOCKER_CONTAINERS[@]}};
do
IFS=";" read -r -a CONTAINER_DATA <<< "${{DOCKER_CONTAINER}}"
CONTAINER_NAME="${{CONTAINER_DATA[0]}} "
CONTAINER_PORT="${{CONTAINER_DATA[1]}}"
CONTAINER_VERSION="${{CONTAINER_DATA[2]}}"
if [ "$( docker container inspect -f '{{{{.State.Status}}}}' ${{CONTAINER_NAME}} 2>/dev/null )" == "running" ]; then
printf "${{SENZING_HORIZONTAL_RULE:0:2}} %-${{COLUMN_WIDTH}}s ${{GREEN}}up${{NC}} http://${{SENZING_DOCKER_HOST_IP_ADDR}}:${{CONTAINER_PORT}} ${{CONTAINER_VERSION}}\\n" ${{CONTAINER_NAME}}
else
printf "${{SENZING_HORIZONTAL_RULE:0:2}} %-${{COLUMN_WIDTH}}s ${{RED}}down${{NC}} http://${{SENZING_DOCKER_HOST_IP_ADDR}}:${{CONTAINER_PORT}} ${{CONTAINER_VERSION}}\\n" ${{CONTAINER_NAME}}
fi
done
echo "${{SENZING_HORIZONTAL_RULE:0:2}}"
echo "${{SENZING_HORIZONTAL_RULE:0:2}} For more information:"
echo "${{SENZING_HORIZONTAL_RULE:0:2}} ${{SENZING_REFERENCE_URL}}#senzing-info"
echo "${{SENZING_HORIZONTAL_RULE}}"
"""
return 0
| 5,342,147
|
def remove_barrier(tape):
"""Quantum function transform to remove Barrier gates.
Args:
qfunc (function): A quantum function.
Returns:
function: the transformed quantum function
**Example**
Consider the following quantum function:
.. code-block:: python
def qfunc(x, y):
qml.Hadamard(wires=0)
qml.Hadamard(wires=1)
qml.Barrier(wires=[0,1])
qml.PauliX(wires=0)
return qml.expval(qml.PauliZ(0))
The circuit before optimization:
>>> dev = qml.device('default.qubit', wires=2)
>>> qnode = qml.QNode(qfunc, dev)
>>> print(qml.draw(qnode)(1, 2))
0: ──H──╭||──X──┤ ⟨Z⟩
1: ──H──╰||─────┤
We can remove the Barrier by running the ``remove_barrier`` transform:
>>> optimized_qfunc = remove_barrier(qfunc)
>>> optimized_qnode = qml.QNode(optimized_qfunc, dev)
>>> print(qml.draw(optimized_qnode)(1, 2))
0: ──H──X──┤ ⟨Z⟩
1: ──H─────┤
"""
# Make a working copy of the list to traverse
list_copy = tape.operations.copy()
while len(list_copy) > 0:
current_gate = list_copy[0]
# Remove Barrier gate
if current_gate.name != "Barrier":
apply(current_gate)
list_copy.pop(0)
continue
# Queue the measurements normally
for m in tape.measurements:
apply(m)
| 5,342,148
|
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: DataUpdateCoordinator[Domain] = hass.data[DOMAIN][entry.entry_id]
return {
"creation_date": coordinator.data.creation_date,
"expiration_date": coordinator.data.expiration_date,
"last_updated": coordinator.data.last_updated,
"status": coordinator.data.status,
"statuses": coordinator.data.statuses,
"dnssec": coordinator.data.dnssec,
}
| 5,342,149
|
def binary_to_single(param_dict: Dict[str, float], star_index: int) -> Dict[str, float]:
"""
Function for converting a dictionary with atmospheric parameters
of a binary system to a dictionary of parameters for one of the
two stars.
Parameters
----------
param_dict : dict
Dictionary with the atmospheric parameters of both stars. The
keywords end either with ``_0`` or ``_1`` that correspond with
``star_index=0`` or ``star_index=1``.
star_index : int
Star index (0 or 1) that is used for the parameters in
``param_dict``.
Returns
-------
dict
Dictionary with the parameters of the selected star.
"""
new_dict = {}
for key, value in param_dict.items():
if star_index == 0 and key[-1] == "0":
new_dict[key[:-2]] = value
elif star_index == 1 and key[-1] == "1":
new_dict[key[:-2]] = value
elif key in ["teff", "logg", "feh", "c_o_ratio", "fsed", "radius", "distance"]:
new_dict[key] = value
return new_dict
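# A small usage sketch for binary_to_single above (values are illustrative):
params = {"teff_0": 3500.0, "teff_1": 3000.0,
          "radius_0": 1.2, "radius_1": 0.8, "distance": 140.0}
print(binary_to_single(params, star_index=1))
# -> {'teff': 3000.0, 'radius': 0.8, 'distance': 140.0}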
| 5,342,150
|
def welcome():
"""List all available api routes."""
return (
"""Available Routes:
/api/v1.0/precipitation
Convert the query results to a dictionary using date as the key and prcp as the value.
Return the JSON representation of your dictionary.
/api/v1.0/stations
Return a JSON list of stations from the dataset.
/api/v1.0/tobs
Return a JSON list of temperature observations (TOBS) for the previous year.
/api/v1.0/start_date
/api/v1.0/start_date/end_date
Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
"""
)
| 5,342,151
|
def armenian_input_latin(field, text):
"""
Prepare a string from one of the query fields for subsequent
processing: replace latin characters with Armenian equivalents.
"""
if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
return text
textTrans = ''
for c in re.findall('.[\'_]+|.', text):
try:
c = dictLat2Arm[c]
except KeyError:
try:
c = dictLat2Arm[c.lower()].upper()
except KeyError:
pass
textTrans += c
return textTrans
| 5,342,152
|
def create_audit_directory():
"""
Creates directory for analyze_iam_policy audit files and places audit files there.
"""
audit_directory_path = HOME + CONFIG_DIRECTORY + AUDIT_DIRECTORY_FOLDER
create_directory_if_it_doesnt_exist(audit_directory_path)
destination = audit_directory_path
existing_audit_files_directory = os.path.abspath(os.path.dirname(__file__)) + '/data/audit/'
source = existing_audit_files_directory
file_list = list_files_in_directory(existing_audit_files_directory)
for file in file_list:
if file.endswith(".txt"):
shutil.copy(source + '/' + file, destination)
print("copying " + file + " to " + destination)
| 5,342,153
|
def df_style_cell(*styles: Union[
Tuple[Callable[['cell'], bool], 'style'],
Tuple['cell', 'style'],
Callable[['cell'], Optional['style']],
]) -> Callable[['cell'], 'style']:
"""
Shorthand for df.style.applymap(...). Example usage:
df.style.applymap(df_style_cell(
(lambda x: 0 < x < 1, 'color: red'),
(0, 'color: green'),
lambda x: 'background: %s' % to_rgb_hex(x),
))
"""
def f(x):
y = None
for style in styles:
if isinstance(style, tuple) and isinstance(style[0], types.FunctionType) and style[0](x):
y = style[1]
elif isinstance(style, tuple) and x == style[0]:
y = style[1]
elif isinstance(style, types.FunctionType):
y = style(x)
if y:
break
return y or ''
return f
| 5,342,154
|
def _interactive_pick(
plotter,
model,
picking_list: Optional[list] = None,
key: str = "groups",
label_size: int = 12,
checkbox_size: int = 27,
checkbox_color: Union[str, tuple, list] = "blue",
checkbox_position: tuple = (5.0, 5.0),
):
"""Add a checkbox button widget to the scene."""
def toggle_vis(flag):
actor.SetVisibility(flag)
if picking_list is not None:
if flag is True:
picking_list.append(model)
elif flag is False and model in picking_list:
picking_list.remove(model)
# Make a separate callback for each widget
actor = plotter.add_mesh(
model,
scalars=f"{key}_rgba",
rgba=True,
render_points_as_spheres=True,
point_size=10,
)
plotter.add_checkbox_button_widget(
toggle_vis,
value=True,
position=checkbox_position,
size=checkbox_size,
border_size=3,
color_on=checkbox_color,
color_off="white",
background_color=checkbox_color,
)
plotter.add_text(
f"\n {model[key][0]}",
position=checkbox_position,
font_size=label_size,
color="black",
font="arial",
)
| 5,342,155
|
def bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):
"""Forward transform that maps proposal boxes to predicted ground-truth
boxes using bounding-box regression deltas. See bbox_transform_inv for a
description of the weights argument.
"""
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into np.exp()
dw = np.minimum(dw, np.log(1000. / 16.))
dh = np.minimum(dh, np.log(1000. / 16.))
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
# y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1
return pred_boxes
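# Illustrative call (not from the original source; assumes numpy is imported as
# np at module level, as the function itself does). With zero deltas the
# proposal is essentially reproduced, up to the "+1"/"-1" width convention.
boxes = np.array([[0.0, 0.0, 9.0, 9.0]])
deltas = np.zeros((1, 4))
pred = bbox_transform(boxes, deltas)  # approx. [[-0.5, -0.5, 8.5, 8.5]]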
| 5,342,156
|
def get_item_tds(item_id):
"""
    Method connects to the ILS to retrieve item information and generates
    HTML table cells with the information.
:param item_id: Item id
:rtype: HTML string
"""
item_bot = ItemBot(opac_url=ils_settings.OPAC_URL,item_id=item_id)
output_html = "<td>{0}</td><td>{1}</td><td>{2}</td>".format(item_bot.status(),
item_bot.location(),
item_bot.callnumber())
return mark_safe(output_html)
| 5,342,157
|
def get_data():
""" Reads the data files.
"""
pass
| 5,342,158
|
def query_ports(session, switch_resource_id, **kwargs):
"""
Retrieve multiple :class:`Port` objects from database.
switch_resource_id is optional
# TODO: Implement and document query_ports() correctly
"""
# TODO: Add csv output option to query_ports()
# Check all arguments before querying
Port.check_params(**kwargs)
# Query
pts = session.query(Port)
# Filter by switch if given
if switch_resource_id is not None:
sw = query_switch(session, switch_resource_id)
if sw is None:
raise ValueError('Given switch does not exist')
pts = pts.filter_by(_switch_id = sw._switch_id)
# Filter
for key, val in kwargs.items():
if key == 'vlans':
for vlan in val:
pts = pts.filter(Port._vlans.contains(vlan))
else:
raise NotImplementedError('query_ports() is not yet implemented')
return pts.all()
| 5,342,159
|
def run(prefix):
"""
    Exercise that the TensorMol calculator works together with Q|R.
"""
tm = TensormolCalculator()
energy, gradient = tm.run_qr(tm.atoms.mols[0])
assert approx_equal(energy, -7.01911003671)
assert approx_equal(gradient, [[54.72373444954559, 43.50211011650701, 55.0681687461352],
[-110.79126231492444, 25.98146937078293, -11.155392610622426],
[5.433491436770188, 6.52893640005011, 28.622889693640612],
[-71.50338529045021, -3.6578466193136236, -14.458647838597882],
[14.741859019844105, 13.126236850151095, 14.380723899173134],
[-4.593095522329837, -6.852591886564875, -16.283703321909563],
[70.52208042638348, -59.5259141945174, -20.292972622828866],
[-56.19796768577346, 9.263974807632003, -51.28296067705901],
[2.6644043343068895, -16.47976823726328, 14.091653049606416],
[8.678765929867977, -11.392644499808615, -9.866803503526413],
[-79.61664600629528, 37.13009889787888, -10.907654880253387],
[128.39042327657435, -29.74868016959136, 41.756884083846124],
[-15.024960893702614, 18.255828278610956, -14.611452494117255],
[21.28006335078961, -0.2172458532869804, -25.080542379037084],
[14.285160386794596, 4.564534513048582, -18.32772914956326],
[2.439490468899258, 10.79255266315983, 26.174699521010446],
[1.7264958417478928, 15.38243123306247, -42.04282200366032],
[23.94582040213049, -28.693705758494268, 18.005391717834794],
[-39.190252757996774, -23.244663074400655, 22.006558947672374],
[-7.5440689820869435, -17.155966192899715, -22.01783573379451],
[-14.229515476296564, 13.503061551269198, 17.968817012420786],
[-42.692458567404834, 31.55037444695317, 62.63401289342725],
[3.224648029245346, 17.928547580646576, -4.524288383451061],
[-14.150630324115214, 1.2147768632076443, 1.967302103238517],
[-11.589212064363652, -6.444221705381739, -4.77406901682601],
[-5.5705251392947135, -49.58353381805122, 17.851453490207053],
[12.42994083133531, -31.32628534955386, 0.5620932081847977],
[-15.209140041336132, 2.6841055262949185, 18.113311015189275],
[-38.73187194009035, -15.72274317196305, -11.82050674146153],
[36.242545183467506, -10.805354680557413, -12.277902260267444],
[15.232739148782514, -4.663316010381281, 8.999606560034826],
[59.63334514038209, 26.088864339029772, 11.30765051583584],
[15.608156356402093, 27.943937629842292, -43.96675247210592],
[18.13331849770548, 7.431414655697998, 16.906001192291857],
[17.86199385016078, -8.699557589383213, 33.00195465513993],
[-10.848074631757044, 39.029932538400196, -25.227204796906726],
[-39.85554941979901, 1.527429754721101, -19.432085940661143],
[31.92966974414988, -30.73366946479253, -4.944928236082764],
[3.6813964573627045, -4.724979244557905, -38.6569203686534],
[14.529074495458673, 6.242069503845828, 12.534003126275708]], 0.001)
| 5,342,160
|
def build_data_request(mac, request_type='current', interval=1, units='english'):
"""
    Creates a RainWise API request URL for current or recent data based on station mac, request_type (optional), interval (optional), and units (optional)
"""
# Check if interval requested is valid interval
if interval not in [1, 5, 10, 15, 30, 60]:
raise ValueError('Invalid Request: Parameter interval must be 1, 5, 10, 15, 30, or 60')
# Check if units requested are valid units
if units.lower() not in ['english', 'metric']:
raise ValueError('Invalid Request: Parameter units must be english or metric')
# Build request URL for current conditions
if request_type == 'current':
return f'http://api.rainwise.net/main/v1.4/get-data.php?mac={mac}&format=json'
# Build request URL for recent data
elif request_type == 'recent':
return f'http://api.rainwise.net/main/v1.4/get-recent.php?mac={mac}&interval={interval}&units={units}&format=json'
    raise ValueError("Invalid Request: Parameter request_type must be either 'current' or 'recent'")
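# Usage sketch; "00:11:22:33:44:55" is a made-up station MAC, not a real device.
url = build_data_request("00:11:22:33:44:55", request_type="recent",
                         interval=15, units="metric")
# -> http://api.rainwise.net/main/v1.4/get-recent.php?mac=00:11:22:33:44:55&interval=15&units=metric&format=json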
| 5,342,161
|
def load_acs_access_to_car() -> pd.DataFrame:
"""Function to merge the two files for the QOL outputs and do some standard renaming. Because
these are QOL indicators they remain in the same csv output with columns indicating year"""
df_0812 = pd.read_excel(
"./resources/ACS_PUMS/EDDT_ACS2008-2012.xlsx",
sheet_name="ACS08-12",
dtype={"Geog": str},
)
df_1519 = pd.read_excel(
"./resources/ACS_PUMS/EDDT_ACS2015-2019.xlsx",
sheet_name="ACS15-19",
dtype={"Geog": str},
)
df = pd.merge(df_0812, df_1519, on="Geog", how="left")
df = df.filter(regex="Geog|Wk16p|CWCar")
df = df.replace(
{
"Geog": {
"Bronx": "BX",
"Brooklyn": "BK",
"Manhattan": "MN",
"Queens": "QN",
"Staten Island": "SI",
"NYC": "citywide",
}
}
)
df.set_index("Geog", inplace=True)
return df
| 5,342,162
|
def canonicalize(top_dir):
"""
Canonicalize filepath.
"""
return os.path.realpath(top_dir)
| 5,342,163
|
def test_cpu_limit():
"""Test the cpu n argument."""
if 'cpu' in fresnel.Device.available_modes:
fresnel.Device(mode='cpu', n=2)
| 5,342,164
|
def elastic_transform(image, alpha, sigma, random_state=None):
"""Elastic deformation of images as described in [Simard2003]_.
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
assert len(image.shape) == 2
if random_state is None:
random_state = np.random.RandomState(None)
shape = image.shape
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
return map_coordinates(image, indices, order=1).reshape(shape)
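# Usage sketch (assumes numpy as np and scipy.ndimage's gaussian_filter and
# map_coordinates are imported at module level, as the function above expects).
img = np.random.rand(28, 28)
warped = elastic_transform(img, alpha=34.0, sigma=4.0,
                           random_state=np.random.RandomState(0))
assert warped.shape == img.shape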
| 5,342,165
|
def convert_epoch_to_mysql_timestamp(epoch_timestamp):
"""
Converts a given epoch timestamp in seconds to the MySQL datetime format.
:param epoch_timestamp: The timestamp as seconds since epoch time
    :return: The MySQL timestamp string in the format 'YYYY-MM-DD HH:MM:SS'
:rtype: str
"""
try:
epoch = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(epoch_timestamp))
return epoch
except Exception as e:
print(e)
return None
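# Quick illustrative check (not from the original source): epoch second 0 is
# the start of 1970 in UTC, since the conversion uses time.gmtime().
assert convert_epoch_to_mysql_timestamp(0) == '1970-01-01 00:00:00'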
| 5,342,166
|
def _ensure_eventlet(func):
"""Decorator that verifies we have the needed eventlet components."""
@six.wraps(func)
def wrapper(*args, **kwargs):
if not _utils.EVENTLET_AVAILABLE or greenthreading is None:
raise RuntimeError('Eventlet is needed to wait on green futures')
return func(*args, **kwargs)
return wrapper
| 5,342,167
|
def ReplaceOldWithNewFile(orig_file='', new_temp_file=''):
"""
    Compare the original file and the new temp file (contents and permissions).
    If they are the same, just remove the temp version (maybe not needed, handled in the calling function).
    If they are different, back up the original and then replace orig_file with new_temp_file.
Return code values:
0: No changes made
1: Changes made
"""
## Well I've broken this by moving it to another module.
## I need to read up on logging, and logging config files
# https://docs.python.org/3.8/howto/logging.html#configuring-logging
import os
import time
import shutil
# If file exists,
try:
type(logMessage) ## check if logMessage is already set up
except:
from general_functions import LogMessage
logMessage=LogMessage() # test just using default log file
if os.path.exists(orig_file):
import filecmp
#content_matches=filecmp.cmp(orig_file,new_temp_file)
#permission_matches=os.stat(orig_file).st_mode == os.stat(new_temp_file).st_mode
#user_matches=os.stat(orig_file).st_uid == os.stat(new_temp_file).st_uid
#group_matches=os.stat(orig_file).st_gid == os.stat(new_temp_file).st_gid
orig_permission=os.stat(orig_file).st_mode
new_permission=os.stat(new_temp_file).st_mode
orig_user=os.stat(orig_file).st_uid
new_user=os.stat(new_temp_file).st_uid
orig_group=os.stat(orig_file).st_gid
new_group=os.stat(new_temp_file).st_gid
content_matches=filecmp.cmp(orig_file,new_temp_file)
permission_matches=orig_permission == new_permission
user_matches=orig_user == new_user
group_matches=orig_group == new_group
logMessage.info( 'Checking file ' + orig_file + '. content_matches:' + str(content_matches) + '; permission_matches:' + str(permission_matches) + '; user_matches:' + str(user_matches) + '; group_matches:' + str(group_matches))
if content_matches and permission_matches and user_matches and group_matches:
logMessage.info(orig_file + ' is unchanged.')
os.remove(new_temp_file)
return 0
else:
logMessage.info( orig_file + 'Permission: ' + str(orig_permission) + ', owner: ' + str(orig_user) + ', group :' + str(orig_group) )
logMessage.info( new_temp_file + 'Permission: ' + str(new_permission) + ', owner: ' + str(new_user) + ', group :' + str(new_group) )
# backup the original file
t = time.localtime()
backupfile=orig_file + time.strftime('%Y%m%d_%H%M%S', t)
shutil.copyfile(orig_file,backupfile)
else:
logMessage.info(orig_file + ' - does not exist. Creating new file.')
## Only got to here if does not match (ie new or different)
logMessage.info(orig_file + ' - has been amended. ( to match ' + new_temp_file + ' )')
#shutil.copyfile(new_temp_file, orig_file)
shutil.move(new_temp_file, orig_file)
## On some test servers this doesn't end up with correct ownership and permissions. So adding this to get round it
try:
os.chown(orig_file,new_user,new_group)
except PermissionError:
        logMessage.error('Unable to set ownership on ' + orig_file + '. Trying to change to chown ' + str(new_user) + ':' + str(new_group))
try:
os.chmod(orig_file,new_permission)
except:
        logMessage.error('Unable to set permissions on ' + orig_file + '. Trying to change to chmod ' + orig_file + ' ' + str(new_permission))
return 1
| 5,342,168
|
def check_public_key(pk):
""" Checks if a given string is a public (or at least if it is formatted as if it is).
:param pk: ECDSA public key to be checked.
:type pk: hex str
:return: True if the key matches the format, raise exception otherwise.
:rtype: bool
"""
prefix = pk[0:2]
l = len(pk)
if prefix not in ["02", "03", "04"]:
raise Exception("Wrong public key format.")
if prefix == "04" and l != 130:
raise Exception(
"Wrong length for an uncompressed public key: " + str(l))
elif prefix in ["02", "03"] and l != 66:
raise Exception("Wrong length for a compressed public key: " + str(l))
else:
return True
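# Illustrative call (not from the original source): a compressed key is 66 hex
# characters starting with "02" or "03"; the value below is a dummy string
# with the right shape, not a real key.
dummy_compressed = "02" + "ab" * 32  # 66 characters in total
assert check_public_key(dummy_compressed) is True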
| 5,342,169
|
def write_json(data):
"""
Writing dictionary contents to a json file.
Args:
data (dict) -- The dictionary that will be placed
into the json file.
    Returns:
        None -- the data is written to the json file at ``_UI_CONFIGURATION``.
"""
with open(_UI_CONFIGURATION, 'w') as json_file:
json.dump(data, json_file)
| 5,342,170
|
def empty_tree(input_list):
"""Recursively iterate through values in nested lists."""
for item in input_list:
if not isinstance(item, list) or not empty_tree(item):
return False
return True
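# Behaviour sketch (not part of the original module): only (possibly nested)
# empty lists count as an empty tree.
assert empty_tree([]) is True
assert empty_tree([[], [[]]]) is True
assert empty_tree([[], [1]]) is False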
| 5,342,171
|
def main():
""" Calls the TEST functions in this module. """
run_test_problem1()
| 5,342,172
|
def validate_config_params(optimo_url, version, access_key):
"""Validates and normalizes the parameters passed to
:class:`optimo.api.OptimoAPI` constructor.
:param optimo_url: string url of the optimoroute's service
:param version: ``int`` or ``str`` denoting the API version
:param access_key: string access key provided by optimoroute
:return: ``tuple`` of the, possibly adjusted, passed parameters.
:raises OptimoError: On providing incomplete or invalid config data
"""
if not optimo_url or not isinstance(optimo_url, basestring):
raise OptimoError("'optimo_url' must be a url string")
validate_url(optimo_url)
if not version or not isinstance(version, basestring) or not \
version.startswith('v'):
raise OptimoError("'version' must be a string denoting the API version "
"you want to use('v1', 'v2', etc")
if not access_key or not isinstance(access_key, basestring):
raise OptimoError("'access_key' must be the string access key provided "
"to you by optimoroute")
return optimo_url, version, access_key
| 5,342,173
|
def deque_and_stack():
"""Solution to exercise R-6.14.
Repeat the previous problem using the deque D and an initially empty
stack S.
--------------------------------------------------------------------------
Solution:
--------------------------------------------------------------------------
0. Initial state Deque [1, 2, 3, 4, 5, 6, 7, 8]
Stack []
1. popright() 4 nums to S Deque [1, 2, 3, 4]
from D Stack [8, 7, 6, 5]
2. popright() 1 num from D, Deque [4, 1, 2, 3]
addleft() that num to D Stack [8, 7, 6, 5]
3. addright() 1 nums to D Deque [4, 1, 2, 3, 5]
from S Stack [8, 7, 6]
4. popleft() 1 num from D, Deque [1, 2, 3, 5, 4]
addright() that num to D Stack [8, 7, 6]
5. addright() 3 nums to D Deque [1, 2, 3, 5, 4, 6, 7, 8]
from S Stack []
"""
deq = deque([1, 2, 3, 4, 5, 6, 7, 8])
stack = ArrayStack()
for _ in range(4):
stack.push(deq.pop()) # Step 1
deq.appendleft(deq.pop()) # Step 2
deq.append(stack.pop()) # Step 3
deq.append(deq.popleft()) # Step 4
for _ in range(3):
deq.append(stack.pop()) # Step 5
return deq, stack
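# Sanity check of the sequence worked out in the docstring above (assumes the
# module-level deque/ArrayStack imports the function itself relies on).
final_deque, _ = deque_and_stack()
assert list(final_deque) == [1, 2, 3, 5, 4, 6, 7, 8]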
| 5,342,174
|
def test_save_fails_without_path():
"""User asked for save without specifying path"""
with pytest.raises(ValueError):
watchdog = PrivacyWatchdog(
udl, target_delta=1e-5, target_epsilon=1.0, abort=True, save=True, path=None
)
| 5,342,175
|
def apply_subst(name, user):
"""
user.username forced in lowercase (VMware Horizon)
"""
name = re.sub(r'_SCIPER_DIGIT_', user.sciper_digit, name)
name = re.sub(r'_SCIPER_', user.sciper, name)
name = re.sub(r'_USERNAME_', user.username.lower(), name)
name = re.sub(r'_HOME_DIR_', user.home_dir, name)
name = re.sub(r'_GROUPNAME_', user.groupname, name)
name = re.sub(r'_DOMAIN_', user.domain, name)
name = re.sub(r'_UID_', user.uid, name)
name = re.sub(r'_GID_', user.gid, name)
name = re.sub(r'_FSTYPE_', user.automount_fstype, name)
name = re.sub(r'_HOST_', user.automount_host, name)
name = re.sub(r'_PATH_', user.automount_path, name)
name = re.sub(r'_OPTIONS_', user.automount_options, name)
return name
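# Hypothetical illustration (not from the original code base): a stand-in user
# object carrying just the attributes apply_subst() reads.
from types import SimpleNamespace
demo_user = SimpleNamespace(
    sciper='123456', sciper_digit='6', username='JDoe', home_dir='/home/jdoe',
    groupname='students', domain='example.org', uid='1000', gid='1000',
    automount_fstype='nfs', automount_host='files.example.org',
    automount_path='/export/jdoe', automount_options='rw')
assert apply_subst('_USERNAME_@_DOMAIN_', demo_user) == 'jdoe@example.org'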
| 5,342,176
|
def train_word2vec():
"""
train word2vec model
:return:
"""
class MyCorpus(object):
def __init__(self):
pass
def __iter__(self):
for fname in os.listdir(TEXT_DIR):
text = read_document_from_text(os.path.join(TEXT_DIR, fname))
segmented_words = '/'.join(cut_words(''.join(text))).split('/')
yield segmented_words
sentences = MyCorpus()
model = gensim.models.Word2Vec(sentences, workers=8)
model.save(WORD2VEC_SAVE_PATH)
| 5,342,177
|
def A_real_deph(Q_deph, Kt_real_deph, deltaT_diff_deph):
"""
    Calculates the real heat transfer area.
    Parameters
    ----------
    Q_deph : float
        The heat load of the dephlegmator, [W], [J/s]
    deltaT_diff_deph : float
        The temperature difference, [degrees Celsius]
    Kt_real_deph : float
        The heat transfer coefficient, [W/(m**2 * degrees Celsius)]
    Returns
    -------
    A_real_deph : float
        The real heat transfer area, [m**2]
    References
    ----------
    Романков, formula 4.72, p. 168
"""
return Q_deph / (Kt_real_deph * deltaT_diff_deph)
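# Worked example with made-up numbers: a 100 kW duty, an overall coefficient of
# 800 W/(m**2 * K) and a 20 K temperature difference give 6.25 m**2.
assert A_real_deph(100000, 800, 20) == 6.25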
| 5,342,178
|
def is_1d_like(oned_like_object: Union[np.ndarray, np.void]) -> bool:
"""
Checks if the input is either a 1D numpy array or a structured numpy row.
Parameters
----------
oned_like_object : Union[numpy.ndarray, numpy.void]
The object to be checked.
Raises
------
TypeError
The input is neither a numpy ndarray -- array-like object -- nor a
numpy void -- a row of a structured numpy array.
Returns
-------
is_1d_like_array : boolean
True if the input is either a 1-dimensional numpy array or a row of a
structured numpy array, False otherwise.
"""
is_1d_like_array = False
if isinstance(oned_like_object, np.void):
is_1d_like_array = is_structured_row(oned_like_object)
elif isinstance(oned_like_object, np.ndarray):
is_1d_like_array = is_1d_array(oned_like_object)
else:
raise TypeError('The input should either be a numpy array-like object '
'(numpy.ndarray) or a row of a structured numpy array '
'(numpy.void).')
return is_1d_like_array
| 5,342,179
|
def PDef (inDict):
""" Create TableDesc from the contents of a Python Dictionary
Returns new Table Descriptor
inDict = Python dictionary with values, must be in the form produced
by PGetDict
"""
################################################################
#
outTD = TableDesc(inDict["Table name"])
outTD.me = Obit.TableDescDef(inDict)
# Check
if len(outTD.Dict) <= 0:
raise RuntimeError("Failed to create valid Table Descriptor")
return outTD
# end PDef
| 5,342,180
|
def minf(ar, min_val=nan):
"""
    Gets the minimum value in the entire N-D array, ignoring NaN entries.
    @param ar The array.
    @param min_val Optional threshold; values less than or equal to min_val are
        excluded before taking the minimum.
"""
sa = shape(ar)
np = 1
for n in sa:
np *= n
ar2 = reshape(ar, np)
ar2 = delete(ar2, get_nan_inds(ar2), 0)
cinds = []
if not isnan(min_val):
for ii in range(0, len(ar2)):
if ar2[ii] <= min_val:
cinds.append(ii)
if len(cinds) > 0:
ar2 = delete(ar2, cinds, 0)
if size(ar2) == 0:
return nan
return min(ar2)
| 5,342,181
|
def read_from_netcdf(netcdf_file_name=None):
"""Reads boundary from NetCDF file.
:param netcdf_file_name: Path to input file. If None, will look for file in
repository.
:return: latitudes_deg: See doc for `_check_boundary`.
:return: longitudes_deg: Same.
"""
if netcdf_file_name is None:
module_dir_name = os.path.dirname(__file__)
parent_dir_name = '/'.join(module_dir_name.split('/')[:-1])
netcdf_file_name = '{0:s}/conus_polygon.nc'.format(parent_dir_name)
dataset_object = netCDF4.Dataset(netcdf_file_name)
latitudes_deg = numpy.array(
dataset_object.variables[NETCDF_LATITUDES_KEY][:]
)
longitudes_deg = numpy.array(
dataset_object.variables[NETCDF_LONGITUDES_KEY][:]
)
dataset_object.close()
longitudes_deg = _check_boundary(
latitudes_deg=latitudes_deg, longitudes_deg=longitudes_deg
)
return latitudes_deg, longitudes_deg
| 5,342,182
|
def update_pins(session):
"""
    Update the Python and Docker version pins in place.
"""
session.install("-e", ".[dev]")
session.run("python", "bin/update_pythons.py", "--force")
session.run("python", "bin/update_docker.py")
| 5,342,183
|
def generate_sector(size: int, object_weight: list) -> dict:
    """
    Generates a sector with weighted spawns.
    Args:
        size: Int representing the size of the sector (size x size)
        object_weight: A nested list of [object, weight] pairs
    Examples:
        generate_sector(6, [["*", 50], ["#", 10]]) would output map data where * is far more common than #
    Returns:
        A dict of lists, one list of map data per row
    """
    if size == 0:
        raise ValueError("The sector size can't be 0")
    size += 1
    output = {}
    placed_player = False
    totals = []
    running_total = 0
    for w in object_weight:
        running_total += w[1]
        totals.append(running_total)
    def next_object():
        """
        Gets a random object from the object / weight list.
        """
        ran = random.random() * totals[-1]
        i = bisect.bisect_right(totals, ran)
        return object_weight[i][0]
    for x in range(1, size):
        row = []
        for y in range(1, size):
            obj = next_object()
            if not placed_player and obj == "@":
                row.append(obj)
                placed_player = True
                continue
            elif placed_player and obj == "@":
                # Re-roll until something other than the player marker comes up
                while obj == "@":
                    obj = next_object()
            row.append(obj)
        output[x] = row
    return output
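# Usage sketch matching the docstring example (assumes the module-level random
# and bisect imports the function relies on); with no "@" in object_weight the
# player-placement branches are simply never taken.
sector = generate_sector(6, [["*", 50], ["#", 10]])
assert len(sector) == 6 and all(len(row) == 6 for row in sector.values())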
| 5,342,184
|
def run(port):
"""Run the daemon."""
reactor.listenTCP(port, NAFactory())
LOG.info('*** Tiedot NA Server starting @ port {}.'.format(port))
reactor.run()
| 5,342,185
|
def rbf_kernel(theta, h=-1):
"""Radial basis function kernel."""
sq_dist = _pdist(theta)
pairwise_dists = _squareform(sq_dist) ** 2
if h < 0: # if h < 0, using median trick
h = _numpy.median(pairwise_dists)
h = _numpy.sqrt(0.5 * h / _numpy.log(theta.shape[0] + 1))
# compute the rbf kernel
Kxy = _numpy.exp(-pairwise_dists / h ** 2 / 2)
dxkxy = -_numpy.matmul(Kxy, theta)
sumkxy = _numpy.sum(Kxy, axis=1)
for i in range(theta.shape[1]):
dxkxy[:, i] = dxkxy[:, i] + _numpy.multiply(theta[:, i], sumkxy)
dxkxy = dxkxy / (h ** 2)
return (Kxy, dxkxy)
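# Shape check only (assumes the module-level _numpy/_pdist/_squareform helpers
# the kernel relies on are available): 5 particles in 2 dimensions.
theta = _numpy.random.randn(5, 2)
Kxy, dxkxy = rbf_kernel(theta)
assert Kxy.shape == (5, 5) and dxkxy.shape == (5, 2)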
| 5,342,186
|
def get_default_out_of_workspace_subcommands():
"""Returns a dict of default out-of-workspace subcommands as <name: `CliCommand`>s
:return: A dict of <name: `CliCommand`>
"""
new_cmd = NewCommand()
return {new_cmd.name(): new_cmd}
| 5,342,187
|
def test_json_view():
"""Turns a Python object into a response."""
def func(request):
return {'x': 1}
response = decorators.json_view(func)(mock.Mock())
assert isinstance(response, http.HttpResponse)
eq_(response.content, '{"x": 1}')
eq_(response['Content-Type'], 'application/json')
eq_(response.status_code, 200)
| 5,342,188
|
def compile(what, mimetype, cwd=None, uri_cwd=None, debug=None):
"""
Compile a given text based on mimetype.
The text to compile must be provided as a Unicode object and this
function must return the compiled text as a Unicode object.
"""
try:
compiler = compilers[mimetype.lower()]
except KeyError:
raise RuntimeError('Compiler for mimetype %s not found.' % mimetype)
return compiler.compile(what, mimetype.lower(), cwd=cwd,
uri_cwd=uri_cwd, debug=debug)
| 5,342,189
|
def run_model(idx):
"""
Run BART on idx index from dataset.
Args:
idx (int): The index of the dataset.
Returns:
tuple: tuple with
fname: Filename
slice_num: Slice number.
prediction: Reconstructed image.
"""
masked_kspace, reg_wt, fname, slice_num, crop_size, num_low_freqs = dataset[idx]
prediction = cs_total_variation(
args, masked_kspace, reg_wt, crop_size, num_low_freqs
)
return fname, slice_num, prediction
| 5,342,190
|
def _saveJob(event):
"""
When a job is saved, if it is a docker run task, add the Dask Bokeh port to
the list of exposed ports.
"""
job = event.info
try:
from bson import json_util
jobkwargs = json_util.loads(job['kwargs'])
if ('docker_run_args' not in jobkwargs['task'] and
'scheduler' in jobkwargs['inputs']):
jobkwargs['task']['docker_run_args'] = {'ports': {'8787': None}}
job['kwargs'] = json_util.dumps(jobkwargs)
except Exception:
pass
| 5,342,191
|
async def test_invoke_default_fail(default_listener, inter):
"""Test a default listener invocation that fails during param conversion."""
inter.component.custom_id = "default_listener<>abc"
with pytest.raises(components.exceptions.ConversionError) as exc_info:
await default_listener(inter)
assert isinstance(exc_info.value.errors[0], components.exceptions.MatchFailure)
| 5,342,192
|
def is_tt_tensor(arg) -> bool:
"""Determine whether the object is a `TT-Tensor` or `WrappedTT` with underlying `TT-Tensor`.
:return: `True` if `TT-Tensor` or `WrappedTT(TT-Tensor)`, `False` otherwise
:rtype: bool
"""
return isinstance(arg, TT) or (isinstance(arg, WrappedTT) and
not arg.tt.is_tt_matrix)
| 5,342,193
|
def wait_for_event(event):
"""wait for the event to be set before doing anything"""
print('wait_for_event: starting')
event.wait()
print('wait_for_event: event.is_set()-> ', event.is_set())
| 5,342,194
|
def ensure_node():
"""
Ensure nodejs from nodesource is installed
"""
key = b"""
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
Comment: GPGTools - https://gpgtools.org
mQINBFObJLYBEADkFW8HMjsoYRJQ4nCYC/6Eh0yLWHWfCh+/9ZSIj4w/pOe2V6V+
W6DHY3kK3a+2bxrax9EqKe7uxkSKf95gfns+I9+R+RJfRpb1qvljURr54y35IZgs
fMG22Np+TmM2RLgdFCZa18h0+RbH9i0b+ZrB9XPZmLb/h9ou7SowGqQ3wwOtT3Vy
qmif0A2GCcjFTqWW6TXaY8eZJ9BCEqW3k/0Cjw7K/mSy/utxYiUIvZNKgaG/P8U7
89QyvxeRxAf93YFAVzMXhoKxu12IuH4VnSwAfb8gQyxKRyiGOUwk0YoBPpqRnMmD
Dl7SdmY3oQHEJzBelTMjTM8AjbB9mWoPBX5G8t4u47/FZ6PgdfmRg9hsKXhkLJc7
C1btblOHNgDx19fzASWX+xOjZiKpP6MkEEzq1bilUFul6RDtxkTWsTa5TGixgCB/
G2fK8I9JL/yQhDc6OGY9mjPOxMb5PgUlT8ox3v8wt25erWj9z30QoEBwfSg4tzLc
Jq6N/iepQemNfo6Is+TG+JzI6vhXjlsBm/Xmz0ZiFPPObAH/vGCY5I6886vXQ7ft
qWHYHT8jz/R4tigMGC+tvZ/kcmYBsLCCI5uSEP6JJRQQhHrCvOX0UaytItfsQfLm
EYRd2F72o1yGh3yvWWfDIBXRmaBuIGXGpajC0JyBGSOWb9UxMNZY/2LJEwARAQAB
tB9Ob2RlU291cmNlIDxncGdAbm9kZXNvdXJjZS5jb20+iQI4BBMBAgAiBQJTmyS2
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAWVaCraFdigHTmD/9OKhUy
jJ+h8gMRg6ri5EQxOExccSRU0i7UHktecSs0DVC4lZG9AOzBe+Q36cym5Z1di6JQ
kHl69q3zBdV3KTW+H1pdmnZlebYGz8paG9iQ/wS9gpnSeEyx0Enyi167Bzm0O4A1
GK0prkLnz/yROHHEfHjsTgMvFwAnf9uaxwWgE1d1RitIWgJpAnp1DZ5O0uVlsPPm
XAhuBJ32mU8S5BezPTuJJICwBlLYECGb1Y65Cil4OALU7T7sbUqfLCuaRKxuPtcU
VnJ6/qiyPygvKZWhV6Od0Yxlyed1kftMJyYoL8kPHfeHJ+vIyt0s7cropfiwXoka
1iJB5nKyt/eqMnPQ9aRpqkm9ABS/r7AauMA/9RALudQRHBdWIzfIg0Mlqb52yyTI
IgQJHNGNX1T3z1XgZhI+Vi8SLFFSh8x9FeUZC6YJu0VXXj5iz+eZmk/nYjUt4Mtc
pVsVYIB7oIDIbImODm8ggsgrIzqxOzQVP1zsCGek5U6QFc9GYrQ+Wv3/fG8hfkDn
xXLww0OGaEQxfodm8cLFZ5b8JaG3+Yxfe7JkNclwvRimvlAjqIiW5OK0vvfHco+Y
gANhQrlMnTx//IdZssaxvYytSHpPZTYw+qPEjbBJOLpoLrz8ZafN1uekpAqQjffI
AOqW9SdIzq/kSHgl0bzWbPJPw86XzzftewjKNbkCDQRTmyS2ARAAxSSdQi+WpPQZ
fOflkx9sYJa0cWzLl2w++FQnZ1Pn5F09D/kPMNh4qOsyvXWlekaV/SseDZtVziHJ
Km6V8TBG3flmFlC3DWQfNNFwn5+pWSB8WHG4bTA5RyYEEYfpbekMtdoWW/Ro8Kmh
41nuxZDSuBJhDeFIp0ccnN2Lp1o6XfIeDYPegyEPSSZqrudfqLrSZhStDlJgXjea
JjW6UP6txPtYaaila9/Hn6vF87AQ5bR2dEWB/xRJzgNwRiax7KSU0xca6xAuf+TD
xCjZ5pp2JwdCjquXLTmUnbIZ9LGV54UZ/MeiG8yVu6pxbiGnXo4Ekbk6xgi1ewLi
vGmz4QRfVklV0dba3Zj0fRozfZ22qUHxCfDM7ad0eBXMFmHiN8hg3IUHTO+UdlX/
aH3gADFAvSVDv0v8t6dGc6XE9Dr7mGEFnQMHO4zhM1HaS2Nh0TiL2tFLttLbfG5o
QlxCfXX9/nasj3K9qnlEg9G3+4T7lpdPmZRRe1O8cHCI5imVg6cLIiBLPO16e0fK
yHIgYswLdrJFfaHNYM/SWJxHpX795zn+iCwyvZSlLfH9mlegOeVmj9cyhN/VOmS3
QRhlYXoA2z7WZTNoC6iAIlyIpMTcZr+ntaGVtFOLS6fwdBqDXjmSQu66mDKwU5Ek
fNlbyrpzZMyFCDWEYo4AIR/18aGZBYUAEQEAAYkCHwQYAQIACQUCU5sktgIbDAAK
CRAWVaCraFdigIPQEACcYh8rR19wMZZ/hgYv5so6Y1HcJNARuzmffQKozS/rxqec
0xM3wceL1AIMuGhlXFeGd0wRv/RVzeZjnTGwhN1DnCDy1I66hUTgehONsfVanuP1
PZKoL38EAxsMzdYgkYH6T9a4wJH/IPt+uuFTFFy3o8TKMvKaJk98+Jsp2X/QuNxh
qpcIGaVbtQ1bn7m+k5Qe/fz+bFuUeXPivafLLlGc6KbdgMvSW9EVMO7yBy/2JE15
ZJgl7lXKLQ31VQPAHT3an5IV2C/ie12eEqZWlnCiHV/wT+zhOkSpWdrheWfBT+ac
hR4jDH80AS3F8jo3byQATJb3RoCYUCVc3u1ouhNZa5yLgYZ/iZkpk5gKjxHPudFb
DdWjbGflN9k17VCf4Z9yAb9QMqHzHwIGXrb7ryFcuROMCLLVUp07PrTrRxnO9A/4
xxECi0l/BzNxeU1gK88hEaNjIfviPR/h6Gq6KOcNKZ8rVFdwFpjbvwHMQBWhrqfu
G3KaePvbnObKHXpfIKoAM7X2qfO+IFnLGTPyhFTcrl6vZBTMZTfZiC1XDQLuGUnd
sckuXINIU3DFWzZGr0QrqkuE/jyr7FXeUJj9B7cLo+s/TXo+RaVfi3kOc9BoxIvy
/qiNGs/TKy2/Ujqp/affmIMoMXSozKmga81JSwkADO1JMgUy6dApXz9kP4EE3g==
=CLGF
-----END PGP PUBLIC KEY BLOCK-----
""".strip()
apt.trust_gpg_key(key)
apt.add_source('nodesource', 'https://deb.nodesource.com/node_10.x', 'main')
apt.install_packages(['nodejs'])
| 5,342,195
|
def top_predicted_outcomes(proba_pred, index_to_outcome_dict, N_top = 3):
""" extract the most likely outcomes based on a 1d-array of predicted probabilities
Parameters
----------
proba_pred: numpy 1d-array
array containing the predicted probabilities
index_to_outcome_dict: dict
        reversed index system for the outcomes
Returns
-------
dict
top outcomes along with their probability of occurrences
"""
# extract the indices of the top 'N_top' outcomes
idxs_top = np.argsort(proba_pred)[::-1][:N_top]
# top outcomes
top_outcomes = {index_to_outcome_dict[i+1]:proba_pred[i] for i in idxs_top}
return top_outcomes
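# Small illustration (not from the original source; assumes numpy as np): the
# index system is 1-based, matching the "i + 1" lookup above.
probs = np.array([0.1, 0.6, 0.3])
idx_to_outcome = {1: 'A', 2: 'B', 3: 'C'}
assert top_predicted_outcomes(probs, idx_to_outcome, N_top=2) == {'B': 0.6, 'C': 0.3}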
| 5,342,196
|
def parse_kafka_table(beamsqltable, name, logger):
# loop through the kafka structure
# map all key value pairs to 'key' = 'value',
# except properties
"""
parse kafka parameter
"""
ddl = ""
kafka = beamsqltable.spec.get("kafka")
if not kafka:
message = f"Beamsqltable {name} has no Kafka connector descriptor."
logger.warning(message)
return None
# check mandatory fields in Kafka, topic, bootstrap.server
if not kafka.get("topic"):
message = f"Beamsqltable {name} has no kafka topic."
logger.warning(message)
return None
try:
_ = kafka["properties"]["bootstrap.servers"]
except KeyError:
message = f"Beamsqltable {name} has no kafka bootstrap servers found"
logger.warning(message)
return None
# the other fields are inserted, there is not a check for valid fields yet
for kafka_key, kafka_value in kafka.items():
# properties are iterated separately
if kafka_key == 'properties':
for property_key, property_value in kafka_value.items():
ddl += f",'properties.{property_key}' = '{property_value}'"
else:
ddl += f", '{kafka_key}' = '{kafka_value}'"
key_format = kafka.get("key.format")
if key_format is None:
message = f"Beamsqltable {name} has no key.format but it is mandatory \
for upsert-kafka"
logger.warning(message)
return None
return ddl
| 5,342,197
|
def submit(client, job_path, output_job_path, files):
"""
Submit a new Fuzzing Job via the MSRD REST API.
"""
with open(job_path) as input_job_file:
job = json.load(input_job_file)
job = add_file_info_to_job(client, job, files)
if output_job_path:
        with open(output_job_path, 'w') as out_file:
json.dump(job, out_file, indent=2)
print_response(client.submit_job(job))
| 5,342,198
|
def linear_operator_from_num_variables(num_variables, type_, W):
"""Generates the linear operator for the TV lasso Nesterov function
from number of variables.
Parameters:
----------
num_variables : Integer. The total number of variables, including the
intercept variable(s).
"""
A = list()
for k in range(0,num_variables):
Ak = Ak_from_pairs(k,num_variables,type_,W)
A.append(Ak.tocsr())
return A
| 5,342,199
|