def get_top_experts_per_item_dispatcher(gates: Array, name: str,
num_selected_experts: int,
batch_priority: bool,
capacity: Optional[int] = None,
capacity_factor: Optional[float] = None,
**dispatcher_kwargs) -> BaseDispatcher:
"""Returns a dispatcher implementing Top-Experts-Per-Item routing.
For each item, the `num_selected_experts` experts with the largest gating
score are selected in a greedy fashion. However, because each expert has a
fixed `capacity`, if more items than `capacity` select a given expert some of
the assignments will be ignored. All top-1 choices have priority over top-2
choices and so on. In addition, the choices that are ignored also depend on
`batch_priority`. If it is False, the "Vanilla" algorithm is used, meaning
that items in earlier positions of the array have priority. If it is True, the
"Batch Priority Routing" algorithm (see https://arxiv.org/abs/2106.05974) is
used, which gives more priority to the items whose largest score is greater.
Args:
gates: (S, E) array with the gating values for each (item, expert).
These values will also be used as combine_weights for the selected pairs.
name: String with the type of dispatcher to use (supported values are
"einsum" and "indices").
num_selected_experts: Maximum number of experts to select per each item (K).
batch_priority: Whether to use batch priority routing or not.
capacity: If given, maximum number of items processed by each expert.
Either this or `capacity_factor` must be given.
capacity_factor: If given, sets the `capacity` to this factor of S * K / E.
Either this or `capacity` must be given.
**dispatcher_kwargs: Additional arguments for the dispatcher object.
Returns:
A dispatcher.
"""
if (capacity is None) == (capacity_factor is None):
raise ValueError(
"You must specify either 'capacity' or 'capacity_factor', and not both."
f" Current values are capacity = {capacity!r}, "
f"capacity_factor = {capacity_factor!r}")
if capacity is None:
group_size, num_experts = gates.shape
capacity = _compute_capacity(
# Target number of tokens to split among the `num_experts` experts.
num_tokens=group_size * num_selected_experts,
num_experts=num_experts,
capacity_factor=capacity_factor)
fn_map = {
"einsum": _get_top_experts_per_item_einsum_dispatcher,
"indices": _get_top_experts_per_item_expert_indices_dispatcher,
}
if name not in fn_map:
raise ValueError(f"Unknown dispatcher type: {name!r}")
return fn_map[name](gates, num_selected_experts, capacity, batch_priority,
**dispatcher_kwargs)
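# Hedged illustration (added, not part of the original module): it only spells out the
# capacity arithmetic described in the docstring above, i.e. capacity ~ capacity_factor * S * K / E.
# Any rounding or padding is handled by `_compute_capacity`, which is not shown here.
def _demo_capacity_estimate(group_size=8, num_experts=4, num_selected_experts=2, capacity_factor=1.0):
    """Example only: estimate the per-expert capacity for hypothetical sizes."""
    # e.g. 1.0 * 8 * 2 / 4 = 4 items per expert
    return int(capacity_factor * group_size * num_selected_experts / num_experts)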
| 20,600
|
def main(argv=None):
"""Run a Tensorflow model on the Iris dataset."""
args = parse_arguments(sys.argv if argv is None else argv)
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
# First find out if there's a task value on the environment variable.
# If there is none or it is empty define a default one.
task_data = env.get('task') or {'type': 'master', 'index': 0}
trial = task_data.get('trial')
if trial is not None:
output_dir = os.path.join(args.output_path, trial)
else:
output_dir = args.output_path
learn_runner.run(
experiment_fn=get_experiment_fn(args),
output_dir=output_dir)
| 20,601
|
def nix_prefetch_url(url, algo='sha256'):
"""Prefetches the content of the given URL."""
print(f'nix-prefetch-url {url}')
out = subprocess.check_output(['nix-prefetch-url', '--type', algo, url])
return out.decode('utf-8').rstrip()
| 20,602
|
def find_files_list(*args, **kwargs):
""" Returns a list of find_files generator"""
return list(find_files(*args, **kwargs))
| 20,603
|
def bool_from_string(subject, strict=False, default=False):
"""Interpret a subject as a boolean.
A subject can be a boolean, a string or an integer. Boolean type value
will be returned directly, otherwise the subject will be converted to
a string. A case-insensitive match is performed such that strings
matching 't','true', 'on', 'y', 'yes', or '1' are considered True and,
when `strict=False`, anything else returns the value specified by
'default'.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if isinstance(subject, bool):
return subject
if not isinstance(subject, str):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ", ".join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = ("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {"val": subject,
"acceptable": acceptable}
raise ValueError(msg)
else:
return default
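# Hedged usage sketch (added for illustration). It assumes the module-level constants used
# above follow the docstring, e.g. TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') and
# FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no').
def _demo_bool_from_string():
    assert bool_from_string('YES') is True             # case-insensitive match against TRUE_STRINGS
    assert bool_from_string('off') is False            # matches FALSE_STRINGS
    assert bool_from_string('maybe') is False          # unrecognized -> returns `default`
    assert bool_from_string('maybe', default=True) is True
    assert bool_from_string(1) is True                 # non-strings are stringified first
    try:
        bool_from_string('maybe', strict=True)         # unrecognized + strict -> ValueError
    except ValueError:
        pass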
| 20,604
|
def create_db():
"""Create the database using sqlalchemy."""
database_url = str(database.url).replace("mysql://", "mysql+pymysql://")
engine = sqlalchemy.create_engine(database_url)
sqlalchemy_metadata.create_all(engine)
| 20,605
|
def scan_quality_check(label: str,
pivots: list,
energies: list,
scan_res: float = rotor_scan_resolution,
used_methods: Optional[list] = None,
log_file: Optional[str] = None,
species: Optional[ARCSpecies] = None,
preserve_params: Optional[list] = None,
trajectory: Optional[list] = None,
original_xyz: Optional[dict] = None,
) -> Tuple[bool, str, str, dict]:
"""
Checks the scan's quality:
1. Based on intermediate conformers if available:
- whether the initial and final points are consistent
- whether it is relatively "smooth"
2. Based on the PES curve (if intermediate conformers are unavailable):
- whether the initial and final points are consistent
- whether it is relatively "smooth"
3. Common:
- whether the optimized geometry indeed represents the minimum energy conformer (for a non-TS species)
- whether the barrier height is reasonable
4. Based on requested parameters to preserve:
- whether specified atom distances to preserve criteria aren't violated
Args:
label (str): The species label.
pivots (list): The rotor pivots.
energies (list): The scan energies in kJ/mol.
scan_res (float, optional): The scan resolution in degrees.
used_methods (list, optional): Troubleshooting methods already tried out.
log_file (str, optional): The path to the output file.
species (ARCSpecies, optional): The ARCSpecies this scan is related to.
preserve_params (list, optional): Entries are length 2 lists of atom indices (1-indexed) between which the
distance as well as a torsion dihedral angle with these atoms as its pivots
must be preserved throughout the scan to a tolerance.
trajectory (list, optional): Entries are Cartesian coordinates along the scan trajectory.
original_xyz (dict, optional): The optimized coordinates for the species.
Returns: Tuple[bool, str, str, dict]
- Whether to invalidate this rotor, ``True`` to invalidate.
- Reason for invalidating this rotor.
- Error or warning message.
- Troubleshooting methods to apply, including conformational changes.
Todo:
- adjust to ND
"""
message, invalidation_reason = '', ''
invalidate = False
actions = dict()
used_methods = used_methods or list()
energies = np.array(energies, np.float64)
scan_conformers = None
# Check if the conformer based method is valid
if log_file:
try:
scan_conformers = parse_scan_conformers(log_file)
except NotImplementedError:
message = f'Rotor scan quality check using conformer internal coordinates ' \
f'has not been implemented for current ESS. Using PES curve based ' \
f'check for rotor scan of {label} between pivots {pivots}.'
logger.warning(message)
# 1. Check based on intermediate conformers
if scan_conformers is not None and (species is None or not species.is_ts):
bonds = scan_conformers[scan_conformers['type'] == 'R']
angles = scan_conformers[scan_conformers['type'] == 'A']
non_scan_rotor = scan_conformers[(scan_conformers['type'] == 'D') \
& (scan_conformers['scan'] == False)]
scan_rotor = scan_conformers[scan_conformers['scan'] == True]
# 1.1 Find significant changes of internal coordinates
expected_step_num = int(360 / scan_res)
# 5 below refers to type, atoms, scan, redundant and initial guess
actual_step_num = scan_conformers.shape[1] - 5
step_num = min(expected_step_num, actual_step_num)
changed_ic_dict = {}
for index_1 in range(step_num + 1):
if index_1 != 0:
# Compare the 'adjacent' conformers
index_2 = index_1 - 1
delta = scan_res # scan[index_1] - scan[index_2] = scan_res
elif step_num == expected_step_num:
# Compare the first and the last conformer
index_2 = step_num
delta = 0
else:
# When the scan is not finished as desired
continue
# Identify changes by type
bond_change = (2 * (bonds[index_1] - bonds[index_2]) /
(bonds[index_1] + bonds[index_2])).abs() > preserve_params_in_scan['bond']
angle_change = (angles[index_1] - angles[index_2]).abs() > preserve_params_in_scan['angle']
non_scan_rotor_change = check_torsion_change(torsions=non_scan_rotor,
index_1=index_1,
index_2=index_2,
threshold=preserve_params_in_scan['dihedral'])
scan_rotor_change = check_torsion_change(torsions=scan_rotor,
index_1=index_1,
index_2=index_2,
threshold=preserve_params_in_scan['dihedral'],
delta=delta)
# Summarize changes
change_sum = pd.concat([bond_change,
angle_change,
non_scan_rotor_change,
scan_rotor_change])
changed_ics = change_sum[change_sum == True].index.to_list()
# Save changes in the format of {conformer index: problematic ics}
if changed_ics:
invalidate = True
changed_ic_dict.update({index_1: changed_ics})
# 1.2 Check broken bond and any lowest conformation
# Exclude those with broken bonds (different species)
# Better to just freeze the broken bond when bond changing first happens
for conf_index, ics in changed_ic_dict.items():
# R(X,Y) refers to bonds in ics
broken_bonds = [ic for ic in ics if 'R' in ic]
if broken_bonds and conf_index != 0:
# Find the bond that changes the most, to avoid accompanied changes, like C-O transforms
# to C=O, which we don't want to freeze. If other bonds need to be frozen as well,
# it can be done in the following troubleshooting.
bonds = scan_conformers.loc[broken_bonds, :]
bond_change = (2 * (bonds[conf_index] - bonds[conf_index - 1]) /
(bonds[conf_index] + bonds[conf_index - 1])).abs()
broken_bond_label = bond_change.sort_values().index[-1] # the largest change
# Freeze the bonds, no further freezing other ics to prevent over-constraining
broken_bonds = [scan_conformers['atoms'][broken_bond_label]]
invalidate = True
invalidation_reason = f'Bond ({broken_bonds}) broke during the scan.'
message = f'Rotor scan of {label} between pivots {pivots} has broken bonds: ' \
f'{broken_bonds}. ARC will attempt to troubleshoot this rotor scan.'
logger.error(message)
actions = {'freeze': broken_bonds}
return invalidate, invalidation_reason, message, actions
# If no bond broke, ideally all conformers should be isomorphic.
# Switch to the lowest conformer
energy_diff = energies[0] - np.min(energies)
# Use tighter threshold to find lower conformer
if energy_diff >= 0.5 or energy_diff > 0.5 * (max(energies) - min(energies)) \
and (species is None or not species.is_ts):
invalidate = True
invalidation_reason = f'Another conformer for {label} exists which is ' \
f'{energy_diff:.2f} kJ/mol lower.'
message = f'Species {label} is not oriented correctly around pivots {pivots}, ' \
f'searching for a better conformation...'
logger.info(message)
# Find the dihedrals in degrees of the lowest conformer:
min_index = np.argmin(energies)
conf_xyzs = parse_1d_scan_coords(log_file)
actions = {'change conformer': conf_xyzs[min_index]}
return invalidate, invalidation_reason, message, actions
# 1.3 Check consistency
if 0 in changed_ic_dict.keys() and len(changed_ic_dict) == 1:
# A smooth scan with different initial and final conformer.
invalidate = True
invalidation_reason = 'Inconsistent initial and final conformers'
message = f'Rotor scan of {label} between pivots {pivots} has inconsistent initial ' \
f'and final conformers.\nInternal coordinates {changed_ic_dict[0]} are different. ' \
f'ARC will attempt to troubleshoot this rotor scan.'
logger.error(message)
actions = {'freeze': [scan_conformers['atoms'][ic_label]
for ic_label in changed_ic_dict[0]]}
return invalidate, invalidation_reason, message, actions
elif len(changed_ic_dict) > 0:
# Not a smooth scan.
invalidate = True
invalidation_reason = 'Significant difference observed between consecutive conformers'
message = f'Rotor scan of {label} between pivots {pivots} is inconsistent between ' \
f'two consecutive conformers.\nInconsistent consecutive conformers and problematic ' \
f'internal coordinates:'
changed_ic_label = []
for index, ics in changed_ic_dict.items():
if index > 0: # Do not include the initial/final differences which may include more ics
message += f'\nconformer #{index:>3d} / #{index+1:>3d} '
message += ', '.join(ics)
changed_ic_label += ics
message += '\nARC will attempt to troubleshoot this rotor scan.'
# list(set()) is used to remove duplicate labels
changed_ic_label = list(set(changed_ic_label))
logger.error(message)
actions = {'freeze': [scan_conformers['atoms'][ic_label]
for ic_label in changed_ic_label]}
return invalidate, invalidation_reason, message, actions
else:
# 2. Check rotor scan quality according to the PES curve
# 2.1. Check consistency between initial and final points
if abs(energies[-1] - energies[0]) > inconsistency_az:
# initial and final points differ by more than `inconsistency_az` kJ/mol.
# seems like this rotor broke the conformer. Invalidate
invalidate = True
invalidation_reason = f'initial and final points are inconsistent by more than {inconsistency_az:.2f} kJ/mol'
message = f'Rotor scan of {label} between pivots {pivots} is inconsistent by more ' \
f'than {inconsistency_az:.2f} kJ/mol between initial and final positions. ' \
f'Initial energy = {energies[0]}, final energy = {energies[-1]}. ARC will ' \
f'attempt to troubleshoot this rotor scan.'
logger.error(message)
actions = {'inc_res': None, 'freeze': 'all'}
return invalidate, invalidation_reason, message, actions
# 2.2. Check consistency between consecutive points
for j in range(len(energies) - 1):
if abs(energies[j] - energies[j + 1]) > inconsistency_ab * np.max(energies):
# Two consecutive points on the scan differ by more than `inconsistency_ab` kJ/mol.
# This is a serious inconsistency. Invalidate
invalidate = True
invalidation_reason = f'Two consecutive points are inconsistent by more than ' \
f'{inconsistency_ab * max(energies):.2f} kJ/mol'
message = f'Rotor scan of {label} between pivots {pivots} is inconsistent by ' \
f'more than {inconsistency_ab * max(energies):.2f} kJ/mol between '
f'two consecutive points. ARC will attempt to troubleshoot this rotor scan.'
logger.error(message)
# Propose a method
# Try increasing resolution firstly, and try increasing res. and freezing all
# torsions jointly, afterwards.
# TODO: If we figure out that solely increasing res. is not effective,
# we can simplify the process to actions = {'inc_res': None, 'freeze': 'all'}
if any(['scan_res' in used_method for used_method in used_methods]):
# Check if increasing scan resolution is ever applied
if not any([used_method['scan_trsh'] != '' for used_method in used_methods]):
# Case where freezing torsions has not been applied
actions = {'inc_res': None, 'freeze': 'all'}
else:
# Since all torsions are frozen, there's not much we can do except increasing
# scan resolution. But it is not that effective either. So stop and do nothing.
pass
else:
# Case where neither increasing scan resolution nor freezing
# torsions has been applied
actions = {'inc_res': None}
return invalidate, invalidation_reason, message, actions
# 2.3 Check energy and change conformation if needed:
energy_diff = energies[0] - np.min(energies)
if energy_diff >= 2 or energy_diff > 0.5 * (max(energies) - min(energies)) \
and (species is None or not species.is_ts):
invalidate = True
invalidation_reason = f'Another conformer for {label} exists which is {energy_diff:.2f} kJ/mol lower.'
message = f'Species {label} is not oriented correctly around pivots {pivots}. ' \
f'Another conformer exists which is {energy_diff:.2f} kJ/mol lower. ' \
f'Searching for a better conformation...'
logger.info(message)
# Find the lowest conformer, and use the new conformer for further jobs.
# Since at this point, the scan has passed previous checks, the possibility
# to switch to a non-isomorphic conformer is low.
min_index = np.argmin(energies)
conf_xyzs = parse_1d_scan_coords(log_file)
actions = {'change conformer': conf_xyzs[min_index]}
return invalidate, invalidation_reason, message, actions
# 3. Check the barrier height
if (np.max(energies) - np.min(energies)) > maximum_barrier:
# The barrier for the internal rotation is higher than `maximum_barrier`
num_wells = determine_rotor_symmetry(label=label,
pivots=pivots,
rotor_path='',
energies=energies,
return_num_wells=True,
log=False,
)[-1]
if num_wells == 1:
invalidate = True
invalidation_reason = f'The rotor scan has a barrier of {np.max(energies) - np.min(energies):.2f} ' \
f'kJ/mol, which is higher than the maximal barrier for rotation ' \
f'({maximum_barrier:.2f} kJ/mol)'
message = f'Rotor scan of {label} between pivots {pivots} has a barrier ' \
f'larger than {maximum_barrier:.2f} kJ/mol. Invalidating rotor.'
logger.warning(message)
return invalidate, invalidation_reason, message, actions
else:
logger.warning(f'The maximal barrier for rotor {pivots} of {label} is '
f'{(np.max(energies) - np.min(energies)):.2f} kJ/mol, which is higher than the set threshold '
f'of {maximum_barrier} kJ/mol. Since this mode when treated as torsion has {num_wells} wells, '
f'this mode is not invalidated: treating it as a vibrational mode will be less accurate than '
f'the hindered rotor treatment, since the entropy contribution from the population of '
f'this species at the higher wells will not be taken into account. NOT invalidating this '
f'torsional mode.')
# 4. Check requested atom constraints are preserved (particularly useful for TSs)
if preserve_params is not None:
success = True
pivots = list()
for atoms in preserve_params:
for i, xyz in enumerate(trajectory):
if i != 0:
# check that the distance between this atom pair is preserved relative to the previous entry
# in the trajectory, as well as relative to the original value (final_xyz).
current_distance = calculate_distance(coords=xyz, atoms=atoms, index=1)
previous_distance = calculate_distance(coords=trajectory[i - 1], atoms=atoms, index=1)
original_distance = calculate_distance(coords=original_xyz, atoms=atoms, index=1)
if not (previous_distance * (1.0 - preserve_params_in_scan['bond']) <
current_distance <
previous_distance * (1.0 + preserve_params_in_scan['bond'])) \
or not (original_distance * (1.0 - preserve_params_in_scan['bond']) <
current_distance <
original_distance * (1.0 + preserve_params_in_scan['bond'])):
success = False
pivots.append(atoms)
message = f"The rotor breaks the TS around pivots {pivots}: In trajectory {i}, the distance " \
f"between pivots is {current_distance} Angstroms, which is " \
f"{current_distance / previous_distance:.2f} of the previous frame, and " \
f"{current_distance / original_distance:.2f} of the original geometry."
break
if species.mol is not None:
scan = [determine_smallest_atom_index_in_scan(atom1=species.mol.atoms.index(atoms[0]),
atom2=species.mol.atoms.index(atoms[1]),
mol=species.mol)]
scan.extend(atoms)
scan.append(
determine_smallest_atom_index_in_scan(atom1=species.mol.atoms.index(atoms[1]),
atom2=species.mol.atoms.index(atoms[0]),
mol=species.mol))
# check that a dihedral angle with this atom pair as its pivots is preserved relative to the
# previous entry in the trajectory, as well as relative to the original value (final_xyz).
current_dihedral = calculate_dihedral_angle(coords=xyz, torsion=scan, index=1)
previous_dihedral = calculate_dihedral_angle(coords=trajectory[i - 1], torsion=scan, index=1)
original_dihedral = calculate_dihedral_angle(coords=original_xyz, torsion=scan, index=1)
if abs(current_dihedral - previous_dihedral) > preserve_params_in_scan['dihedral'] \
or abs(current_dihedral - original_dihedral) > preserve_params_in_scan['dihedral']:
success = False
pivots.append(atoms)
message = f"The rotor breaks the TS around pivots {pivots}: In trajectory {i}, the " \
f"dihedral angle is {current_dihedral} degrees, a " \
f"{abs(current_dihedral - previous_dihedral)} change relative to the previous " \
f"frame, and a {abs(current_dihedral - original_dihedral)} change relative to " \
f"the original geometry."
break
if species.mol is None:
logger.warning(
f'Cannot check that the dihedral angle of {species.label} is consistent throughout rotor '
f'scans without a .mol attribute')
if not success:
invalidate = True
invalidation_reason = message
logger.info(message)
actions = dict()
return invalidate, invalidation_reason, message, actions
return invalidate, invalidation_reason, message, actions
| 20,606
|
def delete(group_name: str) -> None:
"""Delete a task group"""
delete_group(group_name)
| 20,607
|
def highlight(elements, effect_time=None, color=None, border=None, create_dump=False):
"""Highlights (blinks) a Selenium WebDriver element"""
if effect_time is None:
effect_time = DEFAULT_EFFECT_TIME
if color is None:
color = DEFAULT_EFFECT_COLOR
if border is None:
border = DEFAULT_BORDER_WIDTH
original_styles = dict()
elements_list = elements
if not isinstance(elements_list, list):
elements_list = list()
elements_list.append(elements)
for element in elements_list:
original_styles[element] = element.get_attribute('style')
new_style = original_styles[element] + "; border: {0}px solid {1};".format(border, color)
apply_style(element, new_style)
if create_dump:
dump(elements_list[0].parent, reason=highlight.__name__)
sleep(effect_time)
for element in elements_list:
apply_style(element, original_styles[element])
| 20,608
|
def save_log(epoch, dataset_name, results_train, results_val):
"""
:param epoch:
:param dataset_name:
:param results_train: (per-step log in dataframe, epoch time) from training data
:param results_val: (per-step log in dataframe, epoch time) from validation data
"""
summ_cols = ["loss", "trjabs", "trjrel", "roterr", "deprel", "time"]
summary = save_results(epoch, dataset_name, results_train, results_val, summ_cols, "history.csv")
other_cols = [colname for colname in list(results_train[0]) if colname not in summ_cols]
_ = save_results(epoch, dataset_name, results_train, results_val, other_cols, "mean_result.csv")
save_scales(epoch, results_train[0], results_val[0], "scales.txt")
draw_and_save_plot(summary, "history.png")
| 20,609
|
def cachecontrol_logging_hook(app):
"""
Reroute cachecontrol logger to use cement log handlers.
"""
from cachecontrol.controller import logger
formatter = logging.Formatter(LOG_FORMAT)
for handler in app.log.backend.handlers:
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
| 20,610
|
def get_msg_timeout(options):
"""Reads the configured sbd message timeout from each device.
Key arguments:
options -- options dictionary
Return Value:
msg_timeout (integer, seconds)
"""
# get the defined msg_timeout
msg_timeout = -1 # default sbd msg timeout
cmd = generate_sbd_command(options, "dump")
(return_code, out, err) = run_command(options, cmd)
for line in out.split("\n"):
if len(line) == 0:
continue
if "msgwait" in line:
tmp_msg_timeout = int(line.split(':')[1])
if -1 != msg_timeout and tmp_msg_timeout != msg_timeout:
logging.warn(\
"sbd message timeouts differ in different devices")
# we only save the highest timeout
if tmp_msg_timeout > msg_timeout:
msg_timeout = tmp_msg_timeout
return msg_timeout
| 20,611
|
def odata_getone(url, headers):
"""
Get a single object from Odata
"""
r = requests.get(url, headers=headers)
if not r.ok:
logging.warning(f"Fetch url {url} hit {r.status_code}")
return None
rjson = r.json()
if 'error' in rjson:
logging.warning(f"Fetching of {url} returned error {r.text}")
return None
return rjson
| 20,612
|
def test_create_pause_action(
decoy: Decoy,
session_view: SessionView,
engine_store: EngineStore,
unique_id: str,
current_time: datetime,
client: TestClient,
) -> None:
"""It should handle a pause action."""
session_created_at = datetime.now()
actions = SessionAction(
actionType=SessionActionType.PAUSE,
createdAt=current_time,
id=unique_id,
)
next_session = SessionResource(
session_id="unique-id",
create_data=BasicSessionCreateData(),
created_at=session_created_at,
actions=[actions],
)
decoy.when(
session_view.with_action(
session=prev_session,
action_id=unique_id,
action_data=SessionActionCreateData(actionType=SessionActionType.PAUSE),
created_at=current_time,
),
).then_return((actions, next_session))
response = client.post(
"/sessions/session-id/actions",
json={"data": {"actionType": "pause"}},
)
verify_response(response, expected_status=201, expected_data=actions)
decoy.verify(engine_store.runner.pause())
| 20,613
|
def _tokenizer_from_json(json_string):
"""Parses a JSON tokenizer configuration file and returns a
tokenizer instance.
# Arguments
json_string: JSON string encoding a tokenizer configuration.
# Returns
A Keras Tokenizer instance
"""
tokenizer_config = json.loads(json_string)
config = tokenizer_config.get('config')
word_counts = json.loads(config.pop('word_counts'))
word_docs = json.loads(config.pop('word_docs'))
index_docs = json.loads(config.pop('index_docs'))
# Integer indexing gets converted to strings with json.dumps()
index_docs = {int(k): v for k, v in index_docs.items()}
index_word = json.loads(config.pop('index_word'))
index_word = {int(k): v for k, v in index_word.items()}
word_index = json.loads(config.pop('word_index'))
tokenizer = tf.keras.preprocessing.text.Tokenizer(**config)
tokenizer.word_counts = word_counts
tokenizer.word_docs = word_docs
tokenizer.index_docs = index_docs
tokenizer.word_index = word_index
tokenizer.index_word = index_word
return tokenizer
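# Hedged round-trip sketch (added for illustration): serialize a Keras Tokenizer with its
# to_json() method and restore it with the helper above. Assumes `tf` and `json` are the
# modules already imported by this file.
def _demo_tokenizer_round_trip():
    tok = tf.keras.preprocessing.text.Tokenizer(num_words=100)
    tok.fit_on_texts(["the quick brown fox", "the lazy dog"])
    restored = _tokenizer_from_json(tok.to_json())
    assert restored.word_index == tok.word_index
    assert restored.texts_to_sequences(["the fox"]) == tok.texts_to_sequences(["the fox"])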
| 20,614
|
def test_get_timerange(dataset_container):
"""Test that timerange returns a list with the correct timestamps."""
dataset_container.append(datetime(2018, 1, 1, 12, 0, 0), 1)
dataset_container.append(datetime(2018, 1, 1, 12, 1, 0), 2)
dataset_container.append(datetime(2018, 1, 1, 12, 2, 0), 3)
timerange = dataset_container.timerange()
assert timerange == [datetime(2018, 1, 1, 12, 0, 0),
datetime(2018, 1, 1, 12, 2, 0)]
| 20,615
|
def get_all_pip_requirements_files() -> List[Path]:
"""
If the root level hi-ml directory is available (e.g. it has been installed as a submodule or
downloaded directly into a parent repo) then we must add its pip requirements to any environment
definition. This function returns a list of the necessary pip requirements files. If the hi-ml
root directory does not exist (e.g. hi-ml has been installed as a pip package), this is not
necessary and so this function returns an empty list.
:return: A list of pip requirements files in the hi-ml and hi-ml-azure packages if relevant,
or else an empty list
"""
files = []
if paths.is_himl_used_from_git_repo():
git_root = paths.git_repo_root_folder()
for folder in [Path("hi-ml") / "run_requirements.txt", Path("hi-ml-azure") / "run_requirements.txt"]:
files.append(git_root / folder)
return files
| 20,616
|
def load_data(dataset_name: str, split: str) -> object:
"""
Load the data for the requested split using the `datasets` library.
Parameters
----------
dataset_name : str
name of the dataset to be downloaded.
split : str
type of split (train or test).
Returns
-------
object
The loaded dataset split.
"""
data = load_dataset(dataset_name, split=split)
logger.info(split + " dataset downloaded!")
return data
| 20,617
|
def horizontal_south_link_neighbor(shape, horizontal_ids, bad_index_value=-1):
"""ID of south horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids *must be of len(horizontal_links)*.
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of south horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import (horizontal_link_ids,
... horizontal_south_link_neighbor)
>>> rmg = RasterModelGrid((4, 5))
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_south_link_neighbor(rmg.shape, horizontal_links)
array([-1, -1, -1, -1, 0, 1, 2, 3, 9, 10, 11, 12, 18, 19, 20, 21])
"""
links = np.roll(horizontal_ids.reshape((shape[0], shape[1] - 1)), 1, axis=0)
links[0, :] = bad_index_value
return links.reshape(-1)
| 20,618
|
def main():
""" Copy necessary arduino files to their appropriate locations. """
args = get_args()
# Select option based on argparse results
if args.all is not False:
# Copy `lib` to all directories.
directories = os.listdir(".")
exclude_dirs = [".git", "lib"]
for directory in directories:
if directory not in exclude_dirs and os.path.isdir(directory):
subprocess.Popen(["cp", "-r", "lib/", directory + "/"])
elif args.file is not None:
# Check to make sure filepath is valid, then copy to file directory.
if os.path.isdir(args.file):
subprocess.Popen(["cp", "-r", "lib/", args.file])
else:
raise ValueError("File path given is not a real directory or path!")
else:
raise Exception("Need to give an --all or --file option!")
| 20,619
|
def _get_output(algorithm, iport=0, iconnection=0, oport=0, active_scalar=None,
active_scalar_field='point'):
"""A helper to get the algorithm's output and copy input's vtki meta info"""
ido = algorithm.GetInputDataObject(iport, iconnection)
data = wrap(algorithm.GetOutputDataObject(oport))
data.copy_meta_from(ido)
if active_scalar is not None:
data.set_active_scalar(active_scalar, preference=active_scalar_field)
return data
| 20,620
|
def test_annotated_top_images_dataset_init_annotation_count(
top_images_root, top_images_annotations_csv_file,
top_image_annotations, annotation_count):
"""Test AnnotatedTopImagesDataset.__init__, setting annotation_count."""
# Remove all L0 annotations.
banned = conftest.layer(0)
rows = [conftest.HEADER]
rows += [anno for anno in top_image_annotations if anno[0] != banned]
# Add an extra one for L1.
expanded = conftest.layer(1)
rows += [anno for anno in top_image_annotations if anno[0] == expanded]
# Overwrite annotations file with our janky modifications.
with top_images_annotations_csv_file.open('w') as handle:
writer = csv.writer(handle)
writer.writerows(rows)
annotated_top_images_dataset = datasets.AnnotatedTopImagesDataset(
top_images_root,
annotations_csv_file=top_images_annotations_csv_file,
layer_column=conftest.LAYER_COLUMN,
unit_column=conftest.UNIT_COLUMN,
annotation_column=conftest.ANNOTATION_COLUMN,
annotation_count=annotation_count,
display_progress=False)
assert str(top_images_root).endswith(annotated_top_images_dataset.name)
# Yeah, yeah, yeah, this is bad practice, I know...
if annotation_count is None:
assert len(annotated_top_images_dataset.samples) == conftest.N_SAMPLES
actuals = [
sample for sample in annotated_top_images_dataset.samples
if sample.layer == banned
]
assert len(actuals) == conftest.N_UNITS_PER_LAYER
for actual in actuals:
assert actual.annotations == ()
actuals = [
sample for sample in annotated_top_images_dataset.samples
if sample.layer == expanded
]
assert len(actuals) == conftest.N_UNITS_PER_LAYER
for actual in actuals:
assert len(actual.annotations) == 2
else:
actual = len(annotated_top_images_dataset.samples)
expected = (conftest.N_LAYERS - 1) * conftest.N_UNITS_PER_LAYER
assert actual == expected
layers = {
sample.layer for sample in annotated_top_images_dataset.samples
}
assert banned not in layers
assert expanded in layers
lengths = {
len(sample.annotations)
for sample in annotated_top_images_dataset.samples
}
assert lengths == {annotation_count}
| 20,621
|
def read_cesar_out(cesar_line):
"""Return ref and query sequence."""
cesar_content = cesar_line.split("\n")
# del cesar_content[0]
fractions = parts(cesar_content, 4)
cesar_fractions = []
for fraction in fractions:
if len(fraction) == 1:
continue
ref_seq = fraction[1]
query_name = fraction[2][1:]
query_seq = fraction[3]
if len(ref_seq) != len(query_seq):
die("Error! Ref and query sequences must have the same length!")
elif len(ref_seq) == 0:
die("Error! The input is empty!")
fraction = (query_name, ref_seq, query_seq)
cesar_fractions.append(fraction)
return cesar_fractions
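# Hedged usage sketch (added for illustration). It assumes `parts(lst, 4)` splits a list into
# consecutive chunks of four lines, matching the 4-line CESAR blocks read above
# (header, reference sequence, '>query_name', query sequence).
def _demo_read_cesar_out():
    cesar_line = "\n".join([
        "# header line",
        "ATG---CCC",
        ">query_1",
        "ATGAAACCC",
    ])
    assert read_cesar_out(cesar_line) == [("query_1", "ATG---CCC", "ATGAAACCC")]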
| 20,622
|
def aa_i2c_slave_write_stats (aardvark):
"""usage: int return = aa_i2c_slave_write_stats(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_slave_write_stats(aardvark)
| 20,623
|
def detect_os():
"""
Detects the Operating System and sets a global variable to target OS
specific features to the right platform.
Options for g_platform_os are: windows, linux, mac
"""
global g_platform_os
os = platform.system()
if os == "Windows":
g_platform_os = "windows"
elif os == "Linux":
g_platform_os = "linux"
elif os == "Darwin":
g_platform_os = "mac"
| 20,624
|
def get_aliases_user(request):
"""
Returns all the Aliases
API_ENDPOINT:api/v1/aliases
----------
payload
{
"email":"a@a.com"
}
"""
alias_array = []
payload = {}
print("came to get_aliases_user()")
data_received = json.loads(request.body)
email = data_received["email"]
print(f"Email body:{email}")
db_data = Aliases.objects.filter(user__email=email)
print(f"QuerySet->{db_data}")
for x in db_data:
alias_array.append(x.alias)
return JsonResponse({"alias":alias_array}, safe=False)
| 20,625
|
def _sorted_attributes(features, attrs, attribute):
"""
When the list of attributes is a dictionary, use the
'sort_key' parameter to order the feature attributes.
Features are grouped by the cutting `attribute`, the minimum
'sort_key' value is found for each group, and the attribute
values are returned sorted by that minimum (optionally
reversed via 'reverse').
"""
sort_key = attrs.get('sort_key')
reverse = attrs.get('reverse')
assert sort_key is not None, "Configuration " + \
"parameter 'sort_key' is missing, please " + \
"check your configuration."
# first, we find the _minimum_ ordering over the
# group of key values. this is because we only do
# the intersection in groups by the cutting
# attribute, so can only sort in accordance with
# that.
group = dict()
for feature in features:
val = feature[1].get(sort_key)
key = feature[1].get(attribute)
val = _no_none_min(val, group.get(key))
group[key] = val
# extract the sorted list of attributes from the
# grouped (attribute, order) pairs, ordering by
# the order.
all_attrs = sorted(group.items(),
key=lambda x: x[1], reverse=bool(reverse))
# strip out the sort key in return
return [x[0] for x in all_attrs]
| 20,626
|
def get_gpus(num_gpu=1, worker_index=-1, format=AS_STRING):
"""Get list of free GPUs according to nvidia-smi.
This will retry for ``MAX_RETRIES`` times until the requested number of GPUs are available.
Args:
:num_gpu: number of GPUs desired.
:worker_index: index "hint" for allocation of available GPUs.
Returns:
Comma-delimited string of GPU ids, or raises an Exception if the requested number of GPUs could not be found.
"""
# get list of gpus (index, uuid)
list_gpus = subprocess.check_output(["nvidia-smi", "--list-gpus"]).decode()
logger.debug("all GPUs:\n{0}".format(list_gpus))
# parse index and guid
gpus = [x for x in list_gpus.split('\n') if len(x) > 0]
def parse_gpu(gpu_str):
cols = gpu_str.split(' ')
return cols[5].split(')')[0], cols[1].split(':')[0]
gpu_list = [parse_gpu(gpu) for gpu in gpus]
free_gpus = []
retries = 0
while len(free_gpus) < num_gpu and retries < MAX_RETRIES:
smi_output = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-compute-apps=gpu_uuid"]).decode()
logger.debug("busy GPUs:\n{0}".format(smi_output))
busy_uuids = [x for x in smi_output.split('\n') if len(x) > 0]
for uuid, index in gpu_list:
if uuid not in busy_uuids:
free_gpus.append(index)
if len(free_gpus) < num_gpu:
logger.warn("Unable to find available GPUs: requested={0}, available={1}".format(num_gpu, len(free_gpus)))
retries += 1
time.sleep(30 * retries)
free_gpus = []
logger.info("Available GPUs: {}".format(free_gpus))
# if still can't find available GPUs, raise exception
if len(free_gpus) < num_gpu:
smi_output = subprocess.check_output(["nvidia-smi", "--format=csv", "--query-compute-apps=gpu_uuid,pid,process_name,used_gpu_memory"]).decode()
logger.info(": {0}".format(smi_output))
raise Exception("Unable to find {} free GPU(s)\n{}".format(num_gpu, smi_output))
# Get logical placement
num_available = len(free_gpus)
if worker_index == -1:
# use original random placement
random.shuffle(free_gpus)
proposed_gpus = free_gpus[:num_gpu]
else:
# ordered by worker index
if worker_index * num_gpu + num_gpu > num_available:
worker_index = worker_index * num_gpu % num_available
proposed_gpus = free_gpus[worker_index * num_gpu:(worker_index * num_gpu + num_gpu)]
logger.info("Proposed GPUs: {}".format(proposed_gpus))
if format == AS_STRING:
return ','.join(str(x) for x in proposed_gpus)
elif format == AS_LIST:
return proposed_gpus
else:
raise Exception("Unknown GPU format")
| 20,627
|
def ldensity_laplace_uniform_dist(prob_laplace, location, scale, low, high,
val):
"""
A mixture of a Laplace and a uniform distribution
"""
return np.log((prob_laplace * np.exp(-np.abs(val - location) / scale) / (2 * scale))
+ ((1 - prob_laplace) / (high - low)))
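# Hedged sanity-check sketch (added for illustration): the mixture density above should
# integrate to roughly 1 over a grid that covers [low, high] and most of the Laplace mass.
# The numbers below are arbitrary example values.
def _demo_ldensity_integrates_to_one():
    low, high = -5.0, 5.0
    grid = np.linspace(low, high, 20001)
    dens = np.exp(ldensity_laplace_uniform_dist(0.7, location=0.0, scale=0.5,
                                                low=low, high=high, val=grid))
    total = float(np.sum(dens) * (grid[1] - grid[0]))  # simple Riemann sum
    assert abs(total - 1.0) < 1e-2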
| 20,628
|
def readConfirmInput():
"""asks user for confirmation
Returns:
bool: True if user confirms, False if doesn't
"""
try:
result = readUserInput("(y/n): ") # UnrecognisedSelectionException
return 'y' in result[0].lower() # IndexError
except (UnrecognisedSelectionException, IndexError) as e:
return False
| 20,629
|
def add_iSWAP_like_twoQ_clifford(index, gate_seq_1, gate_seq_2, **kwargs):
"""Add iSWAP like two Qubit Clifford.
(24*24*3*3 = 5184)
(gate_seq_1: gate seq. of qubit #1, gate_seq_2: gate seq. of qubit #2)
"""
generator = kwargs.get('generator', 'CZ')
# randomly sample from single qubit cliffords (24)
index_1 = index % 24
# randomly sample from single qubit cliffords (24)
index_2 = (index // 24) % 24
# randomly sample from S1_Y2p (3) or S1 (3)
index_3 = (index // 24 // 24) % 3
# randomly sample from S1_X2p (3) or S1 (3)
index_4 = (index // 24 // 24 // 3) % 3
if generator == 'CZ':
add_singleQ_clifford(index_1, gate_seq_1)
add_singleQ_clifford(index_2, gate_seq_2)
gate_seq_1.append(gates.I)
gate_seq_2.append(gates.CZ)
gate_seq_1.append(gates.Y2p)
gate_seq_2.append(gates.X2m)
gate_seq_1.append(gates.I)
gate_seq_2.append(gates.CZ)
add_singleQ_S1_Y2p(index_3, gate_seq_1)
add_singleQ_S1_X2p(index_4, gate_seq_2)
elif generator == 'iSWAP':
add_singleQ_clifford(index_1, gate_seq_1)
add_singleQ_clifford(index_2, gate_seq_2)
gate_seq_1.append(gates.I)
gate_seq_2.append(gates.iSWAP)
add_singleQ_S1(index_3, gate_seq_1)
add_singleQ_S1(index_4, gate_seq_2)
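# Hedged sketch (added for illustration): how a raw index in [0, 5184) decomposes into the
# four sub-indices used above (24 * 24 * 3 * 3 = 5184). Purely arithmetic; it does not build
# any gate sequences.
def _demo_iswap_index_decomposition(index=5000):
    index_1 = index % 24                    # single-qubit Clifford for qubit 1
    index_2 = (index // 24) % 24            # single-qubit Clifford for qubit 2
    index_3 = (index // 24 // 24) % 3       # S1_Y2p (or S1) element
    index_4 = (index // 24 // 24 // 3) % 3  # S1_X2p (or S1) element
    assert 0 <= index_1 < 24 and 0 <= index_2 < 24
    assert 0 <= index_3 < 3 and 0 <= index_4 < 3
    return index_1, index_2, index_3, index_4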
| 20,630
|
def linkElectron(inLep, inLepIdx, lepCollection, genPartCollection):
"""process input Electron, find lineage within gen particles
pass "find" as inLepIdx of particle to trigger finding within the method"""
linkChain = []
lepIdx = -1
if inLepIdx == "find":
for Idx, lep in enumerate(lepCollection):
if inLep == lep:
lepIdx = Idx
break
elif -1 < inLepIdx < len(lepCollection):
lepIdx = inLepIdx
else:
lepIdx = -999
tmpMoth = inLep.genPartIdx
#temporary deltaR with a default (only stored under logic error) and a calculation against the 'head' of the chain
tmpDeltaR = -9999.786
if len(linkChain) > 0:
tmpDeltaR = deltaR(inLep, linkChain[0][6])
elif len(linkChain) == 0:
tmpDeltaR = 0.0
linkChain.append( ("Electron", lepIdx, tmpMoth, inLep.pdgId, tmpDeltaR, inLep.genPartFlav, inLep) )
if -1 < tmpMoth < len(genPartCollection):
__ = linkGenPart(genPartCollection[tmpMoth], tmpMoth, genPartCollection, linkChain=linkChain)
return linkChain
| 20,631
|
def status():
"""Determines whether or not if CrowdStrike Falcon is loaded.
:return: A Boolean on whether or not crowdstrike is loaded.
:rtype: bool
.. code-block:: bash
salt '*' crowdstrike.status
"""
if not __salt__['crowdstrike.system_extension']():
# if we should be using a kext, just check the kext as falconctl stats
# can take a long time to run if falcon is already unloaded.
if not __salt__['kext.running']('com.crowdstrike.sensor'):
return False
try:
__salt__['crowdstrike.falconctl']('stats', timeout=5)
return True
except CommandExecutionError:
return False
| 20,632
|
def sparse_search(arr, s):
""" 10.5 Sparse Search: Given a sorted array of strings that is interspersed
with empty strings, write a method to find the location of a given string.
EXAMPLE:
Input: find "ball" in {"at", "", "", "" , "ball", "", "", "car", "" , "" , "dad", ""}
Output: 4
"""
def spread(arr, middle, left, right):
k = 1
while middle - k >= left and middle + k <= right:
if arr[middle - k] != "":
return middle - k
if arr[middle + k] != "":
return middle + k
k += 1
return middle
def rec_sparse_search(arr, s, left, right):
if left > right:
return None
middle = (left + right) // 2
if arr[middle] == "":
new_middle = spread(arr, middle, left, right)
if new_middle == middle:
return None
middle = new_middle
if arr[middle] == s:
return middle
if arr[middle] < s:
return rec_sparse_search(arr, s, middle + 1, right)
return rec_sparse_search(arr, s, left, middle - 1)
return rec_sparse_search(arr, s, 0, len(arr) - 1)
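# Hedged usage sketch (added for illustration), mirroring the docstring example:
def _demo_sparse_search():
    arr = ["at", "", "", "", "ball", "", "", "car", "", "", "dad", ""]
    assert sparse_search(arr, "ball") == 4
    assert sparse_search(arr, "dad") == 10
    assert sparse_search(arr, "zebra") is None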
| 20,633
|
def iterDirXML(dirname):
"""Given a directory, iterate over the content of the .txt files in that directory as Trees"""
for filename in os.listdir(dirname):
fullpath = os.path.join(dirname, filename)
if os.path.isfile(fullpath):
_, ext = os.path.splitext(fullpath)
if ext == ".xml":
yield filename, ElementTree.parse(fullpath)
| 20,634
|
def uuid_pk():
"""
Generate uuid1 and cut it to 12.
UUID default size is 32 chars.
"""
return uuid.uuid1().hex[:12]
| 20,635
|
def infected_symptomatic_00x80():
"""
Real Name: b'Infected symptomatic 00x80'
Original Eqn: b'Infected symptomatic 00+Infected symptomatic 80'
Units: b'person'
Limits: (None, None)
Type: component
b''
"""
return infected_symptomatic_00() + infected_symptomatic_80()
| 20,636
|
def main():
"""Make a jazz noise here"""
args = get_args()
#print(args.positional)
with open('process_one_set.sh', 'r') as f:
line_list = f.readlines()
for line in line_list:
if 'HPC_PATH=' in line:
replacing = line.split('=')[-1]
replace_file_one(replacing, f'"{args.positional}"\n')
replace_file_two(replacing, f'"{args.positional}"\n')
| 20,637
|
def test_finetune_full():
""" finetuning using 'full'.
"""
DATASET_PATH = ROOT_PATH+'/data/SS-Youtube/raw.pickle'
nb_classes = 2
# Keras and pyTorch implementation of the Adam optimizer are slightly different and change a bit the results
# We reduce the min accuracy needed here to pass the test
# See e.g. https://discuss.pytorch.org/t/suboptimal-convergence-when-compared-with-tensorflow-model/5099/11
min_acc = 0.68
with open(VOCAB_PATH, 'r') as f:
vocab = json.load(f)
data = load_benchmark(DATASET_PATH, vocab, extend_with=10000)
print('Loading pyTorch model from {}.'.format(PRETRAINED_PATH))
model = torchmoji_transfer(nb_classes, PRETRAINED_PATH, extend_embedding=data['added'])
print(model)
model, acc = finetune(model, data['texts'], data['labels'], nb_classes,
data['batch_size'], method='full', nb_epochs=1)
print("Finetune full SS-Youtube 1 epoch acc: {}".format(acc))
assert acc >= min_acc
| 20,638
|
def stencilCompare(firstElem, secondElem):
"""
stencilCompare(const std::pair< int, FP_PRECISION > &firstElem, const std::pair< int,
FP_PRECISION > &secondElem) -> bool
Comparator for sorting k-nearest stencil std::pair objects
"""
return _openmoc.stencilCompare(firstElem, secondElem)
| 20,639
|
def bisect_profiles_wrapper(decider, good, bad, perform_check=True):
"""Wrapper for recursive profile bisection."""
# Validate good and bad profiles are such, otherwise bisection reports noise
# Note that while decider is a random mock, these assertions may fail.
if perform_check:
if decider.run(good, save_run=False) != StatusEnum.GOOD_STATUS:
raise ValueError('Supplied good profile is not actually GOOD')
if decider.run(bad, save_run=False) != StatusEnum.BAD_STATUS:
raise ValueError('Supplied bad profile is not actually BAD')
common_funcs = sorted(func for func in good if func in bad)
if not common_funcs:
return {'ranges': [], 'individuals': []}
# shuffle because the results of our analysis can be quite order-dependent
# but this list has no inherent ordering. By shuffling each time, the chances
# of finding new, potentially interesting results are increased each time
# the program is run
random.shuffle(common_funcs)
results = bisect_profiles(decider, good, bad, common_funcs, 0,
len(common_funcs))
results['ranges'].sort()
results['individuals'].sort()
return results
| 20,640
|
def add_article_to_db(
table: str, article_title: str, article_url: str, article_date: str
) -> None:
""" Add a new article title and date to the database
Args:
table (str): current table
article_title (str): The title of an article
article_url (str): The url of an article
article_date (str): The publication date of an article
"""
db.table(table).insert(
{"title": article_title, "link": article_url, "published": article_date}
)
logger.debug(f"Article '{article_title}' inserted")
| 20,641
|
def rolling_median_with_nan_forward_fill(vector: typing.List[float], window_length: int) -> typing.List[float]:
"""Computes a rolling median of a vector of floats and returns the results. NaNs will be forward filled."""
forward_fill(vector)
return rolling_median_no_nan(vector, window_length)
| 20,642
|
def build_class_docstring(class_to_doc: ClassToDocument, formatter: Formatter) -> str:
"""A function to build the docstring of a class
Parameters
----------
class_to_doc : ClassToDocument
The class to document
formatter : Formatter
The formatter to use
Returns
-------
docstring : str
The docstring for this class
"""
_logger.debug(f"Build class docstring for '{class_to_doc.name}'...")
return formatter.format_docstring(nb_base_tab=class_to_doc.nb_base_tab,
description=class_to_doc.description,
fields={
'Attributes': class_to_doc.attributes,
'Public methods': class_to_doc.public_methods,
'Protected methods': class_to_doc.protected_methods,
})
| 20,643
|
def test_thread_safety():
""" test context keeps separate correlation ID per thread """
class _SampleThread(threading.Thread):
def __init__(self):
super(_SampleThread, self).__init__()
self.correlation_id = str(uuid.uuid1())
self.read_correlation_id = ''
def run(self):
cf_logging.FRAMEWORK.context.set_correlation_id(self.correlation_id)
time.sleep(0.1)
self.read_correlation_id = cf_logging.FRAMEWORK.context.get_correlation_id()
cf_logging.init(level=logging.DEBUG)
thread_one = _SampleThread()
thread_two = _SampleThread()
thread_one.start()
thread_two.start()
thread_one.join()
thread_two.join()
assert thread_one.correlation_id == thread_one.read_correlation_id
assert thread_two.correlation_id == thread_two.read_correlation_id
| 20,644
|
def Subprocess(
identifier: Optional[str] = None, variables: Optional[Dict] = None,
env: Optional[Dict] = None, volume: Optional[str] = None
) -> Dict:
"""Get base configuration for a subprocess worker with the given optional
arguments.
Parameters
----------
identifier: string, default=None
Unique worker identifier. If no identifier is given, a new unique
identifier will be generated.
variables: dict, default=None
Mapping with default values for placeholders in command template
strings.
env: dict, default=None
Default settings for environment variables when executing workflow
steps. These settings can get overridden by step-specific settings.
volume: string, default=None
Identifier for the storage volume that the worker has access to.
Returns
-------
dict
"""
return WorkerSpec(
worker_type=SUBPROCESS_WORKER,
variables=variables,
env=env,
identifier=identifier,
volume=volume
)
| 20,645
|
def maybe_start_instance(instance):
"""Starts instance if it's stopped, no-op otherwise."""
if not instance:
return
if instance.state['Name'] == 'stopped':
instance.start()
while True:
print(f"Waiting for {instance} to start.")
instance.reload()
if instance.state['Name'] == 'running':
break
time.sleep(10)
| 20,646
|
def ensure_folder_exist(foldername):
"""Create folder (with whole path) if it doesn't exist yet."""
if not os.access(foldername, os.R_OK|os.W_OK|os.X_OK):
os.makedirs(foldername)
| 20,647
|
async def print_roles(ctx):
""" Imprimir no chat as regras atribuidas ao usuário """
await ctx.send('Resource still under development, being released for use in the near future')
return
print(">> Comando de listagem de regras executado _print_roles_")
if ctx.author.nick == None:
await ctx.send(ctx.author.name)
else:
await ctx.send(ctx.author.nick)
for x in ctx.author.roles:
if str(x.name) != "@everyone":
await ctx.send(x)
| 20,648
|
def get_var(name):
"""
Returns the value of a settings variable.
The full name is CONTROLLED_VOCABULARY_ + name.
First look into django settings.
If not found there, use the value defined in this file.
"""
full_name = "CONTROLLED_VOCABULARY_" + name
ret = globals().get(full_name, None)
ret = getattr(settings, full_name, ret)
return ret
| 20,649
|
def test_do_post_no_data_returns_400():
"""Make a POST request to /cow with no data and expect status code 400."""
response = req.post('http://127.0.0.1:5000/cow')
assert response.status_code == 400
| 20,650
|
def get_schema_from_dataset_url_carbon(dataset_url,
key=None,
secret=None,
endpoint=None,
proxy=None,
proxy_port=None,
filesystem=None):
"""Returns a :class:`petastorm.unischema.Unischema` object loaded from a dataset specified by a url.
:param dataset_url: A dataset URL
:param key: access key
:param secret: secret key
:param endpoint: endpoint_url
:param proxy: proxy
:param proxy_port: proxy_port
:param filesystem: filesystem
:return: A :class:`petastorm.unischema.Unischema` object
"""
# Get a unischema stored in the dataset metadata.
stored_schema = get_schema_carbon(CarbonDataset(dataset_url,
key=key,
secret=secret,
endpoint=endpoint,
proxy=proxy,
proxy_port=proxy_port,
filesystem=filesystem))
return stored_schema
| 20,651
|
def test_Drop_Disk_At_NoDisk(score, max_score):
"""Function drop_disk_at: No disk."""
max_score.value += 20
try:
set_up()
the_board = Board.init_board(
dimension=6, given_disks=
( (wrapped_disk_value_1,),
(cracked_disk_value_2,cracked_disk_value_2_B),
(visible_disk_value_4,),
(visible_disk_value_4_B,),
(cracked_disk_value_5,visible_disk_value_2))
)
assert Drop7.drop_disk_at(the_board) == 1*2 + 1*4 + 2*8 + 1*16 + 1*32 + 1*64
for col in range(1,5):
assert not Board.has_disk_at(the_board,(col,1))
score.value += 20
except:
pass
| 20,652
|
def parse_args():
"""
standard command line parsing
"""
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--label', required=True)
parser.add_argument('--h5file' )
parser.add_argument('--neg', action='store_true', default=False)
parser.add_argument('--no-info', action='store_true', default=False)
parser.add_argument('--drop-artifacts', action='store_true',
default=False)
args = parser.parse_args()
label = args.label
sign = 'neg' if args.neg else 'pos'
do_info = not args.no_info
drop_artifacts = args.drop_artifacts
if args.h5file is None:
rel_h5files = h5files(os.getcwd())
else:
rel_h5files = [args.h5file]
# for cluster_info.mat:
all_info = [[None]*len(rel_h5files), [None]*len(rel_h5files)]
if do_info:
session_name = os.path.dirname(rel_h5files[0])
date = time.strftime('%Y-%m-%d_%H:%M:%S')
csvfile = open('cluster_info.csv', 'w')
writer = csv.writer(csvfile)
writer.writerow(['# Session: {}, Converted: {}'.
format(session_name, date)])
writer.writerow(['# For Artifacts, 0 means there are none, 1 means '
'they are included as clusters (type -1), 2 means '
'they are included as unassigned (type 0)'])
writer.writerow(['# For Unassigned, 1 means they exist, 0 means they '
'do not exist.'])
writer.writerow(['# For Clusters, 1 means multi-unit, 2 means '
'single-unit, -1 means artifact'])
writer.writerow(['ChannelNumber', 'ChannelName', 'Artifacts',
'Unassigned', 'Cluster1', 'Cluster2', '...'])
for dfile in rel_h5files:
basedir = os.path.dirname(dfile)
basename = os.path.basename(dfile)
sorting_path = os.path.join(basedir, label)
outfname = basename[5:-3]
info = main(dfile, sorting_path, sign, outfname, drop_artifacts)
if do_info and (info is not None):
writer.writerow(info)
all_info[0][rel_h5files.index(dfile)] = info[4:]
all_info[1][rel_h5files.index(dfile)] = info[1]
if do_info:
info_dict = {'cluster_info': all_info, 'label_info':
' 1 = MU\n 2 = SU\n-1 = Artif.\nRefers to '
'"cluster_class"-values 1 and up.\nIgnores Unassigned '
'(value 0)'}
info_fname = "cluster_info.mat"
savemat(info_fname, info_dict)
| 20,653
|
def get_all_metrics(model, epoch, val_x, val_y, start_time, loss_fn):
"""每个epoch结束后在发展集上预测,得到一些指标
:param model: tf.keras.Model, epoch训练后的模型
:param epoch: int, 轮数
:param val_x: tf.data.Dataset, 发展集的输入, 和val_y一样的sample_size
:param val_y: tf.data.Dataset, 发展集的标签
:param start_time: time.time, 开始时间
:param loss_fn: 损失函数
:return: 模型在发展集上的损失
"""
y_pred_val, y_true_val = [], []
loss_val = 0
sample_size_val = 0
for x_tmp, y_tmp in zip(val_x.as_numpy_iterator(), val_y.as_numpy_iterator()):
pred_tmp = model.predict(x_tmp)
y_pred_val.append(pred_tmp)
y_true_val.append(y_tmp)
loss_tmp = loss_fn(y_tmp, pred_tmp)
loss_val += np.sum(loss_tmp)
sample_size_val += x_tmp[0].shape[0]
# Compute the loss
loss_val /= sample_size_val
# Compute AUC
y_pred = np.concatenate(y_pred_val).astype(dtype=float)
y_true = np.concatenate(y_true_val).astype(dtype=float)
roc_auc_val = roc_auc_score(y_true, y_pred)
# Convert predicted probabilities to class labels
y_pred = np.where(y_pred > 0.5, np.ones_like(y_pred), np.zeros_like(y_pred))
# Compute confusion-matrix-based metrics
recall = recall_score(y_true=y_true, y_pred=y_pred)
precision = precision_score(y_true=y_true, y_pred=y_pred)
accuracy = accuracy_score(y_true=y_true, y_pred=y_pred)
line = f"""For epoch {epoch}, on val set loss is {round(loss_val, 5)}, auc is {round(roc_auc_val, 4)},
recall is {round(recall, 4)}, precision is {round(precision, 4)}, accuracy is {round(accuracy, 4)},
confusion_matrix is {confusion_matrix(y_true=y_true, y_pred=y_pred)}"""
line += f", time elapsed {(time.time() - start_time) / 60} mins"
print("HZJ info: ", line)
return loss_val
| 20,654
|
def configure_ssl_off(units, model_name=None, max_wait=60):
"""Turn RabbitMQ charm SSL config option off.
Turn ssl charm config option off, confirm that it is disabled
on every unit.
:param units: list of units
:param max_wait: maximum time to wait in seconds to confirm
:returns: None if successful. Raise on error.
"""
logging.debug('Setting ssl charm config option: off')
# Disable RMQ SSL
config = {'ssl': 'off'}
zaza.model.set_application_config('rabbitmq-server',
config,
model_name=model_name)
# Wait for unit status
wait_for_cluster(model_name)
ret = _retry_validate_ssl_disabled_units(units)
if ret:
raise Exception(ret)
| 20,655
|
def get_labels(decode_steps: DecodeSteps) -> LabelsDict:
"""Returns labels dict given DecodeSteps."""
return {
"target_action_types": decode_steps.action_types,
"target_action_ids": decode_steps.action_ids,
}
| 20,656
|
def test_abstract_methods():
"""Abstract methods must be implemented."""
ERROR_MSG_REGEX = "^Can't instantiate abstract class .* with abstract method"
# create artifact file for model init
with tempfile.NamedTemporaryFile(delete=False) as tempf:
pickle.dump("foo", tempf)
try:
for model_cls in verta_models():
assert model_cls({model_cls.ARTIFACT_KEY: tempf.name})
for model_cls in incomplete_verta_models():
with pytest.raises(TypeError, match=ERROR_MSG_REGEX):
model_cls({})
finally:
os.remove(tempf.name)
| 20,657
|
def get_body_barycentric_posvel(body, time, ephemeris=None):
"""Calculate the barycentric position and velocity of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation`
Tuple of barycentric (ICRS) position and velocity.
See also
--------
get_body_barycentric : to calculate position only.
This is faster by about a factor two for JPL kernels, but has no
speed advantage for the built-in ephemeris.
Notes
-----
The velocity cannot be calculated for the Moon. To just get the position,
use :func:`~astropy.coordinates.get_body_barycentric`.
"""
return _get_body_barycentric_posvel(body, time, ephemeris)
| 20,658
|
async def test_unschedule_all_schedulers(
startup_and_shutdown_uvicorn, host, port, tmp_path
):
""" unschedule a scheduler. """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo1", scheduler_name="bar")
await schedule_action(client=client, action_name="foo2", scheduler_name="bar")
await assert_job_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=2)
await unschedule_all(client=client)
await assert_job_count(client=client, n=0)
await assert_scheduled_action_count(client=client, n=0)
await get_action(client=client, action_name="foo1")
await get_action(client=client, action_name="foo2")
await get_scheduler(client=client, scheduler_name="bar")
| 20,659
|
def set_sequences(fastq_file, sequence_dict, as_rna=False):
"""
Set sequences in a `FastqFile` instance from a dictionary.
Parameters
----------
fastq_file : FastqFile
The `FastqFile` to be accessed.
sequence_dict : dict
A dictionary containing the sequences and scores to be set.
Identifiers are keys,
(`NucleotideSequence`, `ndarray`) tuples are values.
as_rna : bool, optional
If set to true, the sequence symbol ``'T'`` will be replaced
by ``'U'``.
"""
for header, (sequence, scores) in sequence_dict.items():
fastq_file[header] = _convert_to_string(sequence, as_rna), scores
| 20,660
|
def recurDraw(num, data):
"""
Purpose: to draw polygons
Parameters: num - indicator of what layer the program is on, data - instance
of the Data class
Returns: data - instance of the data class
Calls: recurDraw - itself, Data - data processing class, toDraw - drawing
intermediary function
"""
if num == 0:
return num
num -= 1
data = recurDraw(num, data)
data = Data(num, data)
toDraw(data)
return data
| 20,661
|
def cont4():
"""
    Two clusters, namely <cont1> (5 contours) and <cont3> (4 contours).
The enclosing contours of the clusters have a different value.
Contains 3 minima.
"""
cont_min = [
cncc(5, (6.00, 3.00), 0.2, (1, 1)),
cncc(2, (7.00, 4.00), 0.1, (4, 1), rmin=0.15),
cncc(2, (6.25, 3.25), 0.3, (6, 1), rmin=1.6, no_min=True),
cncc(5, (3.00, 3.00), 0.2, (1, 1)),
]
cont = [e for lst in cont_min for e in lst[0]]
min = [e for lst in cont_min for e in lst[1]]
return cont, min
| 20,662
|
def predict(test_data, qrnn, add_noise = False):
"""
predict the posterior mean and median
"""
if add_noise:
x_noise = test_data.add_noise(test_data.x, test_data.index)
x = (x_noise - test_data.mean)/test_data.std
y_prior = x_noise
y = test_data.y_noise
y0 = test_data.y
else:
x = (test_data.x - test_data.mean)/test_data.std
y_prior = test_data.x
y = test_data.y
y0 = test_data.y0
y_pre = qrnn.predict(x.data)
y_pos_mean = qrnn.posterior_mean(x.data)
return y_pre, y_prior, y0, y, y_pos_mean, x.data
| 20,663
|
def handle_login_GET():
"""
Displays the index (the login page).
"""
if request.args.get('next'):
url_kwargs = dict(next=request.args.get('next'))
else:
url_kwargs = {}
try:
weblab_api.api.check_user_session()
except SessionNotFoundError:
pass # Expected behavior
else:
# User is already logged in, send him to the next url
return redirect(get_next_url())
return render_template("webclient/login.html", url_kwargs = url_kwargs)
| 20,664
|
def get_role_actions():
"""Returns the possible role to actions items in the application.
Returns:
dict(str, list(str)). A dict presenting key as role and values as list
of actions corresponding to the given role.
"""
return copy.deepcopy(_ROLE_ACTIONS)
| 20,665
|
def jsons_str_tuple_to_jsons_tuple(ctx, param, value):
"""
    Convert a tuple of JSON strings into a list of Python objects.
"""
if value is None:
return []
else:
return [json.loads(a) for a in value]
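# Minimal usage sketch (the ctx/param arguments are unused by this callback,
# so None placeholders are fine; the JSON strings below are illustrative):
def _demo_jsons_str_tuple_to_jsons_tuple():
    parsed = jsons_str_tuple_to_jsons_tuple(None, None, ['{"a": 1}', '{"b": 2}'])
    assert parsed == [{"a": 1}, {"b": 2}]
    return parsed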
| 20,666
|
def get_webelements_in_active_area(xpath, **kwargs):
"""Find element under another element.
If ${ACTIVE_AREA_FUNC} returns an element then the xpath is searched from
that element. Otherwise the element is searched under body element.
Parameters
----------
xpath : str
Xpath expression without xpath= prefix.
Returns
-------
:obj:`list` of :obj:`WebElement`
List of visible WebElements.
"""
active_area_xpath = CONFIG["ActiveAreaXpath"]
if ACTIVE_AREA_FUNCTION is not None:
active_area = ACTIVE_AREA_FUNCTION()
if active_area:
xpath = xpath.replace('//', './/', 1)
else:
driver = browser.get_current_browser()
active_area = driver.find_element_by_xpath(active_area_xpath)
else:
driver = browser.get_current_browser()
try:
active_area = driver.find_element_by_xpath(active_area_xpath)
if active_area is None:
logger.debug('Got None for active area. Is page still loading '
'or is it missing body tag?')
return None
    except NoSuchElementException:
        # //body not found, is page still loading? Return None to continue looping
        logger.debug("Cannot locate //body element. Is page still loading?")
return None
try:
webelements = active_area.find_elements_by_xpath(xpath)
logger.trace('XPath {} matched {} webelements'
.format(xpath, len(webelements)))
webelements = get_visible_elements_from_elements(webelements, **kwargs)
except StaleElementReferenceException:
raise QWebStalingElementError('Got StaleElementException')
except (JavascriptException, InvalidSelectorException) as e:
logger.debug('Got {}, returning None'.format(e))
webelements = None
return webelements
| 20,667
|
def test_lemmatizer_reflects_lookups_changes():
"""Test for an issue that'd cause lookups available in a model loaded from
disk to not be reflected in the lemmatizer."""
nlp = Language()
assert Doc(nlp.vocab, words=["foo"])[0].lemma_ == "foo"
table = nlp.vocab.lookups.add_table("lemma_lookup")
table["foo"] = "bar"
assert Doc(nlp.vocab, words=["foo"])[0].lemma_ == "bar"
table = nlp.vocab.lookups.get_table("lemma_lookup")
table["hello"] = "world"
# The update to the table should be reflected in the lemmatizer
assert Doc(nlp.vocab, words=["hello"])[0].lemma_ == "world"
new_nlp = Language()
table = new_nlp.vocab.lookups.add_table("lemma_lookup")
table["hello"] = "hi"
assert Doc(new_nlp.vocab, words=["hello"])[0].lemma_ == "hi"
nlp_bytes = nlp.to_bytes()
new_nlp.from_bytes(nlp_bytes)
# Make sure we have the previously saved lookup table
assert "lemma_lookup" in new_nlp.vocab.lookups
assert len(new_nlp.vocab.lookups.get_table("lemma_lookup")) == 2
assert new_nlp.vocab.lookups.get_table("lemma_lookup")["hello"] == "world"
assert Doc(new_nlp.vocab, words=["foo"])[0].lemma_ == "bar"
assert Doc(new_nlp.vocab, words=["hello"])[0].lemma_ == "world"
| 20,668
|
def create_double_group():
"""
Returns: Create two simple control for all object under selected
"""
selections = cm.ls(selection=True)
if len(selections) < 1:
        return om.MGlobal.displayError("This function needs at least one selected object to work with")
for selection in selections:
if "End" in str(selection):
continue
else:
for each_name in list_tail_name:
if str(each_name) in selection:
base_name = selection.replace(str(each_name), "")
else:
base_name = selection
parent = cm.listRelatives(selection, parent=True)
group_orient = cm.group(empty=True, world=True, name="{0}_orient".format(base_name))
group_offset = cm.group(empty=True, world=True, name="{0}_offset".format(base_name))
cm.parent(group_offset, group_orient)
if parent is not None:
cm.parent(group_orient, parent)
cm.matchTransform(group_orient, selection)
cm.makeIdentity(group_orient, apply=True, scale=True)
cm.parent(selection, group_offset)
sys.stdout.write("Create double group completed.\n")
| 20,669
|
def voucher_and_partial_matches_with_coupons(voucher_and_partial_matches):
"""
Returns a voucher with partial matching CourseRuns and valid coupons
"""
context = voucher_and_partial_matches
products = [
ProductFactory(content_object=course_run)
for course_run in context.partial_matches
]
coupon_eligibility_list = [
CouponEligibilityFactory(product=product) for product in products
]
payment_versions = [
CouponPaymentVersionFactory(amount=1, company=context.company)
for _ in coupon_eligibility_list
]
coupon_versions = [
CouponVersionFactory(
coupon=coupon_eligibility_list[i].coupon,
payment_version=payment_versions[i],
)
for i in range(len(coupon_eligibility_list))
]
return SimpleNamespace(
**vars(voucher_and_partial_matches),
products=products,
coupon_eligibility_list=coupon_eligibility_list,
coupon_versions=coupon_versions,
payment_versions=payment_versions,
)
| 20,670
|
def one_transit(t=np.linspace(0,27,19440),
per=1., rp=0.1, t0=1., a=15., inc=87., ecc=0.,
w=90., limb_dark ='nonlinear', u=[0.5,0.1,0.1,-0.1]):
"""
~Simulates a one-sector long TESS light curve with injected planet transits per input parameters.~
Requires: batman; numpy
Args: t =times at which to calculate light curve, default is one TESS sector;
per =orbital period;
rp =planet radius (in units of stellar radii);
           t0 =time of inferior conjunction;
a =semi-major axis (in units of stellar radii);
inc =orbital inclination (in degrees);
ecc =eccentricity;
w =longitude of periastron (in degrees);
limb_dark =limb darkening model;
u =limb darkening coefficients [u1, u2, u3, u4];
    outputs: flux array =light curve with one injected transit at per (returned along with the batman model and params), for use right before sim_lc to get TESS lc
"""
#### maybe should make params its own fcn and split this fcn into 2....
import batman
params = batman.TransitParams(); params.t0 = t0; params.per = per
params.rp = rp; params.a = a; params.inc = inc; params.ecc = ecc
params.w = w; params.limb_dark = limb_dark; params.u = u
m = batman.TransitModel(params, t) #initializes model
flux = m.light_curve(params) #calculates light curve
return flux, m, params
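# Hedged usage sketch: injecting a single transit with a 3.5-day period and
# Rp/Rs = 0.08 (assumes numpy and batman are installed; the parameter values
# below are illustrative only).
def _demo_one_transit():
    flux, model, params = one_transit(per=3.5, rp=0.08, t0=1.5)
    return flux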
| 20,671
|
async def master_loop_error(error):
"""Handler of exceptions in master game loop"""
try:
raise error
except Exception:
await botutils.send_lobby(error_str)
await botutils.log(botutils.Level.error, traceback.format_exc())
finally:
master_game_loop.cancel()
| 20,672
|
def scrape_md_file(md_path):
"""
    Return the Python scripts and URLs contained in the md file at the given path.
Parameters
----------
md_path : str
path to md file to scrape
Returns
-------
python_examples : List[str]
The list of Python scripts included in the provided file.
    urls : List[str]
        The list of URLs included in the provided file.
"""
# check there is a README in that folder
if not os.path.isfile(md_path):
return [], []
with open(md_path, 'r') as f:
readme_content = f.read()
pythons = re.findall('```python(.*?)```', readme_content, flags=re.DOTALL)
urls = re.findall('http[s]?://(?:[0-9a-zA-Z]|[-/.%:_])+', readme_content)
return pythons, urls
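# A small self-contained check of the same regexes used above (standard
# library only; the markdown text is illustrative, and the fence string is
# built programmatically to keep this snippet plain):
def _demo_scrape_md_regexes():
    import re
    fence = "`" * 3
    text = "See https://example.org/docs\n" + fence + "python\nprint('hi')\n" + fence + "\n"
    pythons = re.findall(fence + 'python(.*?)' + fence, text, flags=re.DOTALL)
    urls = re.findall('http[s]?://(?:[0-9a-zA-Z]|[-/.%:_])+', text)
    return pythons, urls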
| 20,673
|
def activate(request: Request) -> dict:
"""View to activate user after clicking email link.
:param request: Pyramid request.
:return: Context to be used by the renderer.
"""
code = request.matchdict.get('code', None)
registration_service = get_registration_service(request)
return registration_service.activate_by_email(code)
| 20,674
|
def showMovie(frames, movSz, fps=20, transpose=False):
"""Show a movie using OpenCV.
Takes a numpy matrix (with images as columns) and shows the images in
a video at a specified frame rate.
Parameters
----------
    frames : numpy array, shape = (D, N)
        Input data with N images as D-dimensional column vectors.
    movSz : tuple of (height, width, nFrames)
Size of the movie to show. This is used to reshape the column vectors
in the data matrix.
fps : int (default: 20)
Show video at specific frames/second (FPS) rate.
    transpose : boolean (default: False)
Transpose each frame.
"""
    if fps <= 0:
        raise Exception("FPS must be positive")
video = frames.reshape(movSz)
nImgs = frames.shape[1]
    tWait = int(1000.0/fps)
for i in range(nImgs):
if transpose:
cv2.imshow("video", renormalize(video[:,:,i].T, (0, 1)))
else:
cv2.imshow("video", renormalize(video[:,:,i], (0, 1)))
key = cv2.waitKey(tWait)
if key == 27:
break
| 20,675
|
def build_feed(posts):
"""Generate Atom feed file"""
feed = Atom1Feed(
title="~tym smol pomes", description="", link=f"{SITEURL}/", language="en"
)
for post in posts:
slug = post["metadata"]["slug"]
stamp = post["metadata"]["stamp"]
content = post["content"]
feed.add_item(
title=slug,
pubdate=stamp,
content=content,
author="xqo",
description=None,
link=f"{SITEURL}/{slug}",
)
with open("site_build/feed.xml", "w") as feed_output:
feed.write(feed_output, "utf-8")
| 20,676
|
def _create_eval_metrics_fn(
dataset_name, is_regression_task
):
"""Creates a function that computes task-relevant metrics.
Args:
dataset_name: TFDS name of dataset.
is_regression_task: If true, includes Spearman's rank correlation
coefficient computation in metric function; otherwise, defaults to
accuracy computation.
Returns:
Relevant metric function.
"""
def get_accuracy(guess, gold):
return (guess == gold).mean()
def get_mcc(guess, gold):
tp = ((guess == 1) & (gold == 1)).sum()
tn = ((guess == 0) & (gold == 0)).sum()
fp = ((guess == 1) & (gold == 0)).sum()
fn = ((guess == 0) & (gold == 1)).sum()
mcc_denom = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
mcc = (tp * tn - fp * fn) / (mcc_denom + 1e-6)
return mcc
def get_f1(guess, gold):
tp = ((guess == 1) & (gold == 1)).sum()
fp = ((guess == 1) & (gold == 0)).sum()
fn = ((guess == 0) & (gold == 1)).sum()
f1 = (2 * tp) / (2 * tp + fp + fn + 1e-6)
return f1
def get_f1_accuracy_mean(guess, gold):
return (get_f1(guess, gold) + get_accuracy(guess, gold)) / 2.0
def get_spearmanr(x, y):
return scipy_stats.spearmanr(x, y).correlation
eval_metrics = {}
if is_regression_task:
eval_metrics["spearmanr"] = get_spearmanr
else:
eval_metrics["accuracy"] = get_accuracy
if dataset_name == "glue/cola":
eval_metrics["mcc"] = get_mcc
elif dataset_name in ("glue/mrpc", "glue/qqp"):
eval_metrics["f1_accuracy_mean"] = get_f1_accuracy_mean
def metrics_fn(stats):
res = {}
for name, fn in eval_metrics.items():
res[name] = fn(stats["prediction"], stats["label"])
return res
return metrics_fn
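# Hedged usage sketch for the classification branch (assumes numpy as np is
# imported at module level, as the helpers above already require; the dataset
# name and arrays are illustrative):
def _demo_eval_metrics_fn():
    metrics_fn = _create_eval_metrics_fn("glue/cola", is_regression_task=False)
    stats = {"prediction": np.array([1, 0, 1, 1]), "label": np.array([1, 0, 0, 1])}
    return metrics_fn(stats)  # dict with 'accuracy' and 'mcc'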
| 20,677
|
def brighter(rgb):
"""
Make the color (rgb-tuple) a tad brighter.
"""
_rgb = tuple([ int(np.sqrt(a/255) * 255) for a in rgb ])
return _rgb
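# Quick illustrative call (assumes numpy as np, as used above). For channel
# values below 255, sqrt(a/255) exceeds a/255, so every channel moves toward 255:
def _demo_brighter():
    return brighter((100, 50, 200))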
| 20,678
|
def open_browser_with_timeout(driver, browser):
"""
Opens Chromium, sets a timeout for the script to finish and takes a screenshot
:param driver: Browser driver
:param browser: Browser
:return: None
"""
try:
if browser.case == "expired" or browser.case == "wrong-host" or browser.case == "self-signed" \
or browser.case == "untrusted-root" or browser.case == "revoked":
driver.set_page_load_timeout(5)
driver.set_script_timeout(5)
open_browser(driver, browser.url)
screenshot_website(driver, browser)
except Exception as e:
logger.error("Exception occured {} - making screenshot.".format(e))
screenshot_website(driver, browser)
| 20,679
|
def delete_workspace_config(namespace, workspace, cnamespace, config):
"""Delete method configuration in workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
        cnamespace (str): Method configuration namespace
        config (str): Method configuration name
Swagger:
https://api.firecloud.org/#!/Method_Configurations/deleteWorkspaceMethodConfig
"""
uri = "workspaces/{0}/{1}/method_configs/{2}/{3}".format(namespace,
workspace, cnamespace, config)
return __delete(uri)
| 20,680
|
def test_filled_transparent_graphs_2():
""" Two functions with transparend grid over them """
coordinate_system = cartesius.CoordinateSystem()
coordinate_system.add(
charts.Function(
math.sin,
start = -4,
end = 5,
step = 0.02,
fill_color = (0, 0, 255),
transparency_mask = 100))
coordinate_system.add(
charts.Function(
math.cos,
start = -4,
end = 5,
step = 0.02,
fill_color = (200, 255, 200),
transparency_mask = 100))
coordinate_system.add(elements.Grid(1, 1, transparency_mask=140))
return coordinate_system.draw(300, 200), coordinate_system.draw(300, 200, antialiasing=True)
| 20,681
|
def get_shader_code(name):
""" Returns the shader as a string """
fname = op.join( op.dirname(__file__), name )
if op.exists( fname ):
with open(fname) as f:
return f.read()
| 20,682
|
def compute_translation_error(pred_pose, gt_pose, reduction="mean"):
"""
Computes the error (meters) in translation components of pose prediction.
Inputs:
pred_pose - (bs, 3) --- (x, y, theta)
gt_pose - (bs, 3) --- (x, y, theta)
Note: x, y must be in meters.
"""
error = torch.sqrt(
F.mse_loss(pred_pose[:, :2], gt_pose[:, :2], reduction=reduction)
)
return error
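# Minimal usage sketch on toy poses (assumes torch and torch.nn.functional as F
# are imported in this module, as the function above requires; values are
# illustrative). With a 0.5 m offset in both x and y, the returned RMSE is 0.5:
def _demo_compute_translation_error():
    pred = torch.tensor([[1.0, 2.0, 0.0]])
    gt = torch.tensor([[1.5, 2.5, 0.0]])
    return compute_translation_error(pred, gt)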
| 20,683
|
def get_base_snippet_action_menu_items(model):
"""
Retrieve the global list of menu items for the snippet action menu,
which may then be customised on a per-request basis
"""
menu_items = [
SaveMenuItem(order=0),
DeleteMenuItem(order=10),
]
for hook in hooks.get_hooks('register_snippet_action_menu_item'):
action_menu_item = hook(model)
if action_menu_item:
menu_items.append(action_menu_item)
return menu_items
| 20,684
|
def assign_file(package, source):
"""Initializes package output class.
Parameters
----------
package : :obj:`str`
Name of the package that generated the trajectory file.
source : :obj:`str`
Path to the trajectory file.
Returns
-------
The class corresponding to the correct package.
"""
if package.lower() == 'gamess':
return GAMESS(source)
else:
raise ValueError(f'{package} is not supported.')
| 20,685
|
def _magpie_register_services_with_db_session(services_dict, db_session, push_to_phoenix=False,
force_update=False, update_getcapabilities_permissions=False):
# type: (ServicesSettings, Session, bool, bool, bool) -> bool
"""
Registration procedure of :term:`Services` from ``providers`` section using pre-established database session.
.. seealso::
:func:`magpie_register_services_from_config`
"""
db_session.begin(subtransactions=True)
existing_services_names = [n[0] for n in db_session.query(models.Service.resource_name)]
magpie_anonymous_user = get_constant("MAGPIE_ANONYMOUS_USER")
anonymous_user = UserService.by_user_name(magpie_anonymous_user, db_session=db_session)
for svc_name, svc_values in services_dict.items():
svc_new_url = svc_values["url"]
svc_type = svc_values["type"]
svc_config = svc_values.get("configuration")
svc_sync_type = svc_values.get("sync_type")
if force_update and svc_name in existing_services_names:
svc = models.Service.by_service_name(svc_name, db_session=db_session)
if svc.url == svc_new_url:
print_log("Service URL already properly set [{url}] ({svc})"
.format(url=svc.url, svc=svc_name), logger=LOGGER)
else:
print_log("Service URL update [{url_old}] => [{url_new}] ({svc})"
.format(url_old=svc.url, url_new=svc_new_url, svc=svc_name), logger=LOGGER)
svc.url = svc_new_url
svc.sync_type = svc_sync_type
svc.configuration = svc_config
elif not force_update and svc_name in existing_services_names:
print_log("Skipping service [{svc}] (conflict)" .format(svc=svc_name), logger=LOGGER)
else:
print_log("Adding service [{svc}]".format(svc=svc_name), logger=LOGGER)
svc = models.Service(
resource_name=svc_name,
resource_type=models.Service.resource_type_name,
url=svc_new_url,
type=svc_type,
configuration=svc_config,
sync_type=svc_sync_type
)
db_session.add(svc)
getcap_perm = Permission.GET_CAPABILITIES
if update_getcapabilities_permissions and anonymous_user is None:
print_log("Cannot update 'getcapabilities' permission of non existing anonymous user",
level=logging.WARN, logger=LOGGER)
elif update_getcapabilities_permissions and getcap_perm in SERVICE_TYPE_DICT[svc_type].permissions:
svc = db_session.query(models.Service.resource_id).filter_by(resource_name=svc_name).first()
svc_perm_getcapabilities = UserResourcePermissionService.by_resource_user_and_perm(
user_id=anonymous_user.id,
perm_name=getcap_perm.value,
resource_id=svc.resource_id,
db_session=db_session
)
if svc_perm_getcapabilities is None:
print_log("Adding '{}' permission to anonymous user.".format(getcap_perm.value), logger=LOGGER)
svc_perm_getcapabilities = models.UserResourcePermission(
user_id=anonymous_user.id,
perm_name=getcap_perm.value,
resource_id=svc.resource_id
)
db_session.add(svc_perm_getcapabilities)
transaction.commit()
if push_to_phoenix:
return _phoenix_update_services(services_dict)
return True
| 20,686
|
def journal(client):
"""
Fetch journal entries which reference a member.
"""
client.require_auth()
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
yield from cursor.execute("""
select A.tx_id, A.wallet_id, A.debit, A.credit, B.currency_id, C.narrative
from journal A
inner join wallet B on B.id = A.wallet_id
inner join wallet_transaction C on C.id = A.tx_id
where B.member_id = %s
order by C.created
""", [client.session["member_id"]])
rs = yield from cursor.fetchall()
return [dict(i) for i in rs]
| 20,687
|
def test_get_version(check):
"""
Test _get_version() to make sure the check is properly parsing Postgres versions
"""
db = MagicMock()
# Test #.#.# style versions
db.cursor().fetchone.return_value = ['9.5.3']
assert check._get_version('regular_version', db) == [9, 5, 3]
# Test #.# style versions
db.cursor().fetchone.return_value = ['10.2']
assert check._get_version('short_version', db) == [10, 2]
# Test #beta# style versions
db.cursor().fetchone.return_value = ['11beta3']
assert check._get_version('beta_version', db) == [11, -1, 3]
# Test #rc# style versions
db.cursor().fetchone.return_value = ['11rc1']
assert check._get_version('rc_version', db) == [11, -1, 1]
# Test #unknown# style versions
db.cursor().fetchone.return_value = ['11nightly3']
assert check._get_version('unknown_version', db) == [11, -1, 3]
| 20,688
|
def test_coerce__to_bool(value, expected):
"""Ensure we are properly coercing to boolean."""
assert configure.coerce_to_expected(value, "foo", bool) is expected
| 20,689
|
def computer_config_show(computer, user, current, as_option_string):
"""Show the current or default configuration for COMPUTER."""
import tabulate
from aiida.common.utils import escape_for_bash
config = {}
table = []
transport_cls = computer.get_transport_class()
option_list = [
param for param in transport_cli.create_configure_cmd(computer.get_transport_type()).params
if isinstance(param, click.core.Option)
]
option_list = [option for option in option_list if option.name in transport_cls.get_valid_auth_params()]
if current:
config = get_computer_configuration(computer, user)
else:
config = {option.name: transport_cli.transport_option_default(option.name, computer) for option in option_list}
option_items = []
if as_option_string:
for option in option_list:
t_opt = transport_cls.auth_options[option.name]
if config.get(option.name) or config.get(option.name) is False:
if t_opt.get('switch'):
option_value = option.opts[-1] if config.get(option.name) else '--no-{}'.format(
option.name.replace('_', '-'))
elif t_opt.get('is_flag'):
is_default = config.get(option.name) == transport_cli.transport_option_default(
option.name, computer)
option_value = option.opts[-1] if is_default else ''
else:
option_value = '{}={}'.format(option.opts[-1], config[option.name])
option_items.append(option_value)
opt_string = ' '.join(option_items)
echo.echo(escape_for_bash(opt_string))
else:
table = [('* ' + name, config[name]) for name in transport_cls.get_valid_auth_params()]
echo.echo(tabulate.tabulate(table, tablefmt='plain'))
| 20,690
|
def triplet_to_rrggbb(rgbtuple):
"""Converts a (red, green, blue) tuple to #rrggbb."""
hexname = _tripdict.get(rgbtuple)
if hexname is None:
hexname = '#%02x%02x%02x' % rgbtuple
_tripdict[rgbtuple] = hexname
return hexname
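# Quick illustrative call (assumes the module-level `_tripdict` cache used
# above exists); (255, 0, 128) formats to '#ff0080' and is then cached:
def _demo_triplet_to_rrggbb():
    return triplet_to_rrggbb((255, 0, 128))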
| 20,691
|
def stab_cholesky(M):
""" A numerically stable version of the Cholesky decomposition.
Used in the GLE implementation. Since many of the matrices used in this
algorithm have very large and very small numbers in at once, to handle a
wide range of frequencies, a naive algorithm can end up having to calculate
the square root of a negative number, which breaks the algorithm. This is
due to numerical precision errors turning a very tiny positive eigenvalue
into a tiny negative value.
Instead of this, an LDU decomposition is used, and any small negative numbers
in the diagonal D matrix are assumed to be due to numerical precision errors,
and so are replaced with zero.
Args:
M: The matrix to be decomposed.
"""
n = M.shape[1]
D = np.zeros(n,float)
L = np.zeros(M.shape,float)
for i in range(n):
L[i,i] = 1.
for j in range(i):
L[i,j] = M[i,j]
for k in range(j):
L[i,j] -= L[i,k]*L[j,k]*D[k]
if (not D[j] == 0.0):
L[i,j] = L[i,j]/D[j]
D[i] = M[i,i]
for k in range(i):
D[i] -= L[i,k]*L[i,k]*D[k]
S = np.zeros(M.shape,float)
for i in range(n):
if (D[i]>0):
D[i] = math.sqrt(D[i])
else:
warning("Zeroing negative element in stab-cholesky decomposition: " + str(D[i]), verbosity.low)
D[i] = 0
for j in range(i+1):
S[i,j] += L[i,j]*D[j]
return S
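# Hedged usage sketch on a small symmetric positive-definite matrix (assumes
# numpy as np and math, as used above; a well-conditioned input triggers no
# diagonal zeroing, so S reproduces the ordinary Cholesky factor):
def _demo_stab_cholesky():
    M = np.array([[4.0, 2.0], [2.0, 3.0]])
    S = stab_cholesky(M)
    assert np.allclose(np.dot(S, S.T), M)
    return S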
| 20,692
|
def prodNeventsTrend(request):
"""
The view presents historical trend of nevents in different states for various processing types
Default time window - 1 week
"""
valid, response= initRequest(request)
defaultdays = 7
equery = {}
if 'days' in request.session['requestParams'] and request.session['requestParams']['days']:
try:
days = int(request.session['requestParams']['days'])
except:
days = defaultdays
starttime = datetime.now() - timedelta(days=days)
endtime = datetime.now()
request.session['requestParams']['days'] = days
else:
starttime = datetime.now() - timedelta(days=defaultdays)
endtime = datetime.now()
request.session['requestParams']['days'] = defaultdays
equery['timestamp__range'] = [starttime, endtime]
if 'processingtype' in request.session['requestParams'] and request.session['requestParams']['processingtype']:
if '|' not in request.session['requestParams']['processingtype']:
equery['processingtype'] = request.session['requestParams']['processingtype']
else:
pts = request.session['requestParams']['processingtype'].split('|')
equery['processingtype__in'] = pts
events = ProdNeventsHistory.objects.filter(**equery).values()
timeline = set([ev['timestamp'] for ev in events])
timelinestr = [datetime.strftime(ts, defaultDatetimeFormat) for ts in timeline]
if 'view' in request.session['requestParams'] and request.session['requestParams']['view'] and request.session['requestParams']['view'] == 'separated':
view = request.session['requestParams']['view']
else:
view = 'joint'
plot_data = []
if view == 'joint':
ev_states = ['running', 'waiting']
data = {}
for es in ev_states:
data[es] = {}
for ts in timelinestr:
data[es][ts] = 0
for ev in events:
for es in ev_states:
data[es][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(es)]
else:
processingtypes = set([ev['processingtype'] for ev in events])
ev_states = ['running', 'waiting']
lines = []
for prtype in processingtypes:
for evst in ev_states:
lines.append(str(prtype + '_' + evst))
if len(processingtypes) > 1:
lines.append('total_running')
lines.append('total_waiting')
data = {}
for l in lines:
data[l] = {}
for ts in timelinestr:
data[l][ts] = 0
for ev in events:
for l in lines:
if ev['processingtype'] in l:
data[l][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(l.split('_')[1])]
if l.startswith('total'):
data[l][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(l.split('_')[1])]
for key, value in data.items():
newDict = {'state': key, 'values':[]}
for ts, nevents in value.items():
newDict['values'].append({'timestamp': ts, 'nevents':nevents})
newDict['values'] = sorted(newDict['values'], key=lambda k: k['timestamp'])
plot_data.append(newDict)
if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
'json' in request.session['requestParams']):
plot_data_list = [['timestamp'],]
plot_data_list[0].extend([point['timestamp'] for point in plot_data[0]['values']])
for i, line in enumerate(plot_data):
plot_data_list.append([line['state']])
plot_data_list[i+1].extend([point['nevents'] for point in plot_data[i]['values']])
dump = json.dumps(plot_data_list, cls=DateEncoder)
return HttpResponse(dump, content_type='application/json')
else:
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'built': datetime.now().strftime("%H:%M:%S"),
'plotData': json.dumps(plot_data)
}
response = render_to_response('prodNeventsTrend.html', data, content_type='text/html')
setCacheEntry(request, "prodNeventsTrend", json.dumps(data, cls=DateEncoder), 60 * 20)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
| 20,693
|
def create_storage_policy_zios(session, cloud_name, zios_id, policy_name, drive_type, drive_quantity,
policy_type_id, description=None, return_type=None, **kwargs):
"""
Creates a new policy to ZIOS.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type cloud_name: str
:param cloud_name: The cloud 'name' as returned by get_all_clouds. For
example: 'zadaralab01'. Required.
:type zios_id: int
:param zios_id: The ZIOS 'id' value as returned by get_all_zios_objects. Required.
:type policy_name: str
:param policy_name: Policy name. Required
:type drive_type: str
:param drive_type: Drive type internal name. Required
:type drive_quantity: int
:param drive_quantity: Number of drives to add. Required.
:type policy_type_id: int
:param policy_type_id: Storage policy type id. Required.
:type description: str
:param description: Policy description
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
zios_id = verify_zios_id(zios_id)
cloud_name = verify_cloud_name(cloud_name)
drive_type = verify_field(drive_type, 'drive_type')
drive_quantity = verify_capacity(drive_quantity, 'drive_quantity')
policy_type_id = verify_capacity(policy_type_id, 'policy_type_id')
body_values = {"name":policy_name, "drive_type":drive_type,
"drive_quantity":drive_quantity, "policy_type_id":policy_type_id}
if description is not None:
body_values["description"] = description
path = "/api/clouds/{0}/zioses/{1}/policy.json".format(cloud_name, zios_id)
return session.post_api(path=path, body=body_values, return_type=return_type, **kwargs)
| 20,694
|
def format_tooltips(G, **kwargs):
""" Annotate G, format tooltips.
"""
# node data = [(n, {...}), ...]
node_data = {}
if isinstance(G, nx.Graph):
node_data = G.nodes(True)
elif 'nodes' in G:
node_data = [(d["id"], d) for d in G['nodes']]
# unique ids
member_uids = np.sort(np.unique([
__ for n,d in node_data for __ in d['members']
]))
# array of tooltips
node_tooltips = []
for n,d in node_data:
# progress
print("Formatting tooltip... NodeID:", n)
member_ids = d['members']
# member images
images = d['image'][member_ids]
images = [IMG_HTML.format(src=_) for _ in images]
# format tooltip for node
node_tooltip = NODE_HTML.format(
node_id=n, node_name=d['name'],
node_size=len(member_ids),
data_size=len(member_uids),
images=images
)
# add to array
node_tooltips.append(node_tooltip)
# make numpy array
return np.array(node_tooltips)
| 20,695
|
def copy_files(extension, source, target=None):
"""Copy matching files from source to target.
Scan the ``source`` folder and copy any file that end with
the given ``extension`` to the ``target`` folder.
Both ``source`` and ``target`` are expected to be either a ``str`` or a
list or tuple of strings to be joined using ``os.path.join``.
    ``source`` will be interpreted as a path relative to the ``atm`` root
code folder, and ``target`` will be interpreted as a path relative to
the user's current working directory.
If ``target`` is ``None``, ``source`` will be used, and if the ``target``
directory does not exist, it will be created.
Args:
extension (str):
File extension to copy.
        source (str or iterable):
            Source directory.
        target (str or iterable or None):
Target directory. Defaults to ``None``.
Returns:
dict:
Dictionary containing the file names without extension as keys
and the new paths as values.
"""
if isinstance(source, (list, tuple)):
source = os.path.join(*source)
if isinstance(target, (list, tuple)):
target = os.path.join(*target)
elif target is None:
target = source
source_dir = os.path.join(os.path.dirname(__file__), source)
target_dir = os.path.join(os.getcwd(), target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
file_paths = dict()
for source_file in glob.glob(os.path.join(source_dir, '*.' + extension)):
file_name = os.path.basename(source_file)
target_file = os.path.join(target_dir, file_name)
print('Generating file {}'.format(target_file))
shutil.copy(source_file, target_file)
file_paths[file_name[:-(len(extension) + 1)]] = target_file
return file_paths
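# Hedged usage sketch (the 'json' extension and the directory names below are
# hypothetical, not taken from the original project):
def _demo_copy_files():
    paths = copy_files('json', ['config', 'templates'], target='my_templates')
    return paths  # maps file names (without extension) to their copied paths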
| 20,696
|
def add_centroid_frags(fragList, atmList):
"""Add centroid to each fragment."""
for frag in fragList:
atoms = [atmList[i] for i in frag['ids']]
frag['cx'], frag['cy'], frag['cz'] = centroid_atmList(atoms)
return fragList
| 20,697
|
def load_mat(path: str, mat: str, fid: str, size: Optional[int] = None, overwrite: Optional[bool] = False, loop: Optional[int] = 0) -> np.ndarray:
"""Get the raw data for one individual file.
If the file does not exist in the specified path then tries to download it
from Google Drive.
"""
filepath = os.path.join(path, mat)
if os.path.exists(filepath) and not overwrite:
if filepath.endswith('.mat'):
try:
return loadmat(filepath)
except ValueError:
try:
return tables.open_file(filepath, driver="H5FD_CORE")
except:
pass
# logging.warning('Corrupt database!!\n, overwriting...')
# return load_mat(path, mat, fid, size, overwrite=True)
elif filepath.endswith('.edf'):
try:
return mne.io.read_raw_edf(filepath)
except:
pass
elif filepath.endswith('.npy'):
try:
return np.load(filepath)
except:
pass
elif filepath.endswith('.bdf'):
try:
return mne.io.read_raw_bdf(filepath)
except:
pass
elif filepath.endswith('.gdf'):
try:
return mne.io.read_raw_gdf(filepath)
except:
pass
if loop > 2:
logging.warning(
'Several unsuccessful attempts, the data access quota could be compromised.')
logging.warning(
'Many read and write tasks over Google Drive databases could block the background access system almost 24 hours.')
sys.exit()
if drive_mounted():
logging.warning('Corrupt database!!')
return
else:
logging.warning('Corrupt database!!\noverwriting...')
return load_mat(path, mat, fid, size, overwrite=True, loop=loop + 1)
else:
logging.warning('Database not found!')
logging.warning('downloading...')
if drive_mounted():
logging.warning('Write on the shared drive has been disabled.')
logging.warning(
f'The directory name is optional for Google Drive mounted environment')
sys.exit()
os.makedirs(path, exist_ok=True)
gdd.download_file_from_google_drive(file_id=fid,
dest_path=filepath,
unzip=False,
overwrite=overwrite,
size=size)
return load_mat(path, mat, fid, size, loop=loop + 1)
| 20,698
|
def assert_stats_are_equal(state1: Any, state2: Any):
"""Asserts that the activation statistics in two Flax states are almost equal."""
for layer_name, state1_stats, state2_stats in _iterate_stats(state1, state2):
# The tolerance was chosen empirically to make the tests pass reliably for a
# 3-layer model. Possibly the tolerance has to be high because of floating
    # point accumulated error over 3 layers and because masked-out tokens still
    # exert a tiny but non-zero influence on the statistics: they are masked out
    # in the attention layers by subtracting a large but not infinite negative
    # value from their position in the Q*K output before taking a softmax to
    # get attention weights.
np.testing.assert_allclose(
state1_stats,
state2_stats,
err_msg=f'Stats changed for layer {layer_name}',
atol=.01)
| 20,699
|