content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def _collect_exit_info(container_dir):
    """Read exitinfo, check if app was aborted and why."""
    # Parse the container's exitinfo file (format handled by _read_exitinfo).
    exitinfo_file = os.path.join(container_dir, 'exitinfo')
    exitinfo = _read_exitinfo(exitinfo_file)
    _LOGGER.info('check for exitinfo file %r: %r', exitinfo_file, exitinfo)

    # An "aborted" marker file signals the app was aborted; its content
    # (when present) carries the reason.
    aborted_file = os.path.join(container_dir, 'aborted')
    aborted = os.path.exists(aborted_file)
    _LOGGER.info('check for aborted file: %s, %s', aborted_file, aborted)

    if not aborted:
        return exitinfo, aborted, None

    with open(aborted_file) as marker:
        reason = marker.read()
    return exitinfo, aborted, reason
def main(  # pylint: disable=too-many-arguments,too-many-locals
    private_key: PrivateKey,
    state_db: str,
    web3: Web3,
    contracts: Dict[str, Contract],
    start_block: BlockNumber,
    confirmations: BlockTimeout,
    host: str,
    port: int,
    service_fee: TokenAmount,
    operator: str,
    info_message: str,
    enable_debug: bool,
    matrix_server: List[str],
    accept_disclaimer: bool,
    enable_tracing: bool,
    tracing_sampler: str,
    tracing_param: str,
) -> int:
    """The Pathfinding service for the Raiden Network.

    Starts a ``PathfindingService`` synced from ``start_block``, waits for
    its startup to finish, then serves the ``PFSApi`` on ``host``:``port``
    until interrupted.  Always returns 0 after cleanup.
    """
    log.info("Starting Raiden Pathfinding Service")
    click.secho(PFS_DISCLAIMER, fg="yellow")
    # Require explicit confirmation unless the disclaimer was pre-accepted
    # via the CLI flag.
    if not accept_disclaimer:
        click.confirm(CONFIRMATION_OF_UNDERSTANDING, abort=True)
    log.info("Using RPC endpoint", rpc_url=get_web3_provider_info(web3))
    hex_addresses = {
        name: to_checksum_address(contract.address) for name, contract in contracts.items()
    }
    log.info("Contract information", addresses=hex_addresses, start_block=start_block)
    if enable_tracing:
        # Configure the opentracing/jaeger client with the CLI-provided
        # sampler settings.
        tracing_config = Config(
            config={"sampler": {"type": tracing_sampler, "param": tracing_param}, "logging": True},
            service_name="pfs",
            scope_manager=GeventScopeManager(),
            validate=True,
        )
        # Tracer is stored in `opentracing.tracer`
        tracing_config.initialize_tracer()
        assert isinstance(web3.provider, HTTPProvider), MYPY_ANNOTATION
        assert web3.provider.endpoint_uri is not None, MYPY_ANNOTATION
        # Set `Web3` requests Session to use `SessionTracing`
        cache_session(
            web3.provider.endpoint_uri,
            SessionTracing(propagate=False, span_tags={"target": "ethnode"}),
        )
    # Initialized to None so the finally-block can safely stop only what
    # was actually created before a failure.
    service = None
    api = None
    try:
        service = PathfindingService(
            web3=web3,
            contracts=contracts,
            sync_start_block=start_block,
            required_confirmations=confirmations,
            private_key=private_key,
            poll_interval=DEFAULT_POLL_INTERVALL,
            db_filename=state_db,
            matrix_servers=matrix_server,
            enable_tracing=enable_tracing,
        )
        service.start()
        log.debug("Waiting for service to start before accepting API requests")
        try:
            service.startup_finished.get(timeout=PFS_START_TIMEOUT)
        except gevent.Timeout:
            # NOTE(review): raises a broad Exception; callers only see a
            # generic failure here.
            raise Exception("PFS did not start within time.")
        log.debug("Starting API")
        api = PFSApi(
            pathfinding_service=service,
            service_fee=service_fee,
            debug_mode=enable_debug,
            one_to_n_address=to_canonical_address(contracts[CONTRACT_ONE_TO_N].address),
            operator=operator,
            info_message=info_message,
            enable_tracing=enable_tracing,
        )
        api.run(host=host, port=port)
        # Block until the service greenlet exits (or raises).
        service.get()
    except (KeyboardInterrupt, SystemExit):
        print("Exiting...")
    finally:
        log.info("Stopping Pathfinding Service...")
        if api:
            api.stop()
        if service:
            service.stop()
    return 0
def get_service(hass, config):
    """Get the Google Voice SMS notification service."""
    # Username and password are both mandatory for this platform.
    config_ok = validate_config(
        {DOMAIN: config},
        {DOMAIN: [CONF_USERNAME, CONF_PASSWORD]},
        _LOGGER)
    if not config_ok:
        return None
    return GoogleVoiceSMSNotificationService(
        config[CONF_USERNAME], config[CONF_PASSWORD])
def hammer_op(context, chase_duration):
    """what better way to do a lot of gnarly work than to pointer chase?"""
    chase_size = context.op_config["chase_size"]

    # Build a randomly permuted table of indices and chase pointers
    # through it for the requested wall-clock duration.
    pointers = list(range(0, chase_size))
    random.shuffle(pointers)
    cursor = random.randint(0, chase_size - 1)

    # and away we go
    began = time.time()
    while (time.time() - began) < chase_duration:
        cursor = pointers[cursor]

    context.log.info("Hammered - start %d end %d" % (began, time.time()))
    return chase_duration
def validate_sourcedata(path, source_type, pattern='sub-\\d+'):
    """
    Validate the "sourcedata/" directory provided by the user to see if its
    contents are consistent with the pipeline's requirements.

    Parameters
    ----------
    path : str or None
        Parent directory expected to contain "sourcedata/". Falsy values
        default to './'.
    source_type : list of str or None
        Names of the per-subject data directories (e.g. ['eeg']). Falsy
        values default to ['eeg'].
    pattern : str
        Regex that subject directory names must match.

    Returns
    -------
    dict
        Validation summary for the sourcedata path, the subject
        directories and the data files.

    Raises
    ------
    ValueError
        If ``path`` does not contain a "sourcedata/" directory.
    """
    if not path:
        path = './'
    if not source_type:
        source_type = ['eeg']
    # construct relative sourcedata path
    sourcedata_dir = os.path.join(path, 'sourcedata')
    source_name = None
    sub_dirs = []
    n_subs = 0
    subject_names = None
    data_dirs = []
    data_files = []
    # Bug fix: file_patterns/file_names used to be assigned only inside the
    # walk's data-file branch, raising NameError when no data files existed.
    file_patterns = [pattern + '_\\w+_' + source + '.*'
                     for source in source_type]
    file_names = 'OK'
    # browse through directories in parent_dir
    for root, directories, files in os.walk(path):
        if root == path:
            if 'sourcedata' not in directories:
                raise ValueError('The provided directory does not contain "sourcedata/".')  # noqa: E501
            source_name = 'OK'
        if root == sourcedata_dir:
            sub_dirs.extend([os.path.join(root, sub) for sub in directories])
            n_subs = len(sub_dirs)
            valid_names = [bool(re.findall(pattern=pattern,
                                           string=os.path.basename(sub)))
                           for sub in sub_dirs]
            # Bug fix: flag any invalid name; the old `len(set(...)) > 1`
            # check reported "OK" when *all* names were invalid.
            if valid_names and not all(valid_names):
                bad_names = np.where(np.logical_not(valid_names))[0]
                bad_names = [sub_dirs[bn] for bn in bad_names]
                warnings.warn('The subject directory names in "sourcedata/" are inconsistent.')  # noqa: E501
                subject_names = 'error in %s' \
                    % (', '.join([str(bn) for bn in bad_names]))
            else:
                subject_names = 'OK'
        elif root in sub_dirs:
            data_dirs.append([data_dir for data_dir in directories
                              if data_dir in source_type])
            n_dirs = [len(d) for d in data_dirs]
            if len(set(n_dirs)) > 1:
                warnings.warn("Subject directories contain different number of data directories.")  # noqa: E501
        elif root in [os.path.join(s_dir, source)
                      for s_dir in sub_dirs for source in source_type]:
            data_files.extend([os.path.join(root, file) for file in files])
            # Bug fix: one verdict per file (any pattern may match) instead
            # of appending one flag per (file, pattern) combination.
            valid_file_names = [
                any(re.findall(pattern=file_pattern,
                               string=os.path.basename(file))
                    for file_pattern in file_patterns)
                for file in data_files
            ]
            if valid_file_names and not all(valid_file_names):
                bad_files = np.where(np.logical_not(valid_file_names))[0]
                bad_files = [data_files[bf] for bf in bad_files]
                warnings.warn("Some file names are not named in accordance with the pipeline's requirements.")
                file_names = 'error in %s' % (', '.join([str(bn)
                                                         for bn in bad_files]))
            else:
                file_names = 'OK'
    data_val = {
        'source_data_path':
            {
                'path': sourcedata_dir,
                'naming': source_name,
                'dirs_in_sourcedata': n_subs
            },
        'subject_directories':
            {
                'name_pattern': pattern,
                'naming': subject_names
            },
        'data_files':
            {
                'file_pattern': (', '.join(file_patterns)),
                'naming': file_names
            }
    }
    return data_val
def singularity_plot(
    pure_fname,
    rolled_camp, lphase, grads,
    singularities, avg_singularity, thres_singularity):
    """ Plot overview over singularity measure

    Saves two figures under ``images/<pure_fname>/``:
    ``singularity.png`` (per-frame pipeline overview) and
    ``averaged_singularity.png`` (averaged and thresholded measures).
    All array arguments are indexed as ``arr[frame]`` for the overview plot;
    # assumes frame-major layout — TODO confirm against caller.
    """
    # handle img directory
    img_dir = 'images'
    if not os.path.isdir(os.path.join(img_dir, pure_fname)):
        os.makedirs(os.path.join(img_dir, pure_fname))
    # plot data
    # Sample `pos_num` evenly spaced frames, skipping frame 0.
    pos_num = 4
    pos_range = range(0, lphase.shape[0], int(lphase.shape[0]/(pos_num)))[1:]
    fig, axarr = plt.subplots(len(pos_range), 4, figsize=(10, 10))
    fig.tight_layout()
    plt.suptitle('pipeline overview')
    def show(data, title, ax):
        # Render one image panel with its own colorbar.
        ax.set_title(title)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        im = ax.imshow(
            data, interpolation='nearest', cmap=cm.gray)
        holder = make_axes_locatable(ax)
        cax = holder.append_axes('right', size='20%', pad=0.05)
        plt.colorbar(im, cax=cax, format='%.2f')
    for axrow, pos in enumerate(pos_range):
        show(rolled_camp[pos], 'cell overview', axarr[axrow][0])
        show(lphase[pos], 'local phase', axarr[axrow][1])
        show(grads[pos], 'gradient', axarr[axrow][2])
        show(singularities[pos], 'singularity measure', axarr[axrow][3])
    plt.savefig(os.path.join(img_dir, pure_fname, 'singularity.png'), bbox_inches='tight', dpi=300)
    #plt.show()
    # averaged results
    fig, axarr = plt.subplots(1, 2, figsize=(10, 10))
    show(avg_singularity, 'averaged singularity measure', axarr[0])
    show(thres_singularity, 'thresholded singularity measure', axarr[1])
    plt.savefig(os.path.join(img_dir, pure_fname, 'averaged_singularity.png'), bbox_inches='tight', dpi=300)
    #plt.show()
def generate_optimization_fns(
    loss_fn: Callable,
    opt_fn: Callable,
    k_fn: Callable,
    normalize_grad: bool = False,
    optimizations: Mapping = None,
):
    """Directly generates upper/outer bilevel program derivative functions.
    Args:
        loss_fn: loss_fn(z, *params), upper/outer level loss
        opt_fn: opt_fn(*params) = z, lower/inner argmin function
        k_fn: k_fn(z, *params) = 0, lower/inner implicit function
        normalize_grad: whether to normalize the gradient by its norm
        optimizations: optional mapping of derivative optimizations;
            key "Hz_fn" may override the upper-loss Hessian-in-z routine
    Returns:
        ``f_fn(*params), g_fn(*params), h_fn(*params)``
        parameters-only upper/outer level loss, gradient and Hessian.
    """
    # Cache of inner solutions, shared by f/g/h via fn_with_sol_cache so the
    # inner problem is solved once per parameter point.
    sol_cache = dict()
    # Detach the inner solution: derivatives come from the implicit-function
    # machinery below, not from differentiating through the solver.
    opt_fn_ = lambda *args, **kwargs: opt_fn(*args, **kwargs).detach()
    optimizations = {} if optimizations is None else copy(optimizations)
    @fn_with_sol_cache(opt_fn_, sol_cache)
    def f_fn(z, *params):
        # Upper-level loss evaluated at the cached inner solution.
        z = z.detach() if isinstance(z, torch.Tensor) else z
        params = _detach_args(*params)
        return loss_fn(z, *params)
    @fn_with_sol_cache(opt_fn_, sol_cache)
    def g_fn(z, *params):
        # Total gradient w.r.t. params: implicit term (via Dg = dL/dz) plus
        # the direct partial dL/dparams.
        z = z.detach() if isinstance(z, torch.Tensor) else z
        params = _detach_args(*params)
        g = JACOBIAN(loss_fn, (z, *params))
        Dp = implicit_jacobian(
            k_fn, z.detach(), *params, Dg=g[0], optimizations=optimizations
        )
        # Normalize to a list so the zip below works for a single parameter.
        Dp = Dp if len(params) != 1 else [Dp]
        # opts = dict(device=z.device, dtype=z.dtype)
        # Dp = [
        #    torch.zeros(param.shape, **opts) for param in params
        # ]
        ret = [Dp + g for (Dp, g) in zip(Dp, g[1:])]
        if normalize_grad:
            ret = [(z / (torch.norm(z) + 1e-7)).detach() for z in ret]
        ret = [ret.detach() for ret in ret]
        return ret[0] if len(ret) == 1 else ret
    @fn_with_sol_cache(opt_fn_, sol_cache)
    def h_fn(z, *params):
        # Total Hessian w.r.t. params: implicit second-order term plus the
        # direct per-parameter Hessian blocks of the upper loss.
        z = z.detach() if isinstance(z, torch.Tensor) else z
        params = _detach_args(*params)
        g = JACOBIAN(loss_fn, (z, *params))
        # Allow callers to supply a cheaper Hessian-in-z routine.
        if optimizations.get("Hz_fn", None) is None:
            optimizations["Hz_fn"] = lambda z, *params: HESSIAN_DIAG(
                lambda z: loss_fn(z, *params), (z,)
            )[0]
        Hz_fn = optimizations["Hz_fn"]
        Hz = Hz_fn(z, *params)
        H = [Hz] + HESSIAN_DIAG(lambda *params: loss_fn(z, *params), params)
        Dp, Dpp = implicit_hessian(
            k_fn,
            z,
            *params,
            Dg=g[0],
            Hg=H[0],
            optimizations=optimizations,
        )
        Dpp = Dpp if len(params) != 1 else [Dpp]
        ret = [Dpp + H for (Dpp, H) in zip(Dpp, H[1:])]
        ret = [ret.detach() for ret in ret]
        return ret[0] if len(ret) == 1 else ret
    return f_fn, g_fn, h_fn
def host_services(ctx: Configuration):
    """Call the Home Assistant 'host/services' endpoint."""
    _handle(ctx, 'host/services')
def get_weighted_spans(doc, vec, feature_weights):
    # type: (Any, Any, FeatureWeights) -> Optional[WeightedSpans]
    """ If possible, return a dict with preprocessed document and a list
    of spans with weights, corresponding to features in the document.
    """
    # A FeatureUnion is handled by a dedicated helper.
    if isinstance(vec, FeatureUnion):
        return _get_weighted_spans_from_union(doc, vec, feature_weights)
    result = _get_doc_weighted_spans(doc, vec, feature_weights)
    if result is None:
        # Spans could not be extracted for this vectorizer/document.
        return None
    found_features, doc_weighted_spans = result
    return WeightedSpans(
        [doc_weighted_spans],
        other=_get_other(feature_weights, [('', found_features)]),
    )
def get_child(parent, child_index):
    """
    Get the child at the given index, or return None if it doesn't exist.
    """
    children = parent.childNodes
    if 0 <= child_index < len(children):
        return children[child_index]
    return None
def testAtomicSubatomic():
    """
    Test atomic/subatomic links defined in memes.

    Returns a one-element resultSet list in the usual test-harness shape:
    [testcase_number, testcase_name, testResult, expectedResult, errata].
    """
    method = moduleName + '.' + 'testAtomicSubatomic'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    resultSet = []
    errata = []
    testResult = "True"
    expectedResult = "True"
    errorMsg = ""
    #The testcase entities
    try:
        parentMeme1 = Graph.api.createEntityFromMeme("AtomicSubatomic.ParentMeme1") #Both child entities have subatomic links
        parentMeme2 = Graph.api.createEntityFromMeme("AtomicSubatomic.ParentMeme2") #One child has an atomic link, the other subatomic
        parentMeme3 = Graph.api.createEntityFromMeme("AtomicSubatomic.ParentMeme3") #Both child entities have atomic links
    except Exception as e:
        testResult = "False"
        errorMsg = ('Error creating test entities!  Traceback = %s' % (e) )
        errata.append(errorMsg)
    try:
        pm1aChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme1, "AtomicSubatomic.ChildMM", linkTypes.ATOMIC)
        pm1sChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme1, "AtomicSubatomic.ChildMM", linkTypes.SUBATOMIC)
        # Bug fix: failure messages are now appended to errata instead of
        # being overwritten and silently lost.
        if len(pm1sChildren) < 2:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have two subatomic children.  It actually has %s\n" %(len(pm1sChildren))
            errata.append(errorMsg)
        if len(pm1aChildren) > 0:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have no atomic children.  It actually has %s\n" %(len(pm1aChildren))
            errata.append(errorMsg)
    except Exception as e:
        testResult = "False"
        errorMsg = ('Error when searching for children of AtomicSubatomic.ParentMeme1!  Traceback = %s' % (e) )
        errata.append(errorMsg)
    try:
        pm2aChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme2, "AtomicSubatomic.ChildMM", linkTypes.ATOMIC)
        pm2sChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme2, "AtomicSubatomic.ChildMM", linkTypes.SUBATOMIC)
        if len(pm2sChildren) != 1:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme2 should have one subatomic child.  It actually has %s\n" %(len(pm2sChildren))
            errata.append(errorMsg)
        if len(pm2aChildren) != 1:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme2 should have one atomic child.  It actually has %s\n" %(len(pm2aChildren))
            errata.append(errorMsg)
    except Exception as e:
        testResult = "False"
        errorMsg = ('Error when searching for children of AtomicSubatomic.ParentMeme2!  Traceback = %s' % (e) )
        errata.append(errorMsg)
    try:
        pm3aChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme3, "AtomicSubatomic.ChildMM", linkTypes.ATOMIC)
        pm3sChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme3, "AtomicSubatomic.ChildMM", linkTypes.SUBATOMIC)
        # Bug fix: the messages below previously named ParentMeme1 (copy-paste).
        if len(pm3sChildren) > 0:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme3 should have no subatomic children.  It actually has %s\n" %(len(pm3sChildren))
            errata.append(errorMsg)
        if len(pm3aChildren) < 2:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme3 should have two atomic children.  It actually has %s\n" %(len(pm3aChildren))
            errata.append(errorMsg)
    except Exception as e:
        testResult = "False"
        errorMsg = ('Error when searching for children of AtomicSubatomic.ParentMeme3!  Traceback = %s' % (e) )
        errata.append(errorMsg)
    testcase = "testAtomicSubatomic()"
    results = [1, testcase, testResult, expectedResult, errata]
    resultSet.append(results)
    Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    return resultSet
def extinction(species, adj, z, independent):
    """
    Return the presence/absence of each species after secondary extinctions.

    Parameters
    ----------
    species : numpy array of shape (nbsimu, S)
        Presence (1) / absence (0) of each species (columns) for each of
        the nbsimu simulations (rows).  Modified in place.
    adj : numpy array of size (S, S)
        Adjacency matrix.
    z : float
        Number of species which might not undergo secondary extinction.
    independent : bool
        Whether species without incoming links are independent (i.e. do
        not undergo secondary extinction).

    Returns
    -------
    Numpy array of shape (nbsimu, S) with the presence (1) or absence (0)
    of each species (columns) per decomposition (row).
    """
    #-------- Extinction of dependent species --------
    # Dependent species (columns z onward) survive only while they keep at
    # least one neighbour in the network.
    neighbours_left = np.sum(adj, axis=2)[:, z:]
    survivors = (neighbours_left > 0).astype(int)
    # Cascade the extinctions through trophic levels until stable.
    while np.sum(species[:, z:] != survivors) != 0:
        ### Extinction(s) ###
        # Remove the species that did not survive this round.
        species[:, z:] = species[:, z:] * survivors
        # Remove the links that belonged to the extinct species.
        adj = cancel(adj, species)
        ### Check for higher order extinctions ###
        neighbours_left = np.sum(adj, axis=2)[:, z:]
        survivors = (neighbours_left > 0).astype(int)
    #-------- Extinction of independent species --------
    if not independent:
        # Species without outgoing links left also go secondarily extinct.
        outgoing = np.sum(cancel(adj, species), axis=1)[:, :z]
        (species[:, :z])[outgoing == 0] = 0
    return species
def fix_variable_mana(card):
    """
    This function was created to fix a problem in the dataset.
    We're currently pretty up against the wall and I realized
    that 'Variable' mana texts were not correctly converted to {X}
    so this function is fed cards and corrects their mana values
    if it detects this problem.  Mutates ``card.mana_cost`` in place.
    """
    def correct_field(symbol):
        # Rewrite a 'Variable ...' symbol as its {X...} equivalent;
        # other symbols pass through unchanged.
        if 'Variable' in symbol:
            # strip out brackets:
            symbol = symbol[1:-1]
            # remove 'Variable'
            symbol = symbol.replace('Variable', '').strip()
            # get the correct color-letter
            symbol = alt_text_to_curly_bracket(symbol)
            # 'insert' X and return the corrected symbol.
            return f'{{X{symbol[1:-1]}}}'
        else:
            return symbol
    # Bug fix: correct_field was defined but never applied; the original
    # comprehension copied every symbol unchanged.
    card.mana_cost = [correct_field(symbol) for symbol in card.mana_cost]
def cli_arg(
    runner: CliRunner,
    notebook_path: Path,
    mock_terminal: Mock,
    remove_link_ids: Callable[[str], str],
    mock_tempfile_file: Mock,
    mock_stdin_tty: Mock,
    mock_stdout_tty: Mock,
) -> Callable[..., str]:
    """Return function that applies arguments to cli."""
    def _cli_arg(
        *args: Union[str, None],
        truecolor: bool = True,
        paging: Union[bool, None] = False,
        material_theme: bool = True,
        images: bool = True,
        **kwargs: Union[str, None],
    ) -> str:
        """Apply given arguments to cli.
        Args:
            *args (Union[str, None]): The extra arguments to pass to the
                command.
            truecolor (bool): Whether to pass
                '--color-system=truecolor' option. By default True.
            paging (Union[bool, None]): Whether to pass '--paging' or
                '--no-paging' option. By default False, which
                corresponds to '--no-paging'. None passes neither flag.
            material_theme (bool): Whether to set the theme to
                'material'. By default True.
            images (bool): Whether to pass '--images'. By default True.
            **kwargs (Union[str, None]): Environmental variables to set.
                Will be uppercased.
        Returns:
            str: The output of the invoked command.
        """
        # None placeholders let callers conditionally omit arguments.
        cleaned_args = [arg for arg in args if arg is not None]
        upper_kwargs = {
            name.upper(): value for name, value in kwargs.items() if value is not None
        }
        # Notebook path is always the first positional argument.
        cli_args = [os.fsdecode(notebook_path), *cleaned_args]
        if images:
            cli_args.append("--images")
        if material_theme:
            cli_args.append("--theme=material")
        if truecolor:
            cli_args.append("--color-system=truecolor")
        if paging is True:
            cli_args.append("--paging")
        elif paging is False:
            cli_args.append("--no-paging")
        result = runner.invoke(
            __main__.typer_click_object,
            args=cli_args,
            color=True,
            env=upper_kwargs,
        )
        # Strip non-deterministic link ids so outputs are comparable.
        output = remove_link_ids(result.output)
        return output
    return _cli_arg
def grid_search(x_train, y_train, x_val=None, y_val=None, args=None, config_filename: str = None, folds: int = 5,
                verbose: int = 0, default_config: str = CONFIG_PATH_MLP, working_dir: str = WORKING_DIR,
                save_path: str = os.path.join(WORKING_DIR, 'logs')):
    """ Optimize MLP through grid searching
    Args:
        working_dir: (str) path to working directory
        save_path: (str) where to store intermediate stuff
        config_filename: (str) path to config file. Best hyperparameters will be saved under this name
        x_train: array of train data
        y_train: array of train data labels
        x_val: array of validation data, default = None
        y_val: array of validation labels, default = None
        args: (dict) hyperparameters of the MLP (see config file)
        folds: (int) number of cross-validation folds
        default_config: (str) path to default config file
        verbose: (bool) printout all training steps
    Returns: trained model with best hyperparameters
    """
    from sklearn.cluster import KMeans
    from sklearn.model_selection import StratifiedKFold
    # If no args are given, get the default settings to optimize
    if args is None:
        args = utils.get_config(default_config)
    if type(y_train) is list:
        y_train = np.array(y_train)
    # Create the grid-search grid
    grid = get_grid(args)
    # Bug fix: report folds * len(grid) instead of the hard-coded 5.
    print(f"--- Optimizing {folds * len(grid)} models with {folds}-fold cross-validation")
    # Get shuffled train/test folds based on kmeans clustering of the training molecules
    kmeans = KMeans(n_clusters=10, random_state=RANDOM_SEED).fit(x_train)
    skf = StratifiedKFold(n_splits=folds, random_state=RANDOM_SEED, shuffle=True)
    # Bug fix: use a distinct name instead of rebinding the `folds` parameter.
    fold_splits = [{'train': tr, 'test': tst} for tr, tst in skf.split(x_train, kmeans.labels_)]
    # Train every fold for every set of hyperparameters
    results = []
    for hyperparameters in grid:
        results_folds = []
        for fold in fold_splits:
            # Get data from this split.
            fold_x_train = x_train[fold['train']]
            fold_y_train = y_train[fold['train']]
            fold_x_test = x_train[fold['test']]
            fold_y_test = y_train[fold['test']]
            # Train model
            model = train_mlp(fold_x_train, fold_y_train, args=hyperparameters, verbose=verbose, save_path=save_path)
            # Mean squared error on the held-out fold.
            predictions = list(model.predict(fold_x_test).flatten())
            res = np.square(np.subtract(fold_y_test, predictions)).mean()
            results_folds.append(res)
        # Append the mean test results from the cross-validation + corresponding hyperparameters to a list
        results.append((np.mean(results_folds), hyperparameters))
    # Get the best hyperparameters (lowest mean MSE)
    results.sort(key=lambda x: x[0])
    best_hyperparameters = results[0][1]
    # Write them as a yml
    if config_filename is None:
        config_filename = os.path.join(working_dir, 'configures', 'MLP.yml')
    utils.write_config(config_filename, best_hyperparameters)
    # Train a model on all training data with the best hyperparameters
    model = train_mlp(x_train, y_train, x_val, y_val, args=best_hyperparameters, verbose=verbose, save_path=save_path)
    return model
def distinguish_system_application(vulner_info):
    """
    Test whether CVE has system CIA loss or application CIA loss.
    :param vulner_info: object of class Vulnerability from cve_parser.py
    :return: result impact or impacts
    """
    result_impacts = []
    # System-level checks, driven by description, CVSSv2 vector and CPE type.
    system_checks = (
        (system_confidentiality_changed, "System confidentiality loss"),
        (system_integrity_changed, "System integrity loss"),
        (system_availability_changed, "System availability loss"),
    )
    for check, impact in system_checks:
        if check(vulner_info.description,
                 vulner_info.cvssv2,
                 vulner_info.cpe_type):
            result_impacts.append(impact)
    if not result_impacts:
        # No system-level loss detected: fall back to the CVSSv3 vector
        # for application-level impacts.
        if vulner_info.cvssv3['i'] != "NONE":
            result_impacts.append("Application integrity loss")
        if vulner_info.cvssv3['a'] != "NONE":
            result_impacts.append("Application availability loss")
        if vulner_info.cvssv3['c'] != "NONE":
            result_impacts.append("Application confidentiality loss")
    return result_impacts
def split_missions_and_dates(fname):
    """
    Examples
    --------
    >>> fname = 'nustar-nicer_gt55000_lt58000.csv'
    >>> outdict = split_missions_and_dates(fname)
    >>> outdict['mission1']
    'nustar'
    >>> outdict['mission2']
    'nicer'
    >>> outdict['mjdstart']
    'MJD 55000'
    >>> outdict['mjdstop']
    'MJD 58000'
    >>> fname = 'nustar-nicer.csv'
    >>> outdict = split_missions_and_dates(fname)
    >>> outdict['mission1']
    'nustar'
    >>> outdict['mission2']
    'nicer'
    >>> outdict['mjdstart']
    'Mission start'
    >>> outdict['mjdstop']
    'Today'
    """
    stem = os.path.splitext(fname)[0]
    tokens = stem.split('_')

    # Optional gtNNNNN / ltNNNNN tokens bound the MJD interval.
    mjdstart, mjdstop = 'Mission start', 'Today'
    for token in tokens[1:]:
        if 'gt' in token:
            mjdstart = 'MJD ' + token.replace('gt', '')
        elif 'lt' in token:
            mjdstop = 'MJD ' + token.replace('lt', '')

    # The first token is always "<mission1>-<mission2>".
    mission1, mission2 = tokens[0].split('-')
    return {'mission1': mission1, 'mission2': mission2,
            'mjdstart': mjdstart, 'mjdstop': mjdstop}
def run_in_windows_bash(conanfile, command, cwd=None, env=None):
    """ Will run a unix command inside a bash terminal. It requires to have
    MSYS2, CYGWIN, or WSL, configured via the 'tools.microsoft.bash:subsystem'
    and 'tools.microsoft.bash:path' conan configuration entries.

    :param conanfile: the current recipe's conanfile
    :param command: the unix command to run inside the bash shell
    :param cwd: working directory (Windows path); defaults to os.getcwd()
    :param env: environment launcher script name(s) to wrap the command with;
        when given it replaces the default conan environment scripts
    :raises ConanException: when not on Windows or the subsystem config is missing
    """
    if env:
        # Passing env invalidates the conanfile.environment_scripts
        env_win = [env] if not isinstance(env, list) else env
        env_shell = []
    else:
        # Default: conan-generated environment scripts for both sides
        # (bash-side .sh and Windows-side .bat).
        env_shell = ["conanenv.sh"]
        env_win = ["conanenv.bat"]
    subsystem = conanfile.conf["tools.microsoft.bash:subsystem"]
    shell_path = conanfile.conf["tools.microsoft.bash:path"]
    if not platform.system() == "Windows":
        raise ConanException("Command only for Windows operating system")
    if not subsystem or not shell_path:
        raise ConanException("The config 'tools.microsoft.bash:subsystem' and 'tools.microsoft.bash:path' are "
                             "needed to run commands in a Windows subsystem")
    if subsystem == MSYS2:
        # Configure MSYS2 to inherit the PATH
        msys2_mode_env = Environment(conanfile)
        _msystem = {"x86": "MINGW32"}.get(conanfile.settings.get_safe("arch"), "MINGW64")
        msys2_mode_env.define("MSYSTEM", _msystem)
        msys2_mode_env.define("MSYS2_PATH_TYPE", "inherit")
        path = os.path.join(conanfile.generators_folder, "msys2_mode.bat")
        msys2_mode_env.save_bat(path)
        env_win.append(path)
    # Needed to change to that dir inside the bash shell
    wrapped_shell = '"%s"' % shell_path if " " in shell_path else shell_path
    if env_win:
        # Wrap the shell invocation with the Windows-side environment scripts.
        wrapped_shell = environment_wrap_command(conanfile, env_win, shell_path,
                                                 cwd=conanfile.generators_folder)
    cwd = cwd or os.getcwd()
    if not os.path.isabs(cwd):
        cwd = os.path.join(os.getcwd(), cwd)
    # Translate the Windows cwd into the subsystem's path convention.
    cwd_inside = unix_path(conanfile, cwd)
    wrapped_user_cmd = command
    if env_shell:
        # Wrapping the inside_command enable to prioritize our environment, otherwise /usr/bin go
        # first and there could be commands that we want to skip
        wrapped_user_cmd = environment_wrap_command(conanfile, env_shell, command,
                                                    cwd=conanfile.generators_folder)
    inside_command = 'cd "{cwd_inside}" && ' \
                     '{wrapped_user_cmd}'.format(cwd_inside=cwd_inside,
                                                 wrapped_user_cmd=wrapped_user_cmd)
    # Escape for cmd.exe before passing through bash --login -c.
    inside_command = escape_windows_cmd(inside_command)
    final_command = 'cd "{cwd}" && {wrapped_shell} --login -c {inside_command}'.format(
        cwd=cwd,
        wrapped_shell=wrapped_shell,
        inside_command=inside_command)
    conanfile.output.info('Running in windows bash: %s' % final_command)
    return conanfile._conan_runner(final_command, output=conanfile.output, subprocess=True)
def declare_ineq_soc_ub(model, index_set):
    """
    create the constraint for the second order cone
    """
    m = model
    con_set = decl.declare_set("_con_ineq_soc_ub", model, index_set)
    m.ineq_soc_ub = pe.Constraint(con_set)
    # One SOC upper-bound constraint per (from_bus, to_bus) pair:
    # c^2 + s^2 >= vmsq_from * vmsq_to
    for branch in con_set:
        from_bus, to_bus = branch
        lhs = m.c[from_bus, to_bus] ** 2 + m.s[from_bus, to_bus] ** 2
        m.ineq_soc_ub[branch] = lhs >= m.vmsq[from_bus] * m.vmsq[to_bus]
def remove_uploaded_records(db):
    """
    Removes all records archived and uploaded.
    :param db: DB Connection to Pony
    :return: List of Records removed (empty list when there is nothing to do)
    """
    list_of_local_records = query.get_records_uploaded(db)
    removed_records_list = list()
    # Bug fix: the empty case used to return the int 0 while every other
    # path returned a list; callers now always receive a list.
    if not list_of_local_records:
        return removed_records_list
    for record in list_of_local_records:
        record_path = record.path
        if not record.removed and path.isfile(record_path):
            remove(record_path)
            record.removed = True
            removed_records_list.append(record)
    return removed_records_list
def nearest_with_mask_regrid(
    distances: ndarray,
    indexes: ndarray,
    surface_type_mask: ndarray,
    in_latlons: ndarray,
    out_latlons: ndarray,
    in_classified: ndarray,
    out_classified: ndarray,
    vicinity: float,
) -> Tuple[ndarray, ndarray]:
    """
    Main regridding function for the nearest distance option.
    Some inputs exist just for handling island-like points.
    Args:
        distances:
            Distance array from each target grid point to its source grid points.
        indexes:
            Source grid point indexes for each target grid point.
        surface_type_mask:
            Boolean true if source point type matches target point type.
        in_latlons:
            Source points' latitude-longitudes.
        out_latlons:
            Target points' latitude-longitudes.
        in_classified:
            Land/sea type for source grid points (land -> True).
        out_classified:
            Land/sea type for target grid points (land -> True).
        vicinity:
            Radius of specified searching domain, in meter.
    Returns:
        - Updated distances - array from each target grid point to its source grid points.
        - Updated indexes - source grid point number for all target grid points.
    """
    # Check if there are output points with mismatched surface types
    # (fewer than 4 of the nearby source points share the target's type).
    matched_nearby_points_count = np.count_nonzero(surface_type_mask, axis=1)
    points_with_mismatches = (np.where(matched_nearby_points_count < 4))[0]
    # Look for nearest input points for the output points with mismatched surface
    indexes, distances, surface_type_mask = update_nearest_points(
        points_with_mismatches,
        in_latlons,
        out_latlons,
        indexes,
        distances,
        surface_type_mask,
        in_classified,
        out_classified,
    )
    # Handle island and lake like output points - find more distant same surface type input points
    # Note: surface_type_mask has been updated above
    matched_nearby_points_count = np.count_nonzero(surface_type_mask, axis=1)
    fully_mismatched_points = (np.where(matched_nearby_points_count == 0))[0]
    if fully_mismatched_points.shape[0] > 0:
        indexes, surface_type_mask = lakes_islands(
            fully_mismatched_points,
            indexes,
            surface_type_mask,
            in_latlons,
            out_latlons,
            in_classified,
            out_classified,
            vicinity,
        )
    # Convert mask to be true where input points should not be considered
    inverse_surface_mask = np.logical_not(surface_type_mask)
    # Replace distances with infinity where they should not be used
    masked_distances = np.where(inverse_surface_mask, np.float64(np.inf), distances)
    # Distances and indexes have been prepared to handle the mask, so can now
    # call the non-masked regrid function in process
    return masked_distances, indexes
def _validator(
    directory: str,
    output_types: List[str] = OUTPUT_TYPES,
    log_level: Literal["INFO", "DEBUG"] = "INFO",
    coverages: dict = {},  # read-only here (only iterated), so the mutable default is safe
    schemas_path: Path = Path(__file__).parent.joinpath(r"./schemas"),
    raise_error: bool = False,
) -> dict:
    """Validate a HyDAMO dataset directory and export the results.

    Parameters
    ----------
    directory : str
        Directory with datasets sub-directory and validation_rules.json
    output_types : List[str], optional
        The types of output files that will be written. Options are
        ["geojson", "csv", "geopackage"]. By default all will be written
    log_level : Literal['INFO', 'DEBUG'], optional
        Level for logger. The default is "INFO".
    coverages : dict, optional
        Location of coverages. E.g. {"AHN": path_to_ahn_dir}. The default is {}.
    schemas_path : Path, optional
        Path to the HyDAMO and validation_rules schemas.
        The default is Path(__file__).parent.joinpath(r"./schemas").
    raise_error: bool, optional
        Will raise an error (or not) when Exception is raised. The default is False

    Returns
    -------
    HyDAMO, LayersSummary, ResultSummary
        On success, a tuple with a filled HyDAMO datamodel, layers_summary and
        result_summary. On failure with ``raise_error=False``, a dict with the
        result summary (including the error).
    """
    timer = Timer()
    try:
        results_path = None
        logger = logging.getLogger(__name__)
        logger.setLevel(getattr(logging, log_level))
        date_check = pd.Timestamp.now().isoformat()
        result_summary = ResultSummary(date_check=date_check)
        layers_summary = LayersSummary(date_check=date_check)
        # check if all files are present
        dir_path = Path(directory)
        # create a results_path; any previous results are removed first
        if dir_path.exists():
            results_path = dir_path.joinpath("results")
            if results_path.exists():
                try:
                    shutil.rmtree(results_path)
                except PermissionError:
                    # best-effort cleanup: a locked results dir is reused
                    pass
            results_path.mkdir(parents=True, exist_ok=True)
        else:
            raise FileNotFoundError(f"{dir_path.absolute().resolve()} does not exist")
        dataset_path = dir_path.joinpath("datasets")
        validation_rules_json = dir_path.joinpath("validationrules.json")
        missing_paths = []
        for path in [dataset_path, validation_rules_json]:
            if not path.exists():
                missing_paths += [str(path)]
        if missing_paths:
            result_summary.error = f'missing_paths: {",".join(missing_paths)}'
            raise FileNotFoundError(f'missing_paths: {",".join(missing_paths)}')
        else:
            # parse and json-schema-validate the validation rules
            try:
                validation_rules_sets = json.loads(validation_rules_json.read_text())
            except Exception as e:
                result_summary.error = "the file with validationrules is not a valid JSON (see exception)"
                raise e
            try:
                rules_version = validation_rules_sets["schema"]
                schema = _read_schema(rules_version, schemas_path)
            except Exception as e:
                result_summary.error = "schema version cannot be read from validation rules (see exception)"
                raise e
            try:
                validate(validation_rules_sets, schema)
            except ValidationError as e:
                result_summary.error = (
                    "validation rules invalid according to json-schema (see exception)"
                )
                raise e
        # check if output-files are supported
        unsupported_output_types = [
            item for item in output_types if item not in OUTPUT_TYPES
        ]
        if unsupported_output_types:
            error_message = (
                r"unsupported output types: " f'{",".join(unsupported_output_types)}'
            )
            result_summary.error = error_message
            raise TypeError(error_message)
        # set coverages
        if coverages:
            for key, item in coverages.items():
                logical_validation.general_functions._set_coverage(key, item)
        # start validation
        # read data-model
        result_summary.status = "define data-model"
        try:
            hydamo_version = validation_rules_sets["hydamo_version"]
            datamodel = HyDAMO(version=hydamo_version)
        except Exception as e:
            result_summary.error = "datamodel cannot be defined (see exception)"
            raise e
        # validate dataset syntax
        result_summary.status = "syntax-validation (layers)"
        datasets = DataSets(dataset_path)
        result_summary.dataset_layers = datasets.layers
        ## validate syntax of datasets on layers-level and append to result
        logger.info("syntax-validation of object-layers")
        valid_layers = datamodel_layers(datamodel.layers, datasets.layers)
        result_summary.missing_layers = missing_layers(
            datamodel.layers, datasets.layers
        )
        ## validate valid_layers on fields-level and add them to data_model
        result_summary.status = "syntax-validation (fields)"
        syntax_result = []
        ## get status_object if any
        status_object = None
        if "status_object" in validation_rules_sets.keys():
            status_object = validation_rules_sets["status_object"]
        for layer in valid_layers:
            logger.info(f"syntax-validation of fields in {layer}")
            gdf, schema = datasets.read_layer(
                layer, result_summary=result_summary, status_object=status_object
            )
            layer = layer.lower()
            # ensure compulsory columns exist, even if empty
            for col in INCLUDE_COLUMNS:
                if col not in gdf.columns:
                    gdf[col] = None
                    schema["properties"][col] = "str"
            if not INDEX in gdf.columns:
                result_summary.error = f"Index-column '{INDEX}' is compulsory and not defined for layer '{layer}'."
                raise KeyError(f"{INDEX} not in columns")
            gdf, result_gdf = fields_syntax(
                gdf,
                schema,
                datamodel.validation_schemas[layer],
                INDEX,
                keep_columns=INCLUDE_COLUMNS,
            )
            # Add the syntax-validation result to the results_summary
            layers_summary.set_data(result_gdf, layer, schema["geometry"])
            # Add the corrected datasets_layer data to the datamodel.
            datamodel.set_data(gdf, layer, index_col=INDEX)
            syntax_result += [layer]
        # do logical validation: append result to layers_summary
        result_summary.status = "logical validation"
        layers_summary, result_summary = logical_validation.execute(
            datamodel,
            validation_rules_sets,
            layers_summary,
            result_summary,
            log_level,
            raise_error,
        )
        # finish validation and export results
        logger.info("exporting results")
        result_summary.status = "export results"
        result_layers = layers_summary.export(results_path, output_types)
        result_summary.result_layers = result_layers
        result_summary.error_layers = [
            i for i in datasets.layers if i.lower() not in result_layers
        ]
        result_summary.syntax_result = syntax_result
        result_summary.validation_result = [
            i["object"] for i in validation_rules_sets["objects"] if i["object"] in result_layers
        ]
        result_summary.success = True
        result_summary.status = "finished"
        result_summary.duration = timer.report()
        result_summary.to_json(results_path)
        logger.info(f"finished in {result_summary.duration:.2f} seconds")
        return datamodel, layers_summary, result_summary
    except Exception as e:
        e_str = str(e).replace("\n", " ")
        e_str = " ".join(e_str.split())
        if result_summary.error is not None:
            result_summary.error = fr"{result_summary.error} Python Exception: '{e_str}'"
        else:
            result_summary.error = fr"Python Exception: '{e_str}'"
        if results_path is not None:
            result_summary.to_json(results_path)
        if raise_error:
            raise e
        else:
            # Bug fix: the summary dict was computed but never returned,
            # so callers received None on a handled failure.
            return result_summary.to_dict()
def main(infile: str, output: TextIO, chunk_size: int, min_tail_size: int):
    """Takes an assembly as input and produces an output of 'reads': the
    assembly chopped into pieces (chunks).
    """
    with pysam.FastxFile(infile) as contigs:
        for contig in contigs:
            sequence = contig.sequence
            emitted = 0
            for start in range(0, len(sequence), chunk_size):
                piece = sequence[start : start + chunk_size]
                # Skip (trailing) pieces that are too short to be useful.
                if len(piece) < min_tail_size:
                    continue
                emitted += 1
                print(f">{contig.name}_chunk={emitted}", file=output)
                print(piece, file=output)
def assert_file_existance(dataset_uri_list):
    """Assert that provided uris exist in filesystem.

    Verify that the uris passed in the argument exist on the filesystem;
    if not, raise an exception indicating which files do not exist.

    Args:
        dataset_uri_list (list): a list of relative or absolute file paths to
            validate

    Returns:
        None

    Raises:
        IOError: if any files are not found; the message lists every missing
            path so callers can report them all at once.
    """
    not_found_uris = [uri for uri in dataset_uri_list if not os.path.exists(uri)]
    if not_found_uris:
        error_message = (
            "The following files do not exist on the filesystem: " +
            str(not_found_uris))
        # Bug fix: ``exceptions.IOError`` referenced the Python-2 ``exceptions``
        # module; use the builtin IOError (alias of OSError on Python 3),
        # which matches the documented contract.
        raise IOError(error_message)
def get_renaming(mappers, year):
    """Get original to final column namings."""
    # ``year`` is part of the call signature but not used for the mapping.
    return {code: attr['df_name'] for code, attr in mappers.items()}
def test_note_reversed():
    """Test Note reversed dunder method."""
    # Reversing the enum directly must match reversing its member list.
    members = list(Note)
    assert list(reversed(members)) == list(reversed(Note))
async def clap(text, args):
    """Put clap emojis between words.

    Args:
        text: the sentence to decorate.
        args: optional list whose first element overrides the default emoji.

    Returns:
        The text with the separator inserted between every pair of words.
    """
    # Truthiness check instead of ``args != []``: also tolerates args=None.
    clap_str = args[0] if args else "👏"
    words = text.split(" ")
    return f" {clap_str} ".join(words)
def apply_binary_str(
    a: Union[pa.Array, pa.ChunkedArray],
    b: Union[pa.Array, pa.ChunkedArray],
    *,
    func: Callable,
    output_dtype,
    parallel: bool = False,
):
    """
    Apply an element-wise numba-jitted function on two Arrow columns.

    The supplied function must return a numpy-compatible scalar.
    Handling of missing data and chunking of the inputs is done automatically.

    Args:
        a: left operand column.
        b: right operand column; must have the same length as ``a``.
        func: element-wise callable applied to each pair of values.
        output_dtype: dtype of the produced values.
        parallel: whether the jitted kernel may run in parallel.

    Returns:
        ``pa.Array`` when both inputs are plain arrays, otherwise a
        ``pa.ChunkedArray`` chunked like the inputs.

    Raises:
        ValueError: if the inputs differ in length or either operand has an
            unsupported type.
    """
    if len(a) != len(b):
        raise ValueError("Inputs don't have the same length.")
    if isinstance(a, pa.ChunkedArray):
        if isinstance(b, pa.ChunkedArray):
            # Align both chunk layouts so each pair of slices is contiguous.
            in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)
            new_chunks: List[pa.Array] = []
            for a_offset, b_offset in zip(in_a_offsets, in_b_offsets):
                a_slice = a.chunk(a_offset[0])[a_offset[1] : a_offset[1] + a_offset[2]]
                b_slice = b.chunk(b_offset[0])[b_offset[1] : b_offset[1] + b_offset[2]]
                new_chunks.append(
                    _apply_binary_str_array(
                        a_slice,
                        b_slice,
                        func=func,
                        output_dtype=output_dtype,
                        parallel=parallel,
                    )
                )
            return pa.chunked_array(new_chunks)
        elif isinstance(b, pa.Array):
            new_chunks = []
            offsets = _calculate_chunk_offsets(a)
            for chunk, offset in zip(a.iterchunks(), offsets):
                new_chunks.append(
                    _apply_binary_str_array(
                        chunk,
                        b[offset : offset + len(chunk)],
                        func=func,
                        output_dtype=output_dtype,
                        parallel=parallel,
                    )
                )
            return pa.chunked_array(new_chunks)
        else:
            # Bug fix: this branch rejects ``b`` — the *right* operand —
            # but the message previously said "left operand".
            raise ValueError(f"right operand has unsupported type {type(b)}")
    elif isinstance(a, pa.Array):
        if isinstance(b, pa.ChunkedArray):
            new_chunks = []
            offsets = _calculate_chunk_offsets(b)
            for chunk, offset in zip(b.iterchunks(), offsets):
                new_chunks.append(
                    _apply_binary_str_array(
                        a[offset : offset + len(chunk)],
                        chunk,
                        func=func,
                        output_dtype=output_dtype,
                        parallel=parallel,
                    )
                )
            return pa.chunked_array(new_chunks)
        elif isinstance(b, pa.Array):
            return _apply_binary_str_array(
                a, b, func=func, output_dtype=output_dtype, parallel=parallel
            )
        else:
            # Bug fix: same as above — ``b`` is the right operand.
            raise ValueError(f"right operand has unsupported type {type(b)}")
    else:
        raise ValueError(f"left operand has unsupported type {type(a)}")
def product_review(product_id: str):
    """
    Shows review statistics for a product.

    Returns a python dictionary with content-type: application/json.
    An optional ``date`` query parameter (BIGINT unixReviewTime) restricts
    both queries to reviews posted at that time.
    """
    session = Session()
    date = request.args.get('date')  # parse a query string formatted as BIGINT unixReviewTime
    # SELECT AVG(overall) AS average, COUNT(overall) AS total FROM reviews WHERE productID=<product_id> (AND unixReviewTime=date);
    query_1 = (
        session.query(
            func.avg(reviews.columns.overall)
            .label('average'),
            func.count(reviews.columns.overall)
            .label('total')
        )
        .filter(reviews.columns.productID == product_id)
    )
    if date:
        query_1 = query_1.filter(reviews.columns.unixReviewTime == date)
    query_1 = query_1.first()
    # SELECT overall AS stars, COUNT(overall) AS count FROM reviews WHERE productID=<product_id> (AND unixReviewTime=date) GROUP BY overall;
    query_2 = (
        session.query(
            reviews.columns.overall
            .label('stars'),
            func.count(reviews.columns.overall)
            .label('count')
        )
        .filter(reviews.columns.productID == product_id)
    )
    if date:
        query_2 = query_2.filter(reviews.columns.unixReviewTime == date)
    query_2 = (
        query_2.group_by(reviews.columns.overall)
        .all()
    )
    try:
        json = {
            "productID": product_id,
            "average": round(query_1.average, 1),
            "percent_breakdown": {f"{int(row.stars)}_star": round((row.count * 100) / query_1.total) for row in query_2},
            "count_breakdown": {f"{int(row.stars)}_star": row.count for row in query_2},
            "total": query_1.total
        }
        return json
    except Exception:
        # Bug fix: the previous bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt. Only genuine errors (e.g. no matching reviews, so
        # ``average`` is None / ``total`` is 0) should map to a 404.
        return Response("Error", 404)
    finally:
        session.close()
def remove_extra_two_spaces(text: str) -> str:
    """Collapse consecutive spaces into a single space wherever they occur.

    Bug fix: a single ``replace`` pass leaves residual double spaces for runs
    of three or more (e.g. ``"a   b"`` -> ``"a  b"``), so keep replacing
    until no double space remains.
    """
    while "  " in text:
        text = text.replace("  ", " ")
    return text
def reflect_table(table_name, engine):
    """
    Gets the table with the given name from the sqlalchemy engine.

    Args:
        table_name (str): Name of the table to extract.
        engine (sqlalchemy.engine.base.Engine): Engine to extract from.

    Returns:
        table (sqlalchemy.ext.declarative.api.DeclarativeMeta): The extracted
            table, which can now be used to read from the database.
    """
    # Reflect the table definition straight from the database schema.
    return Table(table_name, MetaData(), autoload=True, autoload_with=engine)
def load_utt_list(utt_list):
    """Load a list of utterances.

    Args:
        utt_list (str): path to a file containing a list of utterances,
            one per line.

    Returns:
        List[str]: whitespace-stripped, non-empty utterance ids.
    """
    with open(utt_list) as f:
        stripped = (line.strip() for line in f)
        return [utt_id for utt_id in stripped if utt_id]
def kfunc_vals(points, area):
    """
    Input
      points: a list of Point objects
      area: an Extent object
    Return
      ds: list of radii
      lds: L(d) values for each radius in ds
    """
    # This function is taken from kfunction file in spatialanalysis library
    n = len(points)
    density = n / area.area()
    tree = kdtree2(points)
    # Base radius: a tenth of 2/3 of the shorter extent side.
    base = min([area.xmax - area.xmin, area.ymax - area.ymin]) * 2 / 3 / 10
    ds = [base * (i + 1) for i in range(10)]
    lds = []
    for radius in ds:
        # Average L(d) over all points for this radius.
        total = sum(kfunc(tree, p, radius, density)[1] for p in points)
        lds.append(total / n)
    return ds, lds
def test_step():
    """
    Test step function
    """
    ros_rate = 20
    ros_max_ita = 20 * 32
    laser_z = 0.086 + 0.043 + 0.035
    step_point_list = [[0.2, laser_z], [0.2, laser_z+0.2], [0.2, laser_z+0.4],
                       [0.0, laser_z+0.2], [-0.2, laser_z+0.4],
                       [-0.2, laser_z+0.2], [-0.2, laser_z], [0, laser_z]]
    step_point_time = 4 * ros_rate
    servo_pos_list = np.zeros((2, ros_max_ita))
    # Fill each 4-second segment with the pan/tilt for one target point.
    for idx, (x_pos, z_pos) in enumerate(step_point_list):
        pan, tilt = compute_laser_pointer_inverse_kinematic(x_pos, z_pos)
        segment = slice(idx * step_point_time, (idx + 1) * step_point_time)
        servo_pos_list[0, segment] = pan
        servo_pos_list[1, segment] = -tilt
    servo_spd = 4.0
    laser_point_controller(servo_pos_list, servo_spd, ros_max_ita, ros_rate)
async def get_locations():
    """
    Retrieves the locations from the categories. The locations are cached for 1 hour.

    :returns: The locations.
    :rtype: List[Location]
    """
    # Get all of the data categories locations.
    confirmed = await get_category("confirmed")
    deaths = await get_category("deaths")
    # recovered = await get_category("recovered")
    locations_confirmed = confirmed["locations"]
    locations_deaths = deaths["locations"]
    # locations_recovered = recovered["locations"]
    # Final locations to return.
    locations = []
    # NOTE(review): indexing locations_deaths with the confirmed-list index
    # assumes both category lists are parallel (same length and ordering) —
    # confirm get_category() guarantees this.
    for index, location in enumerate(locations_confirmed):
        # Get the timelines for this location from each category.
        timelines = {
            "confirmed": locations_confirmed[index]["history"],
            "deaths": locations_deaths[index]["history"],
            # 'recovered' : locations_recovered[index]['history'],
        }
        # Grab coordinates.
        coordinates = location["coordinates"]
        # Create location (supporting timelines) and append.
        locations.append(
            TimelinedLocation(
                # General info.
                index,
                location["country"],
                location["province"],
                # Coordinates.
                Coordinates(coordinates["lat"], coordinates["long"]),
                # Last update: naive UTC timestamp tagged with a literal "Z".
                datetime.utcnow().isoformat() + "Z",
                # Timelines (source dates are %m/%d/%y; re-keyed as ISO).
                {
                    "confirmed": Timeline(
                        {
                            datetime.strptime(date, "%m/%d/%y").isoformat() + "Z": amount
                            for date, amount in timelines["confirmed"].items()
                        }
                    ),
                    "deaths": Timeline(
                        {
                            datetime.strptime(date, "%m/%d/%y").isoformat() + "Z": amount
                            for date, amount in timelines["deaths"].items()
                        }
                    ),
                    # Recovered data is not fetched; keep an empty timeline.
                    "recovered": Timeline({}),
                },
            )
        )
    # Finally, return the locations.
    return locations
def corrupted_pdf(papers):
    """
    Do a simple convertion of pdf into txt.

    Parameters
    ----------
    papers : list
        Corrupted papers (paths from the directory tree); each path must
        contain a token matching ``doc[0-9]{3}_[0-9]{4}``, which names the
        output file.

    Returns
    -------
    None.
        Writes one ``<docNNN_YYYY>.txt`` per paper under
        ``{root}/dados/txt_filesv3`` (``root`` is a module-level global).
    """
    # NOTE(review): the cwd is changed here and only restored at the end —
    # an exception mid-loop leaves the process chdir'ed into the txt dir.
    os.chdir(f'{root}/dados/txt_filesv3')
    for paper in papers:
        # Fresh pdfminer pipeline per paper: resource manager -> converter.
        rsrcmgr = PDFResourceManager()
        retstr = StringIO()
        codec = 'utf-8'
        laparams = LAParams()
        device = TextConverter(rsrcmgr, retstr,
                               codec=codec, laparams=laparams)
        fp = open(paper, 'rb')
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        password = ""
        maxpages = 0  # 0 = no page limit
        caching = True
        pagenos = set()  # empty = all pages
        for page in PDFPage.get_pages(fp, pagenos,
                                      maxpages=maxpages,
                                      password=password,
                                      caching=caching,
                                      check_extractable=True):
            interpreter.process_page(page)
        text = retstr.getvalue()
        fp.close()
        device.close()
        retstr.close()
        # Output name derived from the docNNN_YYYY token in the input path.
        nome = re.search(r'(doc[0-9]{3}_[0-9]{4})', paper).group() + '.txt'
        with open(nome, 'w', encoding='utf-8') as f:
            f.write(text)
        f.close()  # redundant: the with-block already closed the file
    os.chdir(root)
def MDAPE(y_true, y_pred, multioutput='raw_values'):
    """
    calculate Median Absolute Percentage Error (MDAPE).

    :param y_true: array-like of shape = (n_samples, *)
        Ground truth (correct) target values.
    :param y_pred: array-like of shape = (n_samples, *)
        Estimated target values.
    :param multioutput: string in ['raw_values', 'uniform_average']
    :return: float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.
    """
    y_true, y_pred, original_shape = _standardize_input(y_true, y_pred, multioutput)
    # Per-sample absolute percentage error; EPSILON guards div-by-zero.
    percentage_errors = 100 * np.abs((y_true - y_pred) / (y_true + EPSILON))
    output_errors = np.median(percentage_errors, axis=0)
    if multioutput == 'raw_values':
        return output_errors.reshape(original_shape)
    return np.mean(output_errors)
def list_ports():
    """List the available serial ports."""
    # One line per detected COM/serial port.
    for port in serial.tools.list_ports.comports():
        print(port)
def fast_spearman(x, y=None):
    """calculate the spearnab correlation matrix for the columns of x (MxN), or optionally, the spearmancorrelaton matrix between x and y (OxP).
    In the language of statistics the columns are the variables and the rows are the observations.

    Args:
        x (numpy array-like) MxN in shape
        y (optional, numpy array-like) OxP in shape
    returns:
        (numpy array-like) array of the covariance values
        for defaults (y=None), shape is NxN
        if y is provied, shape is NxP
    """
    logger.debug("x.shape: {}".format(x.shape))
    if hasattr(y, "shape"):
        logger.debug("y.shape: {}".format(y.shape))
    # Spearman = Pearson correlation of the column-wise average ranks.
    x_ranks = pandas.DataFrame(x).rank(method="average").values
    logger.debug("some min and max ranks of x_ranks:\n{}\n{}".format(numpy.min(x_ranks[:10], axis=0), numpy.max(x_ranks[:10], axis=0)))
    if y is not None:
        y_ranks = pandas.DataFrame(y).rank(method="average").values
    else:
        y_ranks = None
    return fast_corr(x_ranks, y_ranks)
def analyze_individual_category(k, cocoDt, cocoGt, catId, iou_type, areas=None):
    """Analyze one category's precision while ignoring supercategory and any-class confusion.

    Refer to https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/coco_error_analysis.py#L174

    Args:
        k (int): index of the category being analyzed.
        cocoDt (pycocotools.coco.COCO): detection results held in a COCO object.
        cocoGt (pycocotools.coco.COCO): ground truth held in a COCO object.
        catId (int): dataset category id of the category to analyze.
        iou_type (str): IoU type — 'bbox' for detection boxes, 'segm' for
            pixel-level segmentation results.
        areas (list, optional): three thresholds used to build the
            small/medium/large area ranges.

    Returns:
        int: the category index ``k`` (echoed back for parallel callers).
        dict: keys 'ps_supercategory' (precision when confusion within the
            same supercategory is ignored) and 'ps_allcategory' (precision
            when confusion with any other class is ignored).
    """
    # matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
    # or matplotlib.backends is imported for the first time
    # pycocotools import matplotlib
    import matplotlib
    matplotlib.use('Agg')
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval
    nm = cocoGt.loadCats(catId)[0]
    print(f'--------------analyzing {k + 1}-{nm["name"]}---------------')
    ps_ = {}
    dt = copy.deepcopy(cocoDt)
    nm = cocoGt.loadCats(catId)[0]
    imgIds = cocoGt.getImgIds()
    dt_anns = dt.dataset['annotations']
    select_dt_anns = []
    # Keep only detections of the category under analysis.
    for ann in dt_anns:
        if ann['category_id'] == catId:
            select_dt_anns.append(ann)
    dt.dataset['annotations'] = select_dt_anns
    dt.createIndex()
    # compute precision but ignore superclass confusion
    gt = copy.deepcopy(cocoGt)
    child_catIds = gt.getCatIds(supNms=[nm['supercategory']])
    for idx, ann in enumerate(gt.dataset['annotations']):
        if ann['category_id'] in child_catIds and ann['category_id'] != catId:
            # Same supercategory, different class: relabel and mark ignored
            # so matching it does not count as an error.
            gt.dataset['annotations'][idx]['ignore'] = 1
            gt.dataset['annotations'][idx]['iscrowd'] = 1
            gt.dataset['annotations'][idx]['category_id'] = catId
    cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
    cocoEval.params.imgIds = imgIds
    cocoEval.params.maxDets = [100]
    cocoEval.params.iouThrs = [0.1]
    cocoEval.params.useCats = 1
    if areas:
        cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
                                   [areas[0], areas[1]], [areas[1], areas[2]]]
    cocoEval.evaluate()
    cocoEval.accumulate()
    ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :]
    ps_['ps_supercategory'] = ps_supercategory
    # compute precision but ignore any class confusion
    gt = copy.deepcopy(cocoGt)
    for idx, ann in enumerate(gt.dataset['annotations']):
        if ann['category_id'] != catId:
            gt.dataset['annotations'][idx]['ignore'] = 1
            gt.dataset['annotations'][idx]['iscrowd'] = 1
            gt.dataset['annotations'][idx]['category_id'] = catId
    cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
    cocoEval.params.imgIds = imgIds
    cocoEval.params.maxDets = [100]
    cocoEval.params.iouThrs = [0.1]
    cocoEval.params.useCats = 1
    if areas:
        cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
                                   [areas[0], areas[1]], [areas[1], areas[2]]]
    cocoEval.evaluate()
    cocoEval.accumulate()
    ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :]
    ps_['ps_allcategory'] = ps_allcategory
    return k, ps_
def rename(blocks, scope, stype):
    """Rename all sub-blocks moved under another block (mixins).

    Args:
        blocks (list): block list to walk recursively.
        scope (object): Scope object providing push/pop of naming scopes.
        stype (type): block type to match (only blocks of this type are
            renamed and descended into).
    """
    for p in blocks:
        if isinstance(p, stype):
            # Re-parse the block's name token within the current scope.
            p.tokens[0].parse(scope)
            if p.tokens[1]:
                # Descend: the renamed block becomes the current scope for
                # its children; restore the scope after the recursive walk.
                scope.push()
                scope.current = p.tokens[0]
                rename(p.tokens[1], scope, stype)
                scope.pop()
def read_line1(line):
    """! Function read_line1

    Reads as argument a string formatted as a Line 1 in SEISAN's Nordic format.
    Returns a Hypocenter dataclass with all the fields in a SEISAN's Line 1.

    @param[in] line string with SEISAN's Nordic hypocenter format (Line 1)
    @return Hypocenter dataclass
    """
    def _opt(text, cast):
        # Fixed-width fields are all blanks when absent; map blanks to None.
        return cast(text) if not text.isspace() else None

    def _magnitude(offset):
        # A magnitude block is 8 chars wide: value(4), type char(1), agency(3).
        if line[offset:offset + 4].isspace():
            return None, ' ', ' '
        return (float(line[offset:offset + 4]),
                'M' + line[offset + 4],
                line[offset + 5:offset + 8])

    # NOTE(review): format errors are only printed, not raised; a malformed
    # line will fail later on int()/float() conversion.
    if len(line) != 81:
        print('ERROR: invalid line length')
    if line[79] != '1':
        print('ERROR: invalid line type')
    year = int(line[1:5])
    month = int(line[6:8])
    day = int(line[8:10])
    fixed_time = line[10]  # 'F' if origin time fixed
    hour = int(line[11:13])
    minute = int(line[13:15])
    second = _opt(line[16:20], float)
    location_model = line[20]
    distance_indicator = line[21]
    event_type = line[22]  # blank for earthquake, 'E' for explosion
    latitude = _opt(line[23:30], float)
    longitude = _opt(line[30:38], float)
    depth = _opt(line[38:43], float)
    depth_indicator = line[43]  # blank free depth, 'F' fixed, 'S' starting
    locating_indicator = line[44]  # blank free, 'F' fixed, 'S' starting, '*' do not locate
    locating_agency = line[45:48]
    num_sta = _opt(line[48:51], int)
    rms = _opt(line[51:55], float)
    # Up to three magnitude blocks at fixed offsets 55, 63 and 71.
    mag1, mag_type1, mag_agency1 = _magnitude(55)
    mag2, mag_type2, mag_agency2 = _magnitude(63)
    mag3, mag_type3, mag_agency3 = _magnitude(71)
    return Hypocenter(year, month, day, fixed_time, hour, minute, second,
                      location_model, distance_indicator, event_type, latitude, longitude, depth, depth_indicator,
                      locating_indicator, locating_agency, num_sta, rms,
                      mag1, mag_type1, mag_agency1, mag2, mag_type2, mag_agency2, mag3, mag_type3, mag_agency3)
def write_md_files():
    """Make a readme.md file for every module in its folder"""
    for module in sheet.modules:
        module.write_md()
def ghidra_headless(address,
                    xml_file_path,
                    bin_file_path,
                    ghidra_headless_path,
                    ghidra_plugins_path):
    """
    Call Ghidra in headless mode and run the plugin
    FunctionDecompile.py to decompile the code of the function.

    Args:
        address: address of the function to decompile (passed to the plugin).
        xml_file_path: exported XML program description imported by Ghidra.
        bin_file_path: path of the binary (unused here; kept for the caller's
            interface).
        ghidra_headless_path: path to Ghidra's analyzeHeadless launcher.
        ghidra_plugins_path: directory containing FunctionDecompile.py.

    Returns:
        The decompiled C code as a string, or None on failure/cancel/timeout.
    """
    try:
        if not os.path.isfile(ghidra_headless_path):
            print("GhIDA:: [!] ghidra analyzeHeadless not found.")
            raise Exception("analyzeHeadless not found")
        decompiled_code = None
        idaapi.show_wait_box("Ghida decompilation started")
        # Temp file the plugin writes its JSON result into.
        prefix = "%s_" % address
        output_temp = tempfile.NamedTemporaryFile(prefix=prefix, delete=False)
        output_path = output_temp.name
        # print("GhIDA:: [DEBUG] output_path: %s" % output_path)
        output_temp.close()
        cmd = [ghidra_headless_path,
               ".",
               "Temp",
               "-import",
               xml_file_path,
               '-scriptPath',
               ghidra_plugins_path,
               '-postScript',
               'FunctionDecompile.py',
               address,
               output_path,
               "-noanalysis",
               "-deleteProject"]
        # Options to 'safely' terminate the process
        if os.name == 'posix':
            kwargs = {
                'preexec_fn': os.setsid
            }
        else:
            kwargs = {
                'creationflags': subprocess.CREATE_NEW_PROCESS_GROUP,
                'shell': True
            }
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             **kwargs)
        # Poll loop: wait for completion, user cancellation, or timeout.
        stop = False
        counter = 0
        print("GhIDA:: [INFO] Ghidra headless (timeout: %ds)" % TIMEOUT)
        print("GhIDA:: [INFO] Waiting Ghidra headless analysis to finish...")
        while not stop:
            time.sleep(SLEEP_LENGTH)
            counter += 1
            subprocess.Popen.poll(p)
            # Process terminated
            if p.returncode is not None:
                stop = True
                print("GhIDA:: [INFO] Ghidra analysis completed!")
                continue
            # User terminated action
            if idaapi.user_cancelled():
                # Termiante the process!
                terminate_process(p.pid)
                stop = True
                print("GhIDA:: [!] Ghidra analysis interrupted.")
                continue
            # Process timeout
            if counter > COUNTER_MAX:
                terminate_process(p.pid)
                stop = True
                print("GhIDA:: [!] Decompilation error - timeout reached")
                continue
        # Check if JSON response is available
        if os.path.isfile(output_path):
            with open(output_path) as f_in:
                j = json.load(f_in)
                if j['status'] == "completed":
                    decompiled_code = j['decompiled']
                else:
                    print("GhIDA:: [!] Decompilation error -",
                          " JSON response is malformed")
            # Remove the temporary JSON response file
            os.remove(output_path)
        else:
            print("GhIDA:: [!] Decompilation error - JSON response not found")
            idaapi.warning("Ghidra headless decompilation error")
    except Exception as e:
        print("GhIDA:: [!] %s" % e)
        print("GhIDA:: [!] Ghidra headless analysis failed")
        idaapi.warning("Ghidra headless analysis failed")
        decompiled_code = None
    finally:
        # Always dismiss the IDA wait box, whatever happened above.
        idaapi.hide_wait_box()
    return decompiled_code
def session_scope():
    """When accessing the database, use the following syntax:
        >>> with session_scope() as session:
        >>>     session.query(...)

    Commits on clean exit, retries the commit once after a transient
    OperationalError, rolls back on any other error, and always closes
    the session.

    :return: the session for accessing the database.
    """
    # NOTE(review): this generator is presumably wrapped with
    # @contextmanager above this block — confirm at the definition site.
    session_obj = scoped_session(DBSession)
    session = session_obj()
    try:
        yield session
        session.commit()
    except exc.OperationalError:
        # Transient DB failure: roll back, wait a jittered interval, retry once.
        session.rollback()
        time.sleep(0.5 + random.random())
        session.commit()
    except Exception as e:
        session.rollback()
        print('No commit has been made, due to the following error: {}'.format(e))
    finally:
        session.close()
def write_shellchoice_file(home_directory: Path, configuration: Any) -> None:
    """Write the shell choice configuration file."""
    file_path = Path(home_directory) / ".shellchoice"
    # Never overwrite an existing choice file.
    if does_file_exist(file_path):
        print("... exists, not creating")
        return
    print("... creating file")
    shell_path = configuration["user"].get("shell", "/bin/bash")
    with open(file_path, "w") as file:
        file.write(f"export SHELL={shell_path}")
def run(parsed_args):
    """Execute release task creation.

    Queries JIRA for issues and emits, per issue, whether verification
    steps/results are filled in and whether a review link was posted.
    Output goes to ``parsed_args.output`` if given, else stdout.

    parsed_args: command line arguemnts
    """
    assert parsed_args.server
    assert parsed_args.user
    assert parsed_args.password
    assert parsed_args.query
    assert parsed_args.max_results
    verification_steps_threshold = 0
    verification_results_threshold = 0
    if parsed_args.dry_run:
        # Bug fix: 'issues' was previously undefined on dry runs, crashing
        # with a NameError below. A dry run processes an empty issue list.
        jira = None
        issues = []
    else:
        jira = client.JIRA(server=parsed_args.server,
                           basic_auth=(parsed_args.user, parsed_args.password))
        issues = jira.search_issues(parsed_args.query,
                                    maxResults=parsed_args.max_results)
    print('len(issues): %s' % len(issues))
    output_file = None
    if parsed_args.output:
        output_file = open(parsed_args.output, 'w')
    try:
        if output_file:
            output_file.write('key,len(verification_steps),'
                              'len(verification_results),review\n')
        else:
            print('key,len(verification_steps),len(verification_results),review')
        for i in issues:
            verification_results = i.fields.customfield_14210 or []
            verification_steps = i.fields.customfield_13913 or []
            review = False
            # Best-effort: issues whose comments can't be fetched count as
            # unreviewed. (Narrowed from a bare except.)
            try:
                comments = jira.comments(i)
            except Exception:
                comments = []
            for c in comments:
                if 'review.metacloud.in' in c.body:
                    review = True
            output_line = '%s,%s,%s,%s\n' % (
                i.key,
                len(verification_steps) > verification_steps_threshold,
                len(verification_results) > verification_results_threshold,
                review)
            if output_file:
                output_file.write(output_line)
            else:
                print(output_line)
    finally:
        # Bug fix: the output file was never closed.
        if output_file:
            output_file.close()
def validate_id(
        endpoint_name,
        type_id,
        cache_buster=False,
        config=api_config.CONFIG,
        logger=logging.getLogger('publicAPI'),
):
    """Check EVE Online CREST as source-of-truth for id lookup

    Args:
        endpoint_name (str): desired endpoint for data lookup
        type_id (int): id value to look up at endpoint (NOTE: only SDE replacement)
        cache_buster (bool, optional): skip caching, fetch from internet
        config (:obj:`prosper.common.ProsperConfig`): configuration object
        logger (:obj:`logging.logger`): logging handle

    Returns:
        int: HTTP status code for error validation

    Raises:
        exceptions.IDValidationError: when the endpoint cannot be reached.
    """
    ## Check local cache for value ##
    try:
        db_handle = setup_cache_file(endpoint_name)
    except Exception as err_msg:  # pragma: no cover
        logger.error(
            'ERROR: unable to connect to local tinyDB cache' +
            '\n\tendpoint_name: {0}'.format(endpoint_name) +
            '\n\tcache_path: {0}'.format(CACHE_PATH),
            exc_info=True
        )
    if not cache_buster:
        logger.info('--searching cache for id: %s', type_id)
        logger.debug('endpoint_name=%s', endpoint_name)
        logger.debug('type_id=%s', type_id)
        # Entries older than the configured sde_cache_limit are ignored.
        cache_time = datetime.utcnow().timestamp() - int(config.get('CACHING', 'sde_cache_limit'))
        cache_val = db_handle.search(
            (Query().cache_datetime >= cache_time) &
            (Query().index_key == type_id)
        )
        if cache_val:
            logger.info('--found type_id cache for id: {0}'.format(type_id))
            logger.debug(cache_val)
            return cache_val[0]['payload']  #skip CREST
    ## Request info from CREST ##
    logger.info('--fetching CREST ID information')
    logger.debug('endpoint_name=%s', endpoint_name)
    logger.debug('type_id=%s', type_id)
    try:
        kwarg_pair = endpoint_to_kwarg(
            endpoint_name,
            type_id
        )
        type_info = None
        type_info = fetch_esi_endpoint(
            endpoint_name,
            **kwarg_pair,
            config=config
        )
    except Exception as err_msg:
        logger.warning(
            'ERROR: unable to connect to CREST' +
            '\n\tendpoint_name: {0}'.format(endpoint_name) +
            '\n\ttype_id: {0}'.format(type_id),
            exc_info=True
        )
        # Surface the failure as a 404-style validation error for the API layer.
        raise exceptions.IDValidationError(
            status=404,
            message='Unable to validate {0}:{1}'.format(
                endpoint_name,
                type_id
            )
        )
    ## Update cache ##
    logger.info('--updating cache')
    try:
        write_cache_entry(
            db_handle,
            type_id,
            type_info
        )
    except Exception as err_msg:  # pragma: no cover
        # Cache write failures are logged but do not fail the lookup.
        logger.error(
            'ERROR: unable to write to cache' +
            '\n\ttype_id: {0}'.format(type_id) +
            '\n\ttype_info: {0}'.format(type_info),
            exc_info=True
        )
    db_handle.close()
    return type_info
def init_server_socket() -> socket.socket:
    """Initialize and bind the server unix datagram socket.

    Any stale socket file left by a previous run is removed before binding.

    Returns:
        A bound AF_UNIX SOCK_DGRAM socket with a 1-second timeout.
    """
    socket_address = get_socket_address()
    try:
        # Remove a stale socket file from a previous run, if present.
        os.unlink(socket_address)
    except OSError:
        # EnvironmentError is just an alias of OSError on Python 3, so the
        # former (OSError, EnvironmentError) tuple was redundant.
        pass
    sock = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_DGRAM)
    sock.bind(socket_address)
    sock.settimeout(1)
    return sock
def test_async_request(upload_test_sector_scenario):
    """
    Tests async_request() function

    Resets the simulator, batches async altitude/heading/speed commands,
    steps the simulation, and checks the aircraft state moved as commanded.
    A batch containing an invalid command (negative speed) must raise.
    """
    cmd = reset_simulation()
    assert cmd == True
    upload_test_sector_scenario()
    # Get the position
    position = all_positions()
    acid1, acid2 = position.index
    # Batch two valid async commands for the first aircraft.
    commands = []
    commands.append(
        async_change_altitude(aircraft_id=acid1, flight_level=100)
    )
    commands.append(async_change_heading(aircraft_id=acid1, heading=90))
    results = batch(commands)
    assert results == True
    resp = simulation_step()
    assert resp == True
    # After one step: descending towards FL100, heading 90 moves it east.
    new_position = all_positions()
    assert new_position.loc[acid1, "current_flight_level"] < position.loc[acid1, "current_flight_level"]
    assert new_position.loc[acid1, "longitude"] > position.loc[acid1, "longitude"]
    # send more commands - return to original values
    more_commands = []
    more_commands.append(
        async_change_altitude(aircraft_id=acid1, flight_level=400)
    )
    more_commands.append(async_change_speed(aircraft_id=acid1, speed=100))
    results = batch(more_commands)
    assert results == True
    # send an invalid and a valid command: the whole batch must fail
    commands_wrong = []
    commands_wrong.append(async_change_heading(aircraft_id=acid1, heading=0))
    commands_wrong.append(async_change_speed(aircraft_id=acid1, speed=-5))
    with pytest.raises(Exception):
        results = batch(commands_wrong)
def getInfo_insert(sql : str, tableInfo : table_info_module.TableInfo) -> tuple:
    """Return the table name and its columns for an INSERT statement.

    (Docstring translated from Korean: "Returns the table name and columns.")

    Args:
        sql: Raw ``INSERT INTO ...`` SQL text.
        tableInfo: Mapping-like table metadata, indexed by table name.

    Returns:
        tuple: ``(tableName, columns)`` for the table targeted by the INSERT.
    """
    # Normalize whitespace/noise before parsing the statement text.
    sql = string_module.removeNoise(sql)
    # Extract the token between "INSERT INTO " and the following space.
    tableName = string_module.getParenthesesContext2(sql, "INSERT INTO ", " ")
    columns = tableInfo[tableName]
    return (tableName, columns)
def compute_flow_for_supervised_loss(
    feature_model,
    flow_model,
    batch,
    training
):
    """Compute flow for an image batch.

    Args:
        feature_model: A model to compute features for flow.
        flow_model: A model to compute flow.
        batch: A tf.tensor of shape [b, seq, h, w, c] holding a batch of triplets.
        training: bool that tells the model to use training or inference code.

    Returns:
        A tuple consisting of the images, the extracted features, the estimated
        flows, and the upsampled refined flows.
    """
    # Feed the first two frames of every sequence through the feature
    # extractor, then hand the features to the flow estimator.
    first_frames = batch[:, 0]
    second_frames = batch[:, 1]
    features = feature_model(first_frames, second_frames, training=training)
    return flow_model(features, training=training)
def _get_filename_from_request(request):
"""
Gets the filename from an url request.
:param request: url request to get filename from
:type request: urllib.requests.Request or urllib2.Request
:rtype: str
"""
try:
headers = request.headers
content = headers["content-disposition"]
filename_str = content.split("filename=")[1]
return filename_str.strip("\"")
except (KeyError, AttributeError):
return os.path.basename(request.url) | 5,327,152 |
def farey_sequence(n):
    """Return the nth Farey sequence as ordered pairs ``(N, D)`` where ``N``
    is the numerator and ``D`` is the denominator.

    Uses the standard next-term recurrence: given consecutive terms a/b and
    c/d, the following term is (k*c - a)/(k*d - b) with k = (n + b) // d.
    """
    num, den, next_num, next_den = 0, 1, 1, n
    terms = [(num, den)]
    while next_num <= n:
        step = int((n + den) / next_den)
        num, den, next_num, next_den = (
            next_num,
            next_den,
            step * next_num - num,
            step * next_den - den,
        )
        terms.append((num, den))
    return terms
def test_texture():
    """Test adding texture coordinates"""
    # Four corners of a flat rectangle in the XY plane.
    rect_points = np.array([
        [0, 0, 0],
        [1, 0, 0],
        [1, 0.5, 0],
        [0, 0.5, 0],
    ])
    # Two triangle faces in VTK's flat [n, i0, i1, ...] layout.
    rect_faces = np.array([3, 0, 1, 2, 3, 0, 3, 2], dtype=np.int8)
    # One texture coordinate per vertex.
    expected_t_coords = np.array([
        [0, 0],
        [1, 0],
        [1, 1],
        [0, 1],
    ])
    surface = vtki.PolyData(rect_points, rect_faces)
    # Setting then reading back the coordinates should round-trip exactly.
    surface.t_coords = expected_t_coords
    assert np.allclose(surface.t_coords, expected_t_coords)
def make_vgg19_block(block):
    """Builds a vgg19 block from a dictionary

    Args:
        block: a sequence of single-entry dicts mapping a layer name to its
            parameter list; names containing 'pool' become MaxPool2d layers
            (kernel, stride, padding), everything else becomes Conv2d
            (in, out, kernel, stride, padding) followed by an inplace ReLU.
    """
    modules = []
    for spec in block:
        for layer_name, params in spec.items():
            if 'pool' in layer_name:
                modules.append(nn.MaxPool2d(kernel_size=params[0],
                                            stride=params[1],
                                            padding=params[2]))
            else:
                modules.append(nn.Conv2d(in_channels=params[0],
                                         out_channels=params[1],
                                         kernel_size=params[2],
                                         stride=params[3],
                                         padding=params[4]))
                modules.append(nn.ReLU(inplace=True))
    return nn.Sequential(*modules)
def process_one(f, mesh_directory, dataset_directory, skip_existing, log_level):
    """Processes a single mesh, adding it to the dataset."""
    relpath = f.replace(mesh_directory, '')
    print('relpath:', relpath)
    assert relpath[0] == '/'
    relpath = relpath[1:]
    # Dataset layout is <split>/<synset>/... relative to the mesh root.
    split, synset = relpath.split('/')[:2]
    log.verbose(f'The split is {split} and the synset is {synset}')
    basename = os.path.basename(f)
    name, extension = os.path.splitext(basename)
    valid_extensions = ['.ply']
    if extension not in valid_extensions:
        raise ValueError(f'File with unsupported extension {extension} found: {f}.'
                         f' Only {valid_extensions} are supported.')
    output_dir = f'{dataset_directory}/{split}/{synset}/{name}/'
    # This is the last file the processing writes, if it already exists the
    # example has already been processed.
    # NOTE(review): this variable is never checked here — presumably
    # `skip_existing` is honored inside mesh_to_example; confirm.
    final_file_written = f'{output_dir}/depth_and_normals.npz'
    make_example.mesh_to_example(
        os.path.join(path_util.get_path_to_ldif_parent(), 'ldif'), f,
        output_dir, skip_existing, log_level)
    return output_dir
def test_client_route(mock_request, mock_connect):
    """Should return RouteResult instance.

    Stubs the auth handshake and the HTTP transport with a canned OneMap
    routing payload (primary route plus one alternative), then checks the
    client wraps the response in a RouteResult.
    """
    # Stubbed auth handshake: (token, expiry timestamp).
    mock_connect.return_value = "some-token", 1234567
    # Canned routing response covering the full schema the client parses.
    mock_request.return_value = MagicMock(
        status_code=status.HTTP_200_OK,
        data={
            "status_message": "Found route between points",
            "alternative_names": [
                ["COMMONWEALTH AVENUE WEST", "NORTH BUONA VISTA ROAD"]
            ],
            "route_name": ["CLEMENTI AVENUE 2", "ULU PANDAN ROAD"],
            # Encoded polyline geometry (opaque to this test).
            "route_geometry": (
                "yr`oAm`k{dEksAstD~e@iW`e@{UxtAqr@pd@sVrOmItC}GZ}GJwDeSmWkm@gb@qKuEyCw"
                "E}AgHJiH\\kE{BaRoCoEsGcLiE{N{AmQvB{QbFkN|E}FzMcPtQmTh|A_iBfCcDzHcKpJa"
                "Mr\\w_@t\\i`@hb@gg@lAkJRqJg@wJeCoMgQ{f@qHsTuC_FiMsT_S_ViVkPkfAyi@oXiN"
                "q{@q_@qn@cU{SsGgEqAiDeAcTsGcd@eMoF{AoBi@uGkB}d@uMwDoA_EsA{QiG_VyJaSkL"
                "kQuN}CgDqJkKqDsFqE_H}CuE}CyEsBsGcDeKuK}f@}FiJ_FaEkKiEgHcAe~@xMsr@`LqM"
                "rB_En@gAy`@kBkVwE{W_^gbAkHg[aFeQaRe^_Nea@iEwYJkYsAyj@KiRkGglAcDqn@KiU"
                "rDkc@nFkY`Lo]lIeQfJgOfcAyhAzJ}KtPsTjIuQxFaQrBcN|E{u@rDgh@hBuYjDy_@zHo"
                "UbI}O|PwSkDuBiP_K{]cTq_Ack@ixAe|@_L}G{LoHynBujAsh@iZiRqK}|@ig@xg@wo@v"
                "{@_gA~q@g}@fUgZp^{`@gDqLv`@oNfTwH~LcIl@gEy@{PqU_V_`@cuAvHwJt^_MvXgMxC"
                "aD"
            ),
            "route_instructions": [
                ["10", "PANDAN LOOP", 853, 0, 89, "853m", "NE", 65, 1, "SW", 245]
            ],
            "alternative_summaries": [
                {
                    "end_point": "REBECCA ROAD",
                    "start_point": "PANDAN LOOP",
                    "total_time": 761,
                    "total_distance": 8133,
                }
            ],
            "via_points": [[1.311549, 103.749657], [1.32036, 103.800156]],
            "route_summary": {
                "end_point": "REBECCA ROAD",
                "start_point": "PANDAN LOOP",
                "total_time": 740,
                "total_distance": 7957,
            },
            "found_alternative": True,
            "status": 200,
            "via_indices": [0, 140],
            "hint_data": {
                "locations": [
                    "NzgBANtqAQBRBQAAAAAAAAQAAAAAAAAAuQIAAEOcAABoAAAAPQMUABcYLwYAAAEB",
                    "0OUAAF4zAQChAwAABAAAAAwAAABIAAAAdQAAACx9AABoAAAAqCUUAFndLwYCAAEB",
                ],
                "checksum": 585417468,
            },
            "alternative_geometries": [
                (
                    "yr`oAm`k{dEksAstD~e@iW`e@{UxtAqr@pd@sVrOmItC}GZ}GJwDeSmWkm@gb@qKu"
                    "EyCwE}AgHJiH\\kE{BaRoCoEsGcLiE{N{AmQvB{QbFkN|E}FzMcPtQmTh|A_iBfCc"
                    "DzHcKpJaMr\\w_@t\\i`@hb@gg@lAkJRqJg@wJeCoMgQ{f@qHsTuC_FiMsT_S_ViV"
                    "kPkfAyi@oXiNq{@q_@qn@cU{SsGgEqAiDeAcTsGcd@eMoF{AoBi@uGkB}d@uMwDoA"
                    "_EsA{QiG_VyJaSkLkQuN}CgDqJkKqDsFqE_H}CuE}CyEsBsGcDeKuK}f@}FiJ_FaE"
                    "kKiEgHcAe~@xMsr@`LqMrB_En@gAy`@kBkVwE{W_^gbAkHg[aFeQaRe^_Nea@iEwY"
                    "JkYsAyj@KiRkGglAcDqn@KiUrDkc@nFkY`Lo]lIeQfJgOfcAyhAzJ}KtPsTjIuQxF"
                    "aQrBcN|E{u@rDgh@hBuYjDy_@zHoUbI}O|PwSkDuBiP_K{]cTq_Ack@ixAe|@_L}G"
                    "{LoHynBujAsh@iZiRqK}|@ig@xg@wo@v{@_gA~q@g}@fUgZp^{`@gDqLv`@oNfTwH"
                    "~LcIl@gEy@{PqU_V_`@cuAvHwJt^_MvXgMxCaD"
                )
            ],
            "alternative_instructions": [
                [
                    ["10", "PANDAN LOOP", 853, 0, 89, "853m", "NE", 65, 1, "SW", 245],
                    ["8", "JALAN BUROH", 217, 9, 23, "217m", "NE", 50, 1, "SW", 230],
                    ["1", "WEST COAST HIGHWAY", 62, 14, 7, "61m", "E", 92, 1, "W", 272],
                ]
            ],
            "alternative_indices": [0, 159],
        },
    )
    # Transport is mocked, so credentials and coordinates are arbitrary.
    route_result = OneMap("email@example.com", "password").route(
        "1.23,1.01", "1.01,1.23", "drive"
    )
    assert isinstance(route_result, response.RouteResult)
def qsl_ranking(call, key, qsl):
    """Add qsl information to either sent or received (determined by key).

    QSL information is ranked and the higher-ranked one is kept; the order
    is: direct$ > direct > bureau. `qsl_types` is expected to list the
    ranks from highest to lowest, so the first match wins.
    """
    current = getattr(call, key, None)
    for ranked in qsl_types:
        # Keep whichever of the new or existing value ranks highest.
        if ranked in (qsl, current):
            call[key] = ranked
            break
def tld():
    """
    Return a random tld (Top Level Domain) from the tlds list below

    :return: str
    """
    choices = (
        'com', 'org', 'edu', 'gov', 'co.uk', 'net', 'io', 'ru', 'eu',
    )
    return pickone(choices)
def validate_boolean(option, value):
    """Validates that 'value' is 'true' or 'false'.

    Args:
        option: Option name, used only in error messages.
        value: A bool, or the string 'true'/'false'.

    Returns:
        bool: The validated boolean value.

    Raises:
        ConfigurationError: If a string value is not 'true' or 'false'.
        TypeError: If value is neither a bool nor a string.
    """
    if isinstance(value, bool):
        return value
    elif isinstance(value, str):
        # Was `basestring`, which is a NameError on Python 3; `str` keeps
        # the same semantics for text values.
        if value not in ('true', 'false'):
            raise ConfigurationError("The value of '%s' must be "
                                     "'true' or 'false'" % (option,))
        return value == 'true'
    raise TypeError("Wrong type for %s, value must "
                    "be a boolean or string representation" % (option,))
def A2RT(room_size, A_wall_all, F_abs, c=343, A_air=None, estimator='Norris_Eyring'):
    """ Estimate reverberation time based on room acoustic parameters,
    translated from matlab code developed by Douglas R Campbell

    Args:
        room_size: three-dimension measurement of shoebox room
        A_wall_all: sound absorption coefficients of six wall surfaces
            (per frequency band: rows are bands, columns are the 6 walls)
        c: sound speed, default to 343 m/s
        F_abs: center frequency of each frequency band
        A_air: absorption coefficients of air, if not specified, it will
            calculated based on humidity of 50
        estimator: estimate methods, choose from [Sabine,SabineAir,
            SabineAirHiAbs,Norris_Eyring], default to Norris_Eyring

    Returns:
        RT60 per frequency band (same shape as F_abs); all zeros for an
        effectively anechoic room.
    """
    if A_air is None:
        humidity = 50
        # NOTE(review): with the fixed humidity of 50 the ratio 50/humidity
        # is always 1; kept for parity with the original matlab code.
        A_air = (5.5e-4)*(50/humidity)*((F_abs/1000)**1.7)
    Lx, Ly, Lz = room_size
    V_room = np.prod(room_size)  # Volume of room m^3
    S_wall_all = [Lx*Lz, Ly*Lz, Lx*Ly]
    S_room = 2.*np.sum(S_wall_all)  # Total area of shoebox room surfaces
    # Effective absorbing area of room surfaces at each frequency
    Se = (S_wall_all[1]*(A_wall_all[:, 0] + A_wall_all[:, 1])
          + S_wall_all[0]*(A_wall_all[:, 2] + A_wall_all[:, 3])
          + S_wall_all[2]*(A_wall_all[:, 4] + A_wall_all[:, 5]))
    A_mean = Se/S_room  # Mean absorption of wall surfaces
    # Mean absorption of air averaged across frequency.
    # A_air_mean = np.mean(A_air)
    # Mean Free Path (Average distance between succesive reflections) (Ref A4)
    # MFP = 4*V_room/S_room
    # Reverberation time estimate
    # Detect anechoic case and force RT60 all zeros
    # (EPSILON is a module-level tolerance constant.)
    if np.linalg.norm(1-A_mean) < EPSILON:
        RT60 = np.zeros(F_abs.shape)
    else:  # Select an estimation equation
        if estimator == 'Sabine':
            RT60 = np.divide((55.25/c)*V_room, Se)  # Sabine equation
        if estimator == 'SabineAir':
            # Sabine equation (SI units) adjusted for air
            RT60 = np.divide((55.25/c)*V_room, (4*A_air*V_room+Se))
        if estimator == 'SabineAirHiAbs':
            # % Sabine equation (SI units) adjusted for air and high absorption
            RT60 = np.divide(55.25/c*V_room,
                             4*A_air*V_room+np.multiply(Se, (1+A_mean/2)))
        if estimator == 'Norris_Eyring':
            # Norris-Eyring estimate adjusted for air absorption
            RT60 = np.divide(55.25/c*V_room,
                             4*A_air*V_room-S_room*np.log(1-A_mean+EPSILON))
    return RT60
def GetVideoFromRate(content):
    """
    Extract video information from a video-search result page source.
    (Docstring and comments translated from Chinese.)

    Args:
        content: Raw HTML of the search result page.

    Returns:
        list: One Video object per result found on the page.
    """
    # AV number and title
    regular1 = r'<a href="/video/av(\d+)/" target="_blank" class="title" [^>]*>(.*)</a>'
    info1 = GetRE(content, regular1)
    # View count
    regular2 = r'<i class="b-icon b-icon-v-play" title=".+"></i><span number="([^"]+)">\1</span>'
    info2 = GetRE(content, regular2)
    # Favorites count
    regular3 = r'<i class="b-icon b-icon-v-fav" title=".+"></i><span number="([^"]+)">\1</span></span>'
    info3 = GetRE(content, regular3)
    # Danmaku (bullet-comment) count
    regular4 = r'<i class="b-icon b-icon-v-dm" title=".+"></i><span number="([^"]+)">\1</span>'
    info4 = GetRE(content, regular4)
    # Upload date
    regular5 = r'<span class="v-date" title=".+">(.+)</span>'
    info5 = GetRE(content, regular5)
    # Cover image
    regular6 = r'<img data-img="(.+)" [^>]*>'
    info6 = GetRE(content, regular6)
    # Uploader id and name
    regular7 = r'<a class="v-author" href=".+/(\d+).+">(.+)</a>'
    info7 = GetRE(content, regular7)
    # NOTE(review): one could assert here that all info lists have equal
    # length (original comment flagged the same).
    videoNum = len(info1)  # number of videos found
    videoList = []
    for i in range(videoNum):
        video_t = Video()
        video_t.aid = getint(info1[i][0])
        video_t.title = info1[i][1]
        video_t.guankan = getint(info2[i])
        video_t.shoucang = getint(info3[i])
        video_t.danmu = getint(info4[i])
        video_t.date = info5[i]
        video_t.cover = info6[i]
        video_t.author = User(info7[i][0], info7[i][1])
        videoList.append(video_t)
    return videoList
def PPVfn(Mw, fc, Rho, V):
    """Calculates the peak-particle-velocity (PPV) at the source
    for a given homogeneous density and velocity model.

    :param Mw: the moment magnitude
    :type Mw: float
    :param fc: the corner frequency in Hz
    :type fc: float
    :param Rho: Density at the source in kg/m**3
    :type Rho: float
    :param V: Seismic velocity at the source in m/s
    :type V: float
    :returns: the PPV
    :rtype: float
    """
    # Seismic moment from the magnitude, angular corner frequency from fc.
    moment = Mw2M0(Mw)
    omega0 = 2 * np.pi * fc
    return omega0 ** 2 * moment / (4 * np.pi * Rho * V ** 3)
def extract_feature_label(feat_path, lab_path, audio_sr=22050, hop_size=1024):
    """Basic feature extraction block.

    Parameters
    ----------
    feat_path: Path
        Path to the raw feature folder.
    lab_path: Path
        Path to the corresponding label folder.
    audio_sr: int
        sampling rate, default=22050.
    hop_size: int
        number of samples between successive CQT columns, default=1024.

    Returns
    -------
    data:
        Processed data
    """
    label = load_label(lab_path)
    feature = load_feature(feat_path)
    # NOTE(review): despite the docstring's `Path` types, `.replace`/`.split`
    # below are str methods — feat_path is evidently a string; confirm.
    # The pitch shift is encoded in the filename as "..._pitch_shift=<k>.npy".
    pitch_shift = int(feat_path.replace(".npy", "").split('_pitch_shift=')[-1])
    beatles_id = feat_path.replace(".npy", "")
    n_frames = feature.shape[0]
    # Get frame-wise labels: paint each annotated [onset, end) span onto the
    # frame grid using the CQT hop size.
    chords = np.zeros(n_frames, dtype=np.int32)
    for lab in label:
        onset_idx = int(lab['onset'] * audio_sr / hop_size)
        end_idx = int(math.ceil(lab['end'] * audio_sr / hop_size))
        chord = CHORD_INT_MAPPING_2[lab['chord']]
        chords[onset_idx:end_idx] = chord
    # Chord labels modulation: transpose labels by the audio's pitch shift.
    chords_shift = _shift_chord_labels(chords, pitch_shift)
    # Chord transition
    transition = _get_chord_transition(chords_shift)
    data = {'cqt': feature, 'chord': chords_shift, 'transition': transition}
    # Reshape
    data = reshape_data(data, beatles_id)
    return data
def includeme(config):
    """register custom datatables"""
    # Each route name maps onto its DataTable implementation.
    registrations = (
        ('parameters', Concepts),
        ('languages', Languages),
        ('values', Words),
    )
    for route_name, table_cls in registrations:
        config.register_datatable(route_name, table_cls)
def compute_generic_stats(graph):
    """
    Compute generic statistic for a graph

    :param graph: iterable of graphs exposing ``number_of_nodes()``
    :return: None (prints the average node count)
    """
    total_nodes = sum(g.number_of_nodes() for g in graph)
    # Average over the collection, guarding against an empty input.
    if len(graph) > 0:
        total_nodes /= len(graph)
    fmtl_print('Average number of nodes (graph size {})'.format(len(graph)), total_nodes)
def get_local_episodes(anime_folder, name):
    """return a list of files of a anime-folder inside ANIME_FOLDER"""
    # Apostrophes are not allowed in folder names on disk.
    name = name.replace("'", "_")
    show_dir = os.path.join(anime_folder, name)
    if not os.path.isdir(show_dir):
        # First time we see this show: create its folder, nothing local yet.
        os.makedirs(show_dir)
        return []
    episodes = []
    for entry in os.listdir(show_dir):
        entry_path = os.path.join(show_dir, entry)
        if not os.path.isfile(entry_path):
            continue
        info = parse_name(entry)
        info["size"] = os.stat(entry_path).st_size
        episodes.append(info)
    return episodes
def _find_pkg_info(directory):
"""find and return the full path to a PKG-INFO file or None if not found"""
for root, dirs, files in os.walk(directory):
for filename in files:
if filename == 'PKG-INFO':
return os.path.join(root, filename)
# no PKG-INFO file found
return None | 5,327,168 |
def get_m3u8_url(text):
    # type: (str) -> Union[str, None]
    """Attempts to get the first m3u8 url from the given string"""
    stream_match = re.search(r"https[^\"]*\.m3u8", text)
    sig_match = re.search(r"(\?sig=[^\"]*)", text)
    # Both the stream URL and its signature must be present.
    if not (stream_match and sig_match):
        return None
    return "{}{}".format(clean_uri(stream_match.group()), sig_match.group())
def xavier_init(fan_in, fan_out, constant=1):
    """ Xavier initialization of network weights"""
    # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    # Symmetric uniform range scaled by the fan-in/fan-out sum.
    limit = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=-limit, maxval=limit, dtype=tf.float32)
def object_bbox_flip(
    bbox: remote_blob_util.BlobDef,
    image_size: remote_blob_util.BlobDef,
    flip_code: Union[int, remote_blob_util.BlobDef],
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """This operator flips the object bounding box.

    The flip code corresponds to the different flip mode:

    0 (0x00): Non Flip

    1 (0x01): Horizontal Flip

    16 (0x10): Vertical Flip

    17 (0x11): Both Horizontal and Vertical Flip

    Args:
        bbox (BlobDef): The bounding box.
        image_size (BlobDef): The size of input image.
        flip_code (Union[int, BlobDef]): The flip code.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        BlobDef: The result Blob

    For example:

    .. code-block:: python

        import numpy as np
        import oneflow as flow
        import oneflow.typing as tp


        def _of_object_bbox_flip(bbox_list, image_size, flip_code):
            bbox_shape = _get_bbox_static_shape(bbox_list)
            func_config = flow.FunctionConfig()
            func_config.default_data_type(flow.float)
            func_config.default_logical_view(flow.scope.mirrored_view())

            @flow.global_function(function_config=func_config)
            def object_bbox_flip_job(
                bbox_def: tp.ListListNumpy.Placeholder(
                    shape=tuple(bbox_shape), dtype=flow.float
                ),
                image_size_def: tp.ListNumpy.Placeholder(
                    shape=image_size.shape, dtype=flow.int32
                ),
            ) -> tp.ListListNumpy:
                bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
                flip_bbox = flow.object_bbox_flip(bbox_buffer, image_size_def, flip_code)
                return flow.tensor_buffer_to_tensor_list(
                    flip_bbox, shape=bbox_shape[1:], dtype=flow.float
                )

            input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
            bbox_tensor = object_bbox_flip_job([input_bbox_list], [image_size])
            return bbox_tensor[0]


        def _get_bbox_static_shape(bbox_list):
            bbox_shapes = [bbox.shape for bbox in bbox_list]
            bbox_static_shape = np.amax(bbox_shapes, axis=0)
            assert isinstance(
                bbox_static_shape, np.ndarray
            ), "bbox_shapes: {}, bbox_static_shape: {}".format(
                str(bbox_shapes), str(bbox_static_shape)
            )
            bbox_static_shape = bbox_static_shape.tolist()
            bbox_static_shape.insert(0, len(bbox_list))
            return bbox_static_shape

        if __name__ == "__main__":
            bbox = np.array([[[20.0, 40.0, 80.0, 160.0],
                            [30.0, 50.0, 70.0, 100.0]]]).astype(np.single) # [x1, y1, x2, y2]
            image_size = np.array([[480, 620]]).astype(np.int32)
            bbox_flip = _of_object_bbox_flip(bbox,
                                            image_size,
                                            flip_code=1) # Horizontal Flip
            print(bbox_flip[0][0])

            # [[399.  40. 459. 160.]
            #  [409.  50. 449. 100.]]

    """
    assert isinstance(bbox, remote_blob_util.BlobDef)
    assert isinstance(image_size, remote_blob_util.BlobDef)
    # One bbox group and one image size per batch element.
    assert bbox.shape[0] == image_size.shape[0]
    if name is None:
        name = id_util.UniqueStr("ObjectBboxFlip_")
    if not isinstance(flip_code, remote_blob_util.BlobDef):
        assert isinstance(flip_code, int)
        # Broadcast a scalar flip code into a per-batch-element constant blob.
        flip_code = flow.constant(
            flip_code,
            shape=(bbox.shape[0],),
            dtype=flow.int8,
            name="{}_FlipCode".format(name),
        )
    else:
        assert bbox.shape[0] == flip_code.shape[0]
    # Build and run the oneflow user op that performs the flip.
    op = (
        flow.user_op_builder(name)
        .Op("object_bbox_flip")
        .Input("bbox", [bbox])
        .Input("image_size", [image_size])
        .Input("flip_code", [flip_code])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()
def endwin():
    """Close down curses mode.

    Safe to call repeatedly: does nothing when curses was never started,
    and ignores curses errors raised when the terminal is already closed.
    """
    global screen
    if screen:
        try:
            curses.endwin()
        except curses.error:
            # Narrowed from a bare `except:` so unrelated errors (and
            # KeyboardInterrupt/SystemExit) are no longer swallowed.
            pass
        screen = None
def while_K():
    """Print an upper-case letter 'K' pattern using Python while loops."""
    line = 0
    while line < 6:
        pos = 0
        while pos < 4:
            # Vertical bar at pos 0, upper stroke on line+pos==3,
            # lower stroke on line-pos==2.
            is_star = pos == 0 or line + pos == 3 or line - pos == 2
            print('*' if is_star else ' ', end=' ')
            pos += 1
        print()
        line += 1
def compile(spec):
    """
    Build a matcher for the given specification.

    Args:
        spec (dict): A specification dict that attempts to "break" test dicts

    Returns:
        JsonMatcher.
    """
    # NOTE(review): this name shadows the builtin `compile` when
    # star-imported; presumably intentional (re.compile-style API) — confirm.
    return JsonMatcher(spec)
def labels_to_intervals(labels_list):
    """
    Convert per-frame label sets into the set of time intervals where each
    tag occurs.

    Args:
        labels_list: list of label sets, one per frame,
            e.g. [{'person'}, {'person'}, {'person'}, {'surfboard', 'person'}]

    Returns:
        tags - set of (label, start, end) tuples covering frames start
        (inclusive) to end (exclusive),
        e.g. {('cat', 3, 9), ('dog', 5, 8), ('people', 0, 6)}
        e.g. {('cat', 0, 1), ('cat', 2, 4), ('cat', 6, 8), ('dog', 0, 3),
        ('dog', 6, 8), ('people', 0, 2), ('people', 4, 6)}
    """
    # Collect the frame indices at which each label appears.
    frames_by_label = {}
    for frame, labels in enumerate(labels_list):
        for label in labels:
            frames_by_label.setdefault(label, set()).add(frame)
    output = set()
    for label, frames in frames_by_label.items():
        ordered = sorted(frames)
        # Walk the sorted frames, emitting a half-open interval whenever a
        # gap in consecutive frame numbers is found (clearer than the
        # original nested itertools.groupby one-liner).
        start = prev = ordered[0]
        for frame in ordered[1:]:
            if frame != prev + 1:
                output.add((label, start, prev + 1))
                start = frame
            prev = frame
        output.add((label, start, prev + 1))
    return output
def get_wolfram_query_url(query):
    """Get Wolfram query URL.

    The query is now percent-encoded so spaces and special characters
    produce a valid URL (previously the raw string was interpolated).

    Args:
        query (str): Search query; falsy values yield the plain base URL.

    Returns:
        str: The Wolfram Alpha URL.
    """
    from urllib.parse import quote_plus

    base_url = 'www.wolframalpha.com'
    if not query:
        return 'http://{0}'.format(base_url)
    return 'http://{0}/input/?i={1}'.format(base_url, quote_plus(query))
def show_2dfit_residuals(x, y, data, fit, xfield=None, yfield=None, nfig=None, cmap=None):
    """ Create a figure with a map of the residuals in %

    Uses print() calls (the original Python-2 print statements are a
    SyntaxError under Python 3; the single-string form below works on both).

    :param x: input xaxis coordinates - array
    :param y: input yaxis coordinates - array (should have the same dim as x)
    :param data: input data points (should have the same dim as x)
    :param fit: fitted points (should have the same dim as x)
    :param xfield: 2d rectangular array for the field to show (xaxis)
    :param yfield: 2d rectangular array (yaxis)
    :param nfig: if None, will use the existing figure, otherwise will use that number
    :returns: Nothing
    """
    if nfig is None:
        fig = plt.gcf()
    else:
        fig = plt.figure(nfig)
    fig.clf()
    ## Making the arrays as 1D
    x_rav = x.ravel()
    y_rav = y.ravel()
    d_rav = data.ravel()
    f_rav = fit.ravel()
    lx = len(x_rav)
    ly = len(y_rav)
    ld = len(d_rav)
    lf = len(f_rav)
    ## checking that the dimensions are correct
    if (lx != ld) or (lx != lf) or (lx != ly):
        print("ERROR: dimensions for x, y, data, and fit are not the same")
        print("       (respectively: %d %d %d and %d)" % (lx, ly, ld, lf))
        return
    unbinned_residuals = derive_unbinned_field(x, y, 100.0 * (data-fit) / fit, xfield, yfield)
    Sauron_Cmap = get_SauronCmap()
    plt.imshow(unbinned_residuals, vmin=-20., vmax=20., cmap=Sauron_Cmap)
    plt.colorbar()
def test_column_wrangler():
    """Columns are transformed into a consistent format.
    """
    # Mixed case, stray whitespace and an internal space should all be
    # normalized to lowercase snake_case.
    raw = pd.DataFrame({
        'column1': [1, 2, 3],
        'cOLUmn2': [1, 2, 3],
        ' cOLUmn3 ': [1, 2, 3],
        ' column 4 ': [1, 2, 3],
    })
    wrangled = _column_wrangler(raw).columns
    expected = pd.Index(['column1', 'column2', 'column3', 'column_4'])
    assert_index_equal(wrangled, expected)
def center_of_mass(points: Sequence[float]) -> np.ndarray:
    """Gets the center of mass of the points in space.

    Parameters
    ----------
    points
        The points to find the center of mass from. Must be non-empty and
        all of the same dimensionality.

    Returns
    -------
    np.ndarray
        The center of mass of the points.

    Raises
    ------
    ValueError
        If *points* is empty (previously a bare ZeroDivisionError).
    """
    if len(points) == 0:
        raise ValueError("points must be non-empty")
    # Stack into one float array and average along the point axis in a
    # single vectorized operation (replaces per-point conversion + sum).
    return np.mean(np.asarray(points, dtype=float), axis=0)
def get_posts(session, client_id, now=None):
    """Returns all posts.

    Args:
        session: SQLAlchemy session used for the query.
        client_id: Client whose posts are fetched.
        now: Optional timestamp, normalized via ``_utcnow``.

    Returns:
        PaginatedSequence: posts ordered newest-first.

    Raises:
        db_util.DbException: On an integrity error; the session is rolled
            back first so it remains usable.
    """
    # NOTE(review): `now` is normalized but never used in the query below —
    # confirm whether it was meant to filter by creation time.
    now = _utcnow(now)
    try:
        results = _get_post_query(session, client_id)\
            .order_by(MappedPost.created_datetime.desc())
        posts = tuple(_make_post(*result) for result in results)
        return PaginatedSequence(posts)
    except sa.exc.IntegrityError:
        # Roll back before re-raising as the project's chained DB exception.
        session.rollback()
        raise db_util.DbException._chain()
def tessellate_cell(csn, children, acells, position, parent, cell_params):
    """
    Tessellate a cell.

    :param int csn: Cell number.
    :param ndarray children: Array specifying children of each cell.
    :param ndarray acells: Array specifying the adjacent cells of each cell.
    :param ndarray position: Array specifying the position of each cell.
    :param ndarray parent: Array specifying the parent of each cell.
    :param ndarray cell_params: Array specifying the corner parameters of each
        cell.

    :return: The number of triangles and the parameter values for each of the
        three triangle vertices (ntri, triangles). The *triangles* list will
        be *ntri* x 3, where each row is a triangle and contains a list of
        three tuples containing two parameter values on the original surface.
        These parameter values represent a vertex of the triangle. The order of
        the parameter values should result in a normal vector oriented the same
        as the original surface.
    :rtype: tuple

    Reference: Anderson, J., Khamayseh, A., and Jean, B. "Adaptive Resolution
    Refinement," Technical Report, Los Alamos National Laboratory.
    """
    # Determine number of interior points and parameters on each edge.
    nprms = []
    edge_prms = []
    # NOTE(review): edges visited in order 1, 3, 4, 2 — presumably the
    # counter-clockwise traversal of the cell boundary; confirm against the
    # cell numbering convention used by _find_neighbors.
    edge_order = [1, 3, 4, 2]
    simple = False
    for i in edge_order:
        adj_cells = _find_neighbors(csn, i, children, acells, position, parent)
        prms = _find_edge_params(i, adj_cells, cell_params)
        nprms.append(len(prms))
        edge_prms.append(prms)
        if len(prms) > 1:
            simple = True
    # Use simple triangulation if any edge has more than one interior point:
    # fan every boundary vertex to the cell midpoint.
    if simple:
        # Make a single list of parameters in counter-clockwise order.
        # Edge 1
        all_params = [cell_params[csn, 0]]
        for uv in edge_prms[0]:
            all_params.append(uv)
        all_params.append(cell_params[csn, 1])
        # Edge 3
        for uv in edge_prms[1]:
            all_params.append(uv)
        all_params.append(cell_params[csn, 2])
        # Edge 4
        for uv in edge_prms[2]:
            all_params.append(uv)
        all_params.append(cell_params[csn, 3])
        # Edge 2
        for uv in edge_prms[3]:
            all_params.append(uv)
        all_params.append(cell_params[csn, 0])
        # Middle parameter: midpoint of the diagonal corner parameters.
        uv0 = cell_params[csn, 0]
        uv1 = cell_params[csn, 2]
        uvc = 0.5 * (uv0 + uv1)
        # Generate triangles
        triangles = []
        for i in range(len(all_params) - 1):
            uv0 = all_params[i]
            uv1 = all_params[i + 1]
            triangles.append([uv0, uv1, uvc])
        return len(triangles), triangles
    # Use predefined triangles.
    # Determine triangulation case by the number of interior points on each
    # edge. Each edge contributes one decimal digit (0 or 1):
    # units = edge 1, tens = edge 3, hundreds = edge 4, thousands = edge 2.
    triangles = []
    triapp = triangles.append
    cprms = cell_params[csn, :]
    # First (and only) interior parameter of each edge that has one.
    eprms = [row[0] for row in edge_prms if len(row) > 0]
    case = nprms[0] + nprms[1] * 10 + nprms[2] * 100 + nprms[3] * 1000
    # Case 0
    if case == 0:
        triapp([cprms[0], cprms[1], cprms[2]])
        triapp([cprms[2], cprms[3], cprms[0]])
        return len(triangles), triangles
    # Case 1
    if case == 1:
        triapp([cprms[0], eprms[0], cprms[3]])
        triapp([eprms[0], cprms[1], cprms[2]])
        triapp([cprms[2], cprms[3], eprms[0]])
        return len(triangles), triangles
    # Case 2
    if case == 10:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([eprms[0], cprms[2], cprms[3]])
        triapp([cprms[3], cprms[0], eprms[0]])
        return len(triangles), triangles
    # Case 3
    if case == 11:
        triapp([cprms[0], eprms[0], cprms[3]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([eprms[1], cprms[2], cprms[3]])
        triapp([cprms[3], eprms[0], eprms[1]])
        return len(triangles), triangles
    # Case 4
    if case == 100:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([cprms[1], cprms[2], eprms[0]])
        triapp([eprms[0], cprms[3], cprms[0]])
        return len(triangles), triangles
    # Case 5
    if case == 101:
        triapp([cprms[0], eprms[0], cprms[3]])
        triapp([eprms[0], eprms[1], cprms[3]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([cprms[1], cprms[2], eprms[1]])
        return len(triangles), triangles
    # Case 6
    if case == 110:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([eprms[0], cprms[2], eprms[1]])
        triapp([eprms[1], cprms[0], eprms[0]])
        triapp([eprms[1], cprms[3], cprms[0]])
        return len(triangles), triangles
    # Case 7
    if case == 111:
        triapp([cprms[0], eprms[0], eprms[2]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([eprms[1], cprms[2], eprms[2]])
        triapp([eprms[2], eprms[0], eprms[1]])
        triapp([eprms[2], cprms[3], cprms[0]])
        return len(triangles), triangles
    # Case 8
    if case == 1000:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([cprms[1], cprms[2], eprms[0]])
        triapp([cprms[2], cprms[3], eprms[0]])
        return len(triangles), triangles
    # Case 9
    if case == 1001:
        triapp([cprms[0], eprms[0], eprms[1]])
        triapp([eprms[0], cprms[1], cprms[2]])
        triapp([cprms[2], eprms[1], eprms[0]])
        triapp([cprms[2], cprms[3], eprms[1]])
        return len(triangles), triangles
    # Case 10
    if case == 1010:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([eprms[0], cprms[2], eprms[1]])
        triapp([cprms[2], cprms[3], eprms[1]])
        triapp([eprms[1], cprms[0], eprms[0]])
        return len(triangles), triangles
    # Case 11
    if case == 1011:
        triapp([cprms[0], eprms[0], eprms[2]])
        triapp([eprms[0], eprms[1], eprms[2]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([eprms[1], cprms[2], eprms[2]])
        triapp([cprms[2], cprms[3], eprms[2]])
        return len(triangles), triangles
    # Case 12
    if case == 1100:
        triapp([cprms[0], cprms[1], eprms[1]])
        triapp([cprms[1], cprms[2], eprms[0]])
        triapp([eprms[0], eprms[1], cprms[1]])
        triapp([eprms[0], cprms[3], eprms[1]])
        return len(triangles), triangles
    # Case 13
    if case == 1101:
        triapp([cprms[0], eprms[0], eprms[2]])
        triapp([eprms[0], cprms[1], cprms[2]])
        triapp([cprms[2], eprms[1], eprms[0]])
        triapp([eprms[1], eprms[2], eprms[0]])
        triapp([eprms[1], cprms[3], eprms[2]])
        return len(triangles), triangles
    # Case 14
    if case == 1110:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([eprms[0], eprms[2], cprms[0]])
        triapp([eprms[0], cprms[2], eprms[1]])
        triapp([eprms[1], eprms[2], eprms[0]])
        triapp([eprms[1], cprms[3], eprms[2]])
        return len(triangles), triangles
    # Case 15
    if case == 1111:
        triapp([cprms[0], eprms[0], eprms[3]])
        triapp([eprms[0], eprms[1], eprms[3]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([eprms[1], cprms[2], eprms[2]])
        triapp([eprms[2], eprms[3], eprms[1]])
        triapp([eprms[2], cprms[3], eprms[3]])
        return len(triangles), triangles
    # Return empty list (no predefined case matched).
    return 0, []
def add_eig_vec(g, pos_enc_dim):
    """
    Graph positional encoding v/ Laplacian eigenvectors

    This func is for eigvec visualization, same code as positional_encoding() func,
    but stores value in a diff key 'eigvec'

    Args:
        g: DGL graph; gains an ndata field 'eigvec' of shape (n, pos_enc_dim).
        pos_enc_dim: number of non-trivial eigenvectors to keep.
    """
    # Laplacian: L = I - D^-1/2 A D^-1/2 (symmetric normalized form).
    A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
    L = sp.eye(g.number_of_nodes()) - N * A * N
    # Eigenvectors with numpy; sort by eigenvalue so the constant (trivial)
    # eigenvector comes first and is skipped below.
    EigVal, EigVec = np.linalg.eig(L.toarray())
    idx = EigVal.argsort()  # increasing order
    EigVal, EigVec = EigVal[idx], np.real(EigVec[:, idx])
    g.ndata["eigvec"] = torch.from_numpy(EigVec[:, 1 : pos_enc_dim + 1]).float()
    # zero padding to the end if n < pos_enc_dim (small graphs have fewer
    # than pos_enc_dim non-trivial eigenvectors)
    n = g.number_of_nodes()
    if n <= pos_enc_dim:
        g.ndata["eigvec"] = F.pad(
            g.ndata["eigvec"], (0, pos_enc_dim - n + 1), value=float("0")
        )
    return g
def _advanced_clip(
    data, p_min=35, p_max=99.98, nonnegative=True, dtype="int16", invert=False
):
    """
    Remove outliers at both ends of the intensity distribution and fit into a given dtype.

    This interface tries to emulate ANTs workflows' massaging that truncate images into
    the 0-255 range, and applies percentiles for clipping images.
    For image registration, normalizing the intensity into a compact range (e.g., uint8)
    is generally advised.

    To more robustly determine the clipping thresholds, spikes are removed from data with
    a median filter.
    Once the thresholds are calculated, the denoised data are thrown away and the thresholds
    are applied on the original image.
    """
    import numpy as np
    from scipy import ndimage
    from skimage.morphology import ball

    # Calculate stats on denoised version, to preempt outliers from biasing
    denoised = ndimage.median_filter(data, footprint=ball(3))
    a_min = np.percentile(denoised[denoised > 0] if nonnegative else denoised, p_min)
    a_max = np.percentile(denoised[denoised > 0] if nonnegative else denoised, p_max)

    # Clip and rescale to [0, 1]. Cast to float first: the previous in-place
    # `-=`/`/=` raised on integer input arrays.
    data = np.clip(data, a_min=a_min, a_max=a_max).astype(np.float64)
    data -= data.min()
    intensity_range = data.max()
    if intensity_range > 0:  # guard constant images against division by zero
        data /= intensity_range

    if invert:
        data = 1.0 - data

    if dtype in ("uint8", "int16"):
        data = np.round(255 * data).astype(dtype)

    return data
def allocate_buffers(engine):
    """
    Allocates all buffers required for the specified engine

    Args:
        engine: TensorRT engine whose bindings are iterated.

    Returns:
        tuple: (inputs, outputs, bindings, stream) — inputs/outputs are
        lists of HostDeviceMem pairs, bindings are device pointers as ints
        in binding order, and stream is the CUDA stream for async work.
    """
    inputs = []
    outputs = []
    bindings = []
    # Iterate over binding names in engine
    for binding in engine:
        # Get binding (tensor/buffer) size, scaled by the max batch size
        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        # Get binding (tensor/buffer) data type (numpy-equivalent)
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate page-locked memory (i.e., pinned memory) buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        # Allocate linear piece of device memory
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings
        bindings.append(int(device_mem))
        # Append to inputs/ouputs list
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
    # Create a stream (to eventually copy inputs/outputs and run inference)
    stream = cuda.Stream()
    return inputs, outputs, bindings, stream
def D_to_M(D, ecc):
    """Mean anomaly from parabolic eccentric anomaly.

    (Docstring fix: the input is the parabolic anomaly D, not the elliptic
    eccentric anomaly.)

    Parameters
    ----------
    D : float
        Parabolic eccentric anomaly (rad).
    ecc : float
        Eccentricity.

    Returns
    -------
    M : float
        Mean anomaly (rad).
    """
    # Evaluating the parabolic Kepler equation at M=0 yields the residual,
    # which equals the mean anomaly for the given D.
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        M = _kepler_equation_parabolic(D, 0.0 * u.rad, ecc)
    return M
def dict_merge(a, b):
    """Merge a and b.

    Parameters
    ----------
    a
        One dictionary that will be merged
    b
        Other dictionary that will be merged

    Returns
    -------
    dict
        The merged result; ``a`` is shallow-copied first so it is not
        mutated in place.
    """
    # NOTE(review): merge semantics (precedence on key clashes) are defined
    # by the external `_merge` helper — confirm before relying on order.
    return _merge(dict(a), b)
def merge_dictionary(src: dict, dest: dict) -> dict:
    """
    Merge two dictionaries.

    Keys missing from ``dest`` are copied over from ``src`` (dicts and
    lists are deep-copied so the two dictionaries never share references).
    Nested dictionaries present in both are merged recursively, and lists
    present in both are concatenated. For any other key collision the
    existing ``dest`` value wins — ``src`` never overrides a scalar.

    :param src: A dictionary with the values to merge.
    :param dest: A dictionary where to merge the values.
    :return: ``dest``, updated in place.
    """
    for name, value in src.items():
        if name not in dest:
            # Deep-copy containers to avoid keeping references into src;
            # dict and list are handled identically, so one branch suffices.
            if isinstance(value, (dict, list)):
                dest[name] = copy.deepcopy(value)
            else:
                dest[name] = value
        elif isinstance(value, dict):
            # Field exists in destination and is a dict: merge recursively.
            merge_dictionary(value, dest[name])
        elif isinstance(value, list) and isinstance(dest[name], list):
            # Both values are lists: append copies of the source items.
            dest[name].extend(copy.deepcopy(value))
        # Any other collision: keep the destination value untouched.
    return dest
def create_from_source(wp_config, source: Location):
    """
    Build the LuhSql accessor matching a source Location and the WP config.

    SSH credentials are taken from the source when it is remote; a local
    source yields no SSH user/host. Any other Location subclass is rejected.
    """
    if isinstance(source, SshLocation):
        ssh_user, ssh_host = source.user, source.host
    elif isinstance(source, LocalLocation):
        ssh_user = ssh_host = None
    else:
        raise LuhError(f"Unknown source type: {source.__class__.__name__}")
    db_settings = dict(
        host=wp_config["db_host"],
        user=wp_config["db_user"],
        password=wp_config["db_password"],
        db_name=wp_config["db_name"],
    )
    return LuhSql(ssh_user=ssh_user, ssh_host=ssh_host, **db_settings)
def eye(N, M=None, k=0, dtype=DEFAULT_FLOAT_DTYPE):
    """
    Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
    Args:
        N (int): Number of rows in the output, must be larger than 0.
        M (int, optional): Number of columns in the output. If None, defaults to N,
            if defined, must be larger than 0. Default is None.
        k (int, optional): Index of the diagonal: 0 (the default) refers to the main
            diagonal, a positive value refers to an upper diagonal, and a negative value
            to a lower diagonal. Default is 0.
        dtype (Union[mstype.dtype, str], optional): Designated tensor dtype, can
            be in format of np.float32, or `float32`. Default is mstype.float32.
    Returns:
        result (Tensor): A tensor of shape (N,M). A tensor where all elements
        are equal to zero, except for the k-th diagonal, whose values are equal to one.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.eye(2, 2))
        [[1. 0.]
        [0. 1.]]
    """
    dtype = _check_dtype(dtype)
    make_eye = P.Eye()
    if M is None:
        M = N
    # Coerce dimensions/offset to plain ints before dispatching to the op.
    M = int(M)
    N = int(N)
    k = int(k)
    out = None
    if k != 0 or N == 0 or M == 0:
        # Fall back to original numpy creation method — presumably the
        # backend Eye op only supports the main diagonal of a non-empty
        # matrix; confirm against the operator's documentation.
        out = onp.eye(N, M, k)
    else:
        out = make_eye(N, M, dtype)
    # asarray converts either result (numpy or backend tensor) to a Tensor.
    return asarray(out, dtype=dtype)
def create_inception_graph():
    """
    Create a graph from the saved GraphDef file.

    :return: The Inception-trained graph, together with the bottleneck,
        JPEG-input and resized-input tensors retrieved from it.
    """
    with tf.Session() as sess:
        # Prefer 'def.pb'; fall back to the classic Inception filename.
        model_filename = os.path.join(model_dir, 'def.pb')
        if not os.path.exists(model_filename):
            model_filename = os.path.join(model_dir, 'classify_image_graph_def.pb')
        with gfile.FastGFile(model_filename, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            # Import the serialized graph into the default graph and grab
            # handles to the tensors named by the module-level constants.
            bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
                tf.import_graph_def(graph_def, name='', return_elements=[
                    BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
                    RESIZED_INPUT_TENSOR_NAME]))
    return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def team_game_log(request, team_id, season):
    """Individual team season game log page.

    Fetches the regular-season game log from the site's own API and
    renders it with the team-games template.
    """
    api_url = f'http://{request.get_host()}/api/teams/{team_id}/{season}/Regular'
    api_response = requests.get(api_url)
    return render(request, 'main/team_games.html', context=api_response.json())
def _make_allocated_size_testcases():
    """
    Build test cases for some common allocation_units.

    Each generated test-case class is injected into the module namespace
    so the test runner can discover it by name.
    """
    sizes = (1, 2, 4, 8)
    units = (Byte, MB, MiB, GB, GiB)
    for unit in units:
        for amount in sizes:
            case = make_allocated_size_tests(unit(amount))
            globals()[case.__name__] = case
def infer_labels(fn_pickle, testdata, fout_pickle, weak_lower, weak_upper):
    """
    Infer stellar labels for new spectra (the linear case).

    Loads a trained model pickle, solves the weighted linear system for
    each star using only the "weak" (continuum-like) pixels, writes
    ``(Params_all, MCM_rotate_all)`` to ``fout_pickle`` and returns them.
    This is slow because it reads a pickle file.

    Empirically good pixel windows (per the original author's notes):
        log g : weak_lower=0.95,  weak_upper=0.98
        Teff  : weak_lower=0.95,  weak_upper=0.99
        [Fe/H]: weak_lower=0.935, weak_upper=0.98

    Parameters
    ----------
    fn_pickle : str
        Pickle holding (dataall, metaall, labels, offsets, coeffs, covs,
        scatters, chis, chisqs) from training.
    testdata : ndarray, shape (npix, nstars, 3)
        Per star: wavelength, flux (pseudo-normalised), flux uncertainty.
    fout_pickle : str
        Output pickle path for (Params_all, MCM_rotate_all).
    weak_lower, weak_upper : float
        Flux window selecting the weak pixels used in the fit.

    Returns
    -------
    Params_all : ndarray, shape (nstars, nlabels)
        Best-fit labels per star (offsets added back).
    MCM_rotate_all : ndarray, shape (nstars, nlabels, nlabels)
        Normal-equation matrices per star.
    """
    # Pickle files are binary; text mode ('r'/'w') breaks under Python 3.
    with open(fn_pickle, 'rb') as file_in:
        (dataall, metaall, labels, offsets, coeffs, covs, scatters,
         chis, chisqs) = pickle.load(file_in)
    nstars = testdata.shape[1]
    nlabels = len(labels)
    Params_all = np.zeros((nstars, nlabels))
    MCM_rotate_all = np.zeros((nstars, nlabels, nlabels))
    for jj in range(nstars):
        # Test spectra must share the training wavelength grid exactly.
        if np.any(testdata[:, jj, 0] != dataall[:, 0, 0]):
            print(testdata[:5, jj, 0], dataall[:5, 0, 0])
            assert False
        ydata = testdata[:, jj, 1]
        ysigma = testdata[:, jj, 2]
        ydata_norm = ydata - coeffs[:, 0]  # subtract the mean spectrum
        # Keep only the label-dependent columns of the design matrix.
        # (The original used a *float* negative index, which modern numpy
        # rejects; an int slice is equivalent.)
        n_meta = metaall.shape[1]
        coeffs_slice = coeffs[:, -n_meta:]
        # Restrict the fit to weak (continuum-like) pixels.
        ind1 = np.logical_and(ydata > weak_lower, ydata < weak_upper)
        # Inverse variance: measurement noise plus per-pixel model scatter.
        Cinv = 1. / (ysigma ** 2 + scatters ** 2)
        MCM_rotate = np.dot(coeffs_slice[ind1].T,
                            Cinv[:, None][ind1] * coeffs_slice[ind1])
        MCy_vals = np.dot(coeffs_slice[ind1].T, Cinv[ind1] * ydata_norm[ind1])
        Params = np.linalg.solve(MCM_rotate, MCy_vals) + offsets
        print(Params)
        Params_all[jj, :] = Params
        MCM_rotate_all[jj, :, :] = MCM_rotate
    with open(fout_pickle, 'wb') as file_out:
        pickle.dump((Params_all, MCM_rotate_all), file_out)
    return Params_all, MCM_rotate_all
def validate_worker(worker: DaskWorkerDeployment):
    """Validates the worker by accessing the diagnostics server.

    :param worker: Worker to validate.
    """
    # A worker is considered healthy when an HTTP request through its
    # Bokeh diagnostics tunnel succeeds; any failure propagates to the
    # caller from validate_tunnel_http_connection.
    validate_tunnel_http_connection(tunnel=worker.bokeh_tunnel)
def main():
    """Console script for vaqc."""
    arg_parser = argparse.ArgumentParser()
    help_text = ('the root folder of a BIDS derivative dataset '
                 '(sub-XXXXX folders '
                 'should be found at the top level in this folder).')
    arg_parser.add_argument('derivatives_dir', type=Path, action='store',
                            help=help_text)
    derivatives_dir = arg_parser.parse_args().derivatives_dir
    print("using input directory as ", derivatives_dir)
    # Only the fmriprep flavour of derivatives is handled so far.
    if derivatives_dir.name == 'fmriprep':
        process_fmriprep(derivatives_dir)
    return 0
def construct_unique_cluster_name_column(cdips_cat_vnum=0.4):
    """
    We already have a catalog with the following columns:
    source_id;cluster;reference;ext_catalog_name;ra;dec;pmra;pmdec;parallax;
    phot_g_mean_mag;phot_bp_mean_mag;phot_rp_mean_mag;
    We want to supplement it with:
    unique_cluster_name;k13_name_match;how_match;not_in_k13;
    comment;logt;e_logt;logt_provenance
    ----------
    The need for a "unique_cluster_name" is mainly in order to (a) connect with
    other literature, and (b) know roughly how many "unique clusters" exist and
    have been searched. The concept of "unique cluster" in substuctured regions
    like Orion or Sco-OB2 is not defined, so whatever we arrive at for the
    unique name should not be taken as gospel.
    For "comment", we want any relevant information about the cluster (e.g., if
    its existence is dubious, if it overlaps with other clusters, etc.)
    For "logt", "e_logt", and "logt_provenance", we want both the K13 age if
    it's available, and the KC19 age, if that's available.
    ----------
    Parameters
    ----------
    cdips_cat_vnum : float
        Version number of the CDIPS target catalog to read and annotate.

    Side effects: caches intermediate CSVs and writes the final
    ';'-separated "publishable" CSV under /nfs/phtess1/ar1/TESS/PROJ/lbouma/,
    queries Vizier for the K13 index, and ends in an interactive
    IPython.embed() session.
    """
    cdips_df = ccl.get_cdips_catalog(ver=cdips_cat_vnum)
    k13 = get_k13_df()
    sourceid = nparr(cdips_df['source_id'])
    clusterarr = nparr(cdips_df['cluster'])
    ras = nparr(cdips_df['ra'])
    decs = nparr(cdips_df['dec'])
    referencearr = nparr(cdips_df['reference'])
    #
    # Attempt to get Kharchenko+2013 matches from a mix of cluster names and
    # star positions (i.e., Appendix B of CDIPS-I).
    #
    # Drop rows with no cluster name, then deduplicate by cluster string so
    # the (expensive) name-matching runs once per unique cluster.
    inds = ~pd.isnull(clusterarr)
    sourceid = sourceid[inds]
    clusterarr = clusterarr[inds]
    ras = ras[inds]
    decs = decs[inds]
    referencearr = referencearr[inds]
    uarr, inds = np.unique(clusterarr, return_index=True)
    sourceid = sourceid[inds]
    clusterarr = clusterarr[inds]
    ras = ras[inds]
    decs = decs[inds]
    referencearr = referencearr[inds]
    namematchpath = (
        '/nfs/phtess1/ar1/TESS/PROJ/lbouma/OC_MG_FINAL_v{}_with_K13_name_match.csv'.
        format(cdips_cat_vnum)
    )
    # Cache the K13 name-match results on disk; rebuilding them is expensive.
    if not os.path.exists(namematchpath):
        res = list(map(get_k13_name_match,
                       zip(clusterarr, ras, decs, referencearr, repeat(k13))))
        resdf = pd.DataFrame(res,
            columns=['k13_name_match', 'how_match', 'have_name_match',
                     'have_mwsc_id_match', 'is_known_asterism', 'not_in_k13',
                     'why_not_in_k13']
        )
        resdf['source_id'] = sourceid
        mdf = resdf.merge(cdips_df, how='left', on='source_id')
        mdf.to_csv(namematchpath, index=False, sep=';')
        print('made {}'.format(namematchpath))
    else:
        mdf = pd.read_csv(namematchpath, sep=';')
    uniqpath = (
        '/nfs/phtess1/ar1/TESS/PROJ/lbouma/OC_MG_FINAL_v{}_uniq.csv'.
        format(cdips_cat_vnum)
    )
    # Same caching pattern for the unique-cluster-name assignment.
    if not os.path.exists(uniqpath):
        mdf['unique_cluster_name'] = list(map(
            get_unique_cluster_name, zip(mdf.iterrows()))
        )
        cols = ['unique_cluster_name', 'k13_name_match', 'how_match',
                'have_name_match', 'have_mwsc_id_match', 'is_known_asterism',
                'not_in_k13', 'why_not_in_k13', 'source_id', 'cluster', 'dec',
                'dist', 'ext_catalog_name', 'parallax', 'phot_bp_mean_mag',
                'phot_g_mean_mag', 'phot_rp_mean_mag', 'pmdec', 'pmra', 'ra',
                'reference']
        mdf[cols].to_csv(uniqpath, index=False, sep=';')
        print('made {}'.format(uniqpath))
    else:
        mdf = pd.read_csv(uniqpath, sep=';')
    #
    # Merge the unique cluster names against the whole cdips dataframe, using
    # mdf as a lookup table. This is slightly wrong, b/c some spatial matches
    # would give different results than using this string match. However they
    # are a small subset, this is computationally cheaper, and assigning a
    # unique name has inherent limitations.
    #
    subdf = mdf[['unique_cluster_name', 'k13_name_match', 'how_match',
                 'have_name_match', 'have_mwsc_id_match', 'is_known_asterism',
                 'not_in_k13', 'why_not_in_k13', 'cluster']]
    print('beginning big merge...')
    fdf = cdips_df.merge(subdf, on='cluster', how='left')
    assert len(fdf) == len(cdips_df)
    print(42*'#')
    print('# unique cluster names: {}'.format(
        len(np.unique(
            fdf[~pd.isnull(fdf['unique_cluster_name'])]['unique_cluster_name'])
        ))
    )
    print('fraction with k13 name match: {:.5f}'.format(
        len(fdf[~pd.isnull(fdf['k13_name_match'])])/len(fdf)
    ))
    print('fraction with any unique name: {:.5f}'.format(
        len(fdf[~pd.isnull(fdf['unique_cluster_name'])])/len(fdf)
    ))
    #
    # Merge fdf k13_name_match against K13 index, and get updated comments.
    # Join "why_not_in_k13" with "Source object type" "SType" from K13 index.
    # This gives the "comments" column.
    #
    # Fetch the full K13 index (Vizier catalog J/A+A/558/A53) without a row
    # limit; catalogs[1] is the index table.
    Vizier.ROW_LIMIT = -1
    catalog_list = Vizier.find_catalogs('J/A+A/558/A53')
    catalogs = Vizier.get_catalogs(catalog_list.keys())
    k13_index = catalogs[1].to_pandas()
    # Vizier returns bytes columns; decode everything except the integer N.
    for c in k13_index.columns:
        if c != 'N':
            k13_index[c] = k13_index[c].str.decode('utf-8')
    k13_df = k13
    # Human-readable expansions of K13's "SType" source-object-type codes.
    styped = {
        "ass":"stellar association",
        "ast":"Dias: possible asterism/dust hole/star cloud",
        "dub":"Dias: dubious, objects considered doubtful by the DSS images inspection",
        "emb":"embedded open cluster/cluster associated with nebulosity",
        "glo":"globular cluster/possible globular cluster",
        "irc":"infrared cluster",
        "irg":"infrared stellar group",
        "mog":"Dias: possible moving group",
        "non":"Dias: non-existent NGC/ objects not found in DSS images inspection",
        "rem":"Possible cluster remnant",
        "var":"clusters with variable extinction"
    }
    k13_index['STypeComment'] = k13_index['SType'].map(styped)
    # in order to do the desired name join, must remove duplicates from k13
    # index. since we only care about the comments, keep "last" works from
    # inspection.
    ids = k13_index['Name']
    print('removing duplicates from K13 index...\n{}'.format(repr(
        k13_index[ids.isin(ids[ids.duplicated()])].sort_values(by='Name'))))
    k13_index = k13_index.drop_duplicates(subset=['Name'], keep='last')
    _df = fdf.merge(k13_index, left_on='k13_name_match', right_on='Name',
                    how='left')
    assert len(_df) == len(fdf)
    comment = _df['why_not_in_k13']
    stypecomment = _df['STypeComment']
    # Concatenate the two comment sources, then scrub the literal 'nan'
    # strings and stray separators introduced by missing values.
    _df['comment'] = comment.map(str) + ". " + stypecomment.map(str)
    _df['comment'] = _df['comment'].map(lambda x: x.replace('nan',''))
    _df['comment'] = _df['comment'].map(lambda x: x.lstrip('. ').rstrip('. '))
    # unique comments include:
    # array(['',
    #        'Dias: dubious, objects considered doubtful by the DSS images inspection',
    #        'Dias: non-existent NGC/ objects not found in DSS images inspection',
    #        'Dias: possible asterism/dust hole/star cloud',
    #        'K13index: duplicated/coincides with other cluster',
    #        'K13index: duplicated/coincides with other cluster. Dias: dubious, objects considered doubtful by the DSS images inspection',
    #        'K13index: duplicated/coincides with other cluster. Dias: non-existent NGC/ objects not found in DSS images inspection',
    #        'K13index: duplicated/coincides with other cluster. clusters with variable extinction',
    #        'K13index: duplicated/coincides with other cluster. embedded open cluster/cluster associated with nebulosity',
    #        'K13index: duplicated/coincides with other cluster. infrared cluster',
    #        'K13index: duplicated/coincides with other cluster. stellar association',
    #        'K13index: possibly this is a cluster, but parameters are not determined',
    #        'K13index: possibly this is a cluster, but parameters are not determined. Dias: dubious, objects considered doubtful by the DSS images inspection',
    #        'K13index: possibly this is a cluster, but parameters are not determined. Dias: non-existent NGC/ objects not found in DSS images inspection',
    #        'K13index: possibly this is a cluster, but parameters are not determined. infrared cluster',
    #        'K13index: possibly this is a cluster, but parameters are not determined. stellar association',
    #        'K13index: this is not a cluster',
    #        'K13index: this is not a cluster. Dias: dubious, objects considered doubtful by the DSS images inspection',
    #        'K13index: this is not a cluster. Dias: non-existent NGC/ objects not found in DSS images inspection',
    #        'K13index: this is not a cluster. Dias: possible asterism/dust hole/star cloud',
    #        'K13index: this is not a cluster. Possible cluster remt',
    #        'K13index: this is not a cluster. infrared cluster',
    #        'Majaess IR cluster match missing in K13', 'Possible cluster remt',
    #        'clusters with variable extinction',
    #        'embedded open cluster/cluster associated with nebulosity',
    #        'infrared cluster', 'is_bell_mg', 'is_gagne_mg',
    #        'is_gagne_mg. infrared cluster',
    #        'is_gagne_mg. stellar association', 'is_gaia_member',
    #        'is_kraus_mg', 'is_oh_mg', 'is_oh_mg. stellar association',
    #        'is_rizzuto_mg. stellar association', 'known missing from K13',
    #        'stellar association']
    fdf['comment'] = _df['comment']
    del _df
    #
    # Assign ages. If a Kharchenko+2013 name match was found, report that age.
    # In addition, if the star is in the Kounkel & Covey 2019 table, report
    # their age as well.
    #
    _df = fdf.merge(k13_df,
                    left_on='k13_name_match',
                    right_on='Name',
                    how='left')
    assert len(_df) == len(fdf)
    # Ages are carried as strings so that rows with multiple provenances can
    # hold comma-separated lists below.
    k13_logt = np.round(nparr(_df['logt']),2).astype(str)
    k13_e_logt = np.round(nparr(_df['e_logt']),2).astype(str)
    k13_logt_prov = np.repeat('',len(fdf)).astype('>U20')
    k13_logt_prov[k13_logt != 'nan'] = 'Kharchenko2013'
    # kounkel & covey 2019 age matches are only for the groups that were not
    # already known. (otherwise, use the kharchenko age, which is assumed to
    # exist). do it by searching the cluster name. extract the groupid from the
    # name.
    kc19_df1 = pd.read_csv(os.path.join(clusterdatadir,'KC19_string_table1.csv'))
    kc19_df2 = pd.read_csv(os.path.join(clusterdatadir,'KC19_string_table2.csv'))
    kc19_sdf2 = kc19_df2[['group_id','name','age']]
    kc19_mdf = kc19_df1.merge(kc19_sdf2, on='group_id', how='left')
    # if the source_id is in the Kounkel & Covey table, then also report the
    # KC19 age.
    kc19_merge_cdips_df = fdf.merge(kc19_mdf, how='left', on='source_id')
    assert len(kc19_merge_cdips_df) == len(fdf)
    kc19_logt = np.round(nparr(kc19_merge_cdips_df['age']),2).astype(str)
    kc19_e_logt = np.repeat('',len(fdf)).astype('>U10')
    kc19_e_logt[kc19_logt != 'nan'] = '0.15' # Kounkel&Covey 2019 abstract precision
    kc19_logt_prov = np.repeat('',len(fdf)).astype('>U20')
    kc19_logt_prov[kc19_logt != 'nan'] = 'Kounkel_2019'
    #
    # Concatenate and separate by ",". Then remove all "nans". NOTE: if you add
    # extra age provenances (i.e., more than K13 and KC19), this nan-stripping
    # will require adjustments.
    #
    logt = list(map(','.join, zip(k13_logt, kc19_logt)))
    e_logt = list(map(','.join, zip(k13_e_logt, kc19_e_logt)))
    logt_prov = list(map(','.join, zip(k13_logt_prov, kc19_logt_prov)))
    logt = [_.lstrip('nan,').rstrip(',nan') for _ in logt]
    e_logt = [_.lstrip('nan,').rstrip(',nan') for _ in e_logt]
    logt_prov = [_.lstrip(',').rstrip(',') for _ in logt_prov]
    fdf['logt'] = logt
    fdf['e_logt'] = e_logt
    fdf['logt_provenance'] = logt_prov
    # reformat for table to publish
    scols = ['source_id', 'cluster', 'reference', 'ext_catalog_name', 'ra',
             'dec', 'pmra', 'pmdec', 'parallax', 'phot_g_mean_mag',
             'phot_bp_mean_mag', 'phot_rp_mean_mag', 'k13_name_match',
             'unique_cluster_name', 'how_match', 'not_in_k13', 'comment',
             'logt', 'e_logt', 'logt_provenance']
    _df = fdf[scols]
    pubpath = (
        '/nfs/phtess1/ar1/TESS/PROJ/lbouma/OC_MG_FINAL_v{}_publishable.csv'.
        format(cdips_cat_vnum)
    )
    _df.to_csv(pubpath, index=False, sep=';')
    print('made {}'.format(pubpath))
    # NOTE(review): drops into an interactive shell at the end; remove this
    # for non-interactive/batch runs.
    import IPython; IPython.embed()
def figure(*args, grid=True, style='default', figsize=(9, 5), **kwargs):
    """
    Create a new matplotlib figure and return its current axis object.

    Positional and keyword arguments are forwarded to ``pylab.figure``;
    ``style`` must be one of the styles reported by ``pylab.style.available``
    (plus ``'default'``), otherwise a ValueError is raised.
    """
    import pylab as pl
    available = [s for s in pl.style.available + ['default'] if not s.startswith('_')]
    if style not in available:
        raise ValueError(f'\n\n Valid Styles are {available}')
    pl.style.use(style)
    pl.figure(*args, figsize=figsize, **kwargs)
    pl.grid(grid)
    return pl.gca()
def test_count_with_chainable_filter_startswith_operator(service):
    """Check getting $count with $filter in"""
    # pylint: disable=redefined-outer-name
    # Stub the OData $count endpoint; the URL is the percent-encoding of
    # "startswith(NickName, 'Tim') eq true" as the $filter expression.
    responses.add(
        responses.GET,
        f"{service.url}/Employees/$count?$filter=startswith%28NickName%2C%20%27Tim%27%29%20eq%20true",
        json=3,
        status=200)
    employees = service.entity_sets.Employees.get_entities()
    # The chainable __startswith filter must translate to exactly the
    # stubbed URL for execute() to hit the mocked response.
    request = employees.filter(NickName__startswith="Tim").count()
    assert isinstance(request, pyodata.v2.service.GetEntitySetRequest)
    assert request.execute() == 3
def boundcond(stato):
    """Apply the configured boundary conditions to a state string.

    The module-level ``bc`` selects the scheme and ``n`` is used to index
    the last cell; the result is ``stato`` padded with one cell on each
    side:

    - ``'const'``:  constant boundaries ('.' on both sides)
    - ``'refl'``:   reflective boundaries (edge cells mirrored)
    - ``'period'``: periodic boundaries (wrap-around)

    Parameters
    ----------
    stato : str
        The state to be evolved.

    Returns
    -------
    str
        The padded state.

    Raises
    ------
    ValueError
        If ``bc`` is not a recognised scheme. (The original printed a
        message and then crashed with UnboundLocalError on the return.)
    """
    if bc == 'const':
        status = ''.join(('.', stato, '.'))  # constant boundaries
    elif bc == 'refl':
        status = ''.join((stato[0], stato, stato[n-1]))  # reflective boundaries
    elif bc == 'period':
        status = ''.join((stato[n-1], stato, stato[0]))  # periodic boundaries
    else:
        # Fail loudly instead of falling through to an UnboundLocalError.
        raise ValueError('Invalid BCs')
    return status
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.