content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_standard_dev_pkgs() -> set[str]:
    """Check the standard dev package locations for hutch-python.

    Scans every directory on ``PYTHONPATH`` that passes the ``not_ignored``
    filter and returns the names of the importable packages found there.
    Returns an empty set when ``PYTHONPATH`` is unset or empty.
    """
    pythonpath = os.environ.get('PYTHONPATH', '')
    if not pythonpath:
        return set()
    search_dirs = [entry for entry in pythonpath.split(os.pathsep)
                   if not_ignored(entry)]
    return {info.name
            for info in pkgutil.iter_modules(path=search_dirs)
            if info.ispkg}
def create_alert_from_slack_message(payload, time):
    """
    Create a new raw alert (json) from the new alert form in Slack.

    Walks every input block of the submitted Slack view and collects each
    field into a flat dict; the submission timestamp is stored under
    ``'datetime'``.
    """
    alert_json = {}
    form_blocks = payload['view']['state']['values']
    for block in form_blocks.values():
        for field_name, field in block.items():
            if field_name == 'severity':
                # Severity is a static-select menu, so the chosen label
                # lives under selected_option -> text -> text.
                alert_json[field_name] = field['selected_option']['text']['text']
            else:
                alert_json[field_name] = field['value']
    alert_json['datetime'] = time
    return alert_json
def generate(self):
    """generate card number that satisfies the Luhn's algorithm"""
    # TODO: not implemented yet -- this stub currently returns None.
    pass
def psd(buf_in, buf_out):
    """
    Perform discrete fourier transforms using the FFTW library and use it to
    get the power spectral density. FFTW optimizes
    the fft algorithm based on the size of the arrays, with SIMD parallelized
    commands. This optimization requires initialization, so this is a factory
    function that returns a numba gufunc that performs the FFT. FFTW works on
    fixed memory buffers, so you must tell it what memory to use ahead of time.
    When using this with ProcessingChain, to ensure the correct buffers are used
    call ProcessingChain.get_variable('var_name') to give it the internal memory
    buffer directly (with raw_to_dsp, you can just give it the name and it will
    automatically happen!). The possible dtypes for the input/outputs are:
    - complex64 (size n) -> float32/float (size n)
    - complex128 (size n) -> float64/double (size n)
    - complex256/clongdouble (size n) -> float128/longdouble (size n)
    - float32/float (size n) -> float32/float (size n/2+1)
    - float64/double (size n) -> float64/double (size n/2+1)
    - float128/longdouble (size n) -> float128/longdouble (size n/2+1)
    """
    # build intermediate array for the dft, which will be abs'd to get the PSD
    # NOTE(review): the complex dtype is derived from the OUTPUT itemsize
    # (e.g. float64 itemsize 8 -> 'complex128'); assumes buf_out is one of the
    # real float types listed above -- confirm for other dtypes.
    buf_dft = np.ndarray(buf_out.shape, np.dtype('complex'+str(buf_out.dtype.itemsize*16)))
    try:
        # Plan the transform once against the fixed buffers; FFTW raises
        # ValueError for shape/dtype combinations it cannot plan.
        dft_fun = FFTW(buf_in, buf_dft, axes=(-1,), direction='FFTW_FORWARD')
    except ValueError:
        raise ValueError("""Incompatible array types/shapes. Allowed:
        - complex64 (size n) -> float32/float (size n)
        - complex128 (size n) -> float64/double (size n)
        - complex256/clongdouble (size n) -> float128/longdouble (size n)
        - float32/float (size n) -> float32/float (size n/2+1)
        - float64/double (size n) -> float64/double (size n/2+1)
        - float128/longdouble (size n) -> float128/longdouble (size n/2+1)""")
    # Build the numba gufunc signature strings from the actual buffer dtypes.
    typesig = 'void(' + str(buf_in.dtype) + '[:, :], ' + str(buf_out.dtype) + '[:, :])'
    sizesig = '(m, n)->(m, n)' if buf_in.shape == buf_out.shape else '(m, n),(m, l)'

    @guvectorize([typesig], sizesig, forceobj=True)
    def psd(wf_in, psd_out):
        # Run the planned transform, then write |DFT| into the output buffer
        # in place (np.abs with an explicit out argument).
        dft_fun(wf_in, buf_dft)
        np.abs(buf_dft, psd_out)
    return psd
def get_cell_ids(num_celltypes=39):
    """get valid cell ids by removing cell types with missing data.

    Args:
        num_celltypes: highest cell-type id to consider (ids are 1-based).

    Return:
        A cell id list (ascending, excluding the known-missing ids).
    """
    # Set membership is O(1) per lookup (the original scanned a list per id);
    # same ids as before, just stored sorted in a set.
    missing_ids = {8, 17, 23, 25, 30, 32, 33, 34, 35, 38, 39}
    return [cell_id for cell_id in range(1, num_celltypes + 1)
            if cell_id not in missing_ids]
def test_from_storage_table(haiku_metadata):
    """Test the from_storage_table method."""
    # Build metadata from a raw storage-table entity dict and check that the
    # PartitionKey / MaxID columns round-trip into the typed fields.
    # (haiku_metadata is presumably a pytest fixture -- defined elsewhere.)
    metadata = haiku_metadata.from_storage_table(
        {
            "PartitionKey": "FIVE",
            "RowKey": "1",
            "MaxID": 7,
        }
    )
    assert metadata.size == HaikuKey.FIVE
    assert metadata.max_id == 7
def limit_data():
    """Slice data by dolphot values and recovered stars in two filters.

    Reads the module-level ``data`` table and the range-slider widgets
    (``snr``, ``shp``, ``cwd``, ``rnd``, ``err``, ``chi``) plus the
    ``filters`` selector, and returns the rows passing every cut in both
    filters.  Same cuts as before, deduplicated into a loop instead of 24
    hand-written clauses.
    """
    fmt = '{:s}_{:s}'
    filter1, filter2 = filters.value.split(',')
    # Range-slider widgets keyed by the dolphot column they constrain;
    # widget.value is a (low, high) pair.
    quality_cuts = [
        ('SNR', snr), ('SHARP', shp), ('CROWD', cwd),
        ('ROUND', rnd), ('ERR', err), ('CHI', chi),
    ]
    # Recovered stars: |VEGA mag| <= 60 in both filters.
    mask = ((np.abs(data[fmt.format(filter1, 'VEGA')]) <= 60) &
            (np.abs(data[fmt.format(filter2, 'VEGA')]) <= 60))
    # Apply every (low, high) range cut to both filters.
    for filt in (filter1, filter2):
        for col, widget in quality_cuts:
            column = data[fmt.format(filt, col)]
            mask &= (column >= widget.value[0]) & (column <= widget.value[1])
    return data[mask]
def setup_app(app):
    """Initialize database extensions on application.

    :param app: the (Flask-style) application object; its config, CLI group
        and the shared ``db`` extension are wired up in place.
    """
    # Set default configuration (only applied if the key is not already set)
    app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite:////tmp/test.db')
    # Add extension CLI to application.
    app.cli.add_command(database)
    # Bind the module-level db extension instance to this app.
    db.init_app(app)
def purge_versions(path, suffix, num_keep, reverse=False):
    """
    Purge file versions created by get_versioned_path.
    Purge specified quantity in normal or reverse sequence.

    :param path: base file path whose versioned siblings are purged
    :param suffix: version marker embedded in the file names
    :param num_keep: number of versions to keep
    :param reverse: when True, sort (and therefore purge) in reverse order
    :return: the computed purge count (files removed when positive)
    """
    (base, ext) = os.path.splitext(path)
    # NOTE(review): suffix is interpolated into the pattern unescaped --
    # regex metacharacters in suffix would change the match; confirm callers
    # only pass plain identifiers.
    re_strip_version = re.compile('(.*)-%s(-[0-9]*)?' % suffix)
    matched = re_strip_version.match(base)
    if matched:
        # Strip an existing version marker so globbing starts from the root name.
        base = matched.group(1)
    # glob() already returns a list; no need to copy it element by element.
    versions = glob('%s-%s*%s' % (base, suffix, ext))
    versions.sort(key=_get_version, reverse=reverse)
    num_purge = len(versions) - num_keep
    if num_purge > len(versions):
        # Only possible when num_keep is negative; treat as "keep everything".
        num_purge = 0
    if num_purge > 0:
        for version_path in versions[:num_purge]:
            os.remove(version_path)
    return num_purge
def make_valance_getter(
    lexicon: Dict[str, float],
    lemmatize: bool = True,
    lowercase: bool = True,
    cap_differential: Optional[float] = C_INCR,
) -> Callable[[Token], float]:
    """Creates a token getter which return the valence (sentiment) of a token including the capitalization of the token.

    Args:
        lexicon (Dict[str, float]): The valence scores of the tokens.
        lemmatize (bool, optional): Should it look up in the lexicon (and intensifiers) using the lemma? Defaults to True.
        lowercase (bool, optional): Should it look up in the lexicon (and intensifiers) using the lowercased word? Defaults to True.
        cap_differential (Optional[float], optional): Capitalization differential, which is added to the valence of the score it is emphasized using all caps.
            Defaults to 0.733, an empirically derived constant (Hutto and Gilbert, 2014). If None it will not be used.

    Returns:
        Callable[[Token], float]: The getter function
    """
    t_getter = make_txt_getter(lemmatize, lowercase)

    def lemma_valence_getter(token: Token) -> float:
        # (removed a dead `valence = 0` local -- it was never read)
        t = t_getter(token)
        if (t in lexicon) and not (
            Token.has_extension("intensifier") and token._.intensifier
        ):  # if token isn't a intensifier
            return lexicon[t]
        return 0.0

    def cap_diff_valence_getter(token: Token) -> float:
        # Amplify the raw valence when the token is ALL CAPS inside a
        # sentence flagged by is_cap_diff (mixed-case sentence).
        valence = token._.raw_valence
        if token.is_upper and token.sent._.is_cap_diff:
            if valence > 0:
                valence += cap_differential
            elif valence < 0:
                valence -= cap_differential
        return valence

    if cap_differential:
        # Register the helper extensions once, then return the getter that
        # layers the capitalization boost on top of the raw valence.
        if not Token.has_extension("raw_valence"):
            Token.set_extension("raw_valence", getter=lemma_valence_getter)
        if not Span.has_extension("is_cap_diff"):
            Span.set_extension("is_cap_diff", getter=allcap_differential_getter)
        return cap_diff_valence_getter
    return lemma_valence_getter
def proper_units(text: str) -> str:
    """
    Function for changing units to a better form.

    Args:
        text (str): text to check.

    Returns:
        str: reformatted text with better units.
    """
    replacements = {
        r"degK": r"K",
        r"degC": r"$^{\circ}$C",
        r"degrees\_celsius": r"$^{\circ}$C",
        r"degrees\_north": r"$^{\circ}$N",
        r"degrees\_east": r"$^{\circ}$E",
        r"degrees\_west": r"$^{\circ}$W",
        r"I metric": r"$\mathcal{I}$--metric",
    }
    # Longest keys first so e.g. "degrees\_..." wins over any shorter overlap.
    ordered_keys = sorted(replacements, key=len, reverse=True)
    pattern = re.compile("|".join(map(re.escape, ordered_keys)))

    def _substitute(match: "re.Match") -> str:
        return replacements[match.group()]

    return pattern.sub(_substitute, text)
def coerce_number(value, convert = float):
    """ Convert a database field value to a numeric type.

    Date/time-like strings are first parsed to datetime, dates are turned
    into epoch seconds, then ``convert`` (float by default) is applied.
    (Python 2 code: uses ``basestring``.)
    """
    # Matches an ISO-like date prefix: YYYY-MM-DD
    pattern = re.compile(r'^\d{4}(-\d\d){2}')
    format = '%Y-%m-%d %H:%M:%S'
    if isinstance(value, basestring) and pattern.match(value):
        # First convert the date/time string into a datetime object.
        try:
            # Trim the format to the same granularity as the input string.
            mask = format[:len(value) - 2]
            value = datetime.strptime(value, mask)
        except ValueError:
            pass
    if isinstance(value, date):
        # '%s' formats as seconds since the epoch.
        # NOTE(review): '%s' is a platform-dependent strftime extension
        # (not available on Windows) -- confirm the deployment platform.
        value = value.strftime('%s')
    return convert(value)
async def test_option_flow(hass):
    """Test config flow options."""
    # Set up the integration (helper defined elsewhere), then drive the
    # options flow: init -> form -> configure -> create entry.
    entry, _, _ = await setup_onvif_integration(hass)
    result = await hass.config_entries.options.async_init(entry.entry_id)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "onvif_devices"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            config_flow.CONF_EXTRA_ARGUMENTS: "",
            config_flow.CONF_RTSP_TRANSPORT: config_flow.RTSP_TRANS_PROTOCOLS[1],
        },
    )
    # The submitted options must be stored verbatim on the created entry.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"] == {
        config_flow.CONF_EXTRA_ARGUMENTS: "",
        config_flow.CONF_RTSP_TRANSPORT: config_flow.RTSP_TRANS_PROTOCOLS[1],
    }
def create_credential_resolver():
    """Create a credentials resolver for Localstack.

    Environment credentials are consulted first, then the project's
    default provider.
    """
    providers = [
        botocore.credentials.EnvProvider(),
        DefaultCredentialProvider(),
    ]
    return botocore.credentials.CredentialResolver(providers=providers)
def does_algorithm_implementation_have_capabilities_to_execute_parameter(parameter_kisao_id, algorithm_specs):
    """ Determine if an implementation of an algorithm has the capabilities to execute an algorithm parameter

    Args:
        parameter_kisao_id (:obj:`str`): KiSAO id for an algorithm parameter
        algorithm_specs (:obj:`dict` with schema ``https://api.biosimulators.org/openapi.json#/components/schemas/Algorithm``):
            specifications of the implementation of an algorithm

    Returns:
        :obj:`bool`: whether the implementation of the algorithm has the capabilities to execute the SED parameter
    """
    # any() short-circuits on the first matching parameter spec.
    return any(
        parameter_specs['kisaoId']['id'] == parameter_kisao_id
        for parameter_specs in algorithm_specs['parameters']
    )
def branch(_, verbose=False, no_stash=False):
    """
    Creates a branch from a release tag for creating a new patch or minor release from that branch.

    :param _: unused task context argument
    :param verbose: whether the underlying git helpers emit verbose output
    :param no_stash: whether to skip stashing uncommitted changes during setup
    """
    _ensure_configured('release')
    from invoke_release.version import __version__
    _standard_output('Invoke Release {}', __version__)
    _setup_task(no_stash, verbose)
    try:
        _fetch_tags(verbose)
        tags = _get_tag_list(verbose)
        branch_version = _prompt('Enter a version tag from which to create a new branch (or "exit"):').lower()
        if not branch_version or branch_version == INSTRUCTION_EXIT:
            raise ReleaseExit()
        if branch_version not in tags:
            raise ReleaseFailure('Version number {} not in the list of available tags.'.format(branch_version))
        # Derive candidate branch names from the chosen tag: tag "1.2.3"
        # yields minor branch "1.2.x" and major branch "1.x.x".
        _v = LooseVersion(branch_version)
        minor_branch = '.'.join(list(map(six.text_type, _v.version[:2])) + ['x'])
        major_branch = '.'.join(list(map(six.text_type, _v.version[:1])) + ['x', 'x'])
        proceed_instruction = _prompt(
            'Using tag {tag}, would you like to create a minor branch for patch versions (branch {minor}, '
            'recommended), or a major branch for minor versions (branch {major})? (MINOR/major/exit):',
            tag=branch_version,
            minor=minor_branch,
            major=major_branch,
        )
        if proceed_instruction == INSTRUCTION_EXIT:
            raise ReleaseExit()
        new_branch = major_branch if proceed_instruction == INSTRUCTION_MAJOR else minor_branch
        if USE_PULL_REQUEST:
            # Pull-request workflow: ensure the release branch exists locally
            # (tracking the remote, or freshly pushed), then create a
            # cherry-pick feature branch on top of it for the fixes.
            if _is_branch_on_remote(verbose, new_branch):
                _standard_output(
                    'Branch {branch} exists on remote. Creating local tracking branch.',
                    branch=new_branch,
                )
                created = _create_local_tracking_branch(verbose, new_branch)
                if not created:
                    raise ReleaseFailure(
                        'Could not create local tracking branch {branch}.\n'
                        'Does a local branch named {branch} already exists?\n'
                        'Delete or rename your local branch {branch} and try again.'.format(branch=new_branch),
                    )
            else:
                _standard_output(
                    'Branch {branch} does not exist on remote.\n'
                    'Creating branch, and pushing to remote.',
                    branch=new_branch,
                )
                _create_branch_from_tag(verbose, branch_version, new_branch)
                _push_branch(verbose, new_branch)
            cherry_pick_branch_suffix = _prompt(
                'Now we will create the branch where you will apply your fixes. We\n'
                'need a name to uniquely identify your feature branch. I suggest using\n'
                'the JIRA ticket id, e.g. EB-120106, of the issue you are working on:'
            )
            if not cherry_pick_branch_suffix:
                raise ReleaseFailure('You must enter a name to identify your feature branch.')
            _create_branch(
                verbose,
                'cherry-pick-{hotfix_branch_name}-{suffix}'.format(
                    hotfix_branch_name=new_branch,
                    suffix=cherry_pick_branch_suffix,
                )
            )
        else:
            # Direct-push workflow: create the branch locally and optionally
            # push it straight to the remote.
            _create_branch_from_tag(verbose, branch_version, new_branch)
            push_instruction = _prompt(
                'Branch {} created. Would you like to go ahead and push it to remote? (y/N):',
                new_branch,
            ).lower()
            if push_instruction and push_instruction == INSTRUCTION_YES:
                _push_branch(verbose, new_branch)
        _standard_output('Branch process is complete.')
    except ReleaseFailure as e:
        _error_output(e.args[0])
    except subprocess.CalledProcessError as e:
        # Surface the failing git command with its captured output.
        _error_output(
            'Command {command} failed with error code {error_code}. Command output:\n{output}',
            command=e.cmd,
            error_code=e.returncode,
            output=e.output.decode('utf8'),
        )
    except (ReleaseExit, KeyboardInterrupt):
        _standard_output('Canceling branch!')
    finally:
        _cleanup_task(verbose)
def win_sparkle_set_app_details(company_name, app_name, app_version):
    """ Sets application metadata.
    Normally, these are taken from VERSIONINFO/StringFileInfo resources,
    but if your application doesn't use them for some reason, using this
    function is an alternative.
    note company_name and app_name are used to determine the location
    of WinSparkle settings in registry.
    (HKCU\Software\<company_name>\<app_name>\WinSparkle is used.)
    :param company_name: Company name of the vendor.
    :param app_name: Application name. This is both shown to the user
    and used in HTTP User-Agent header
    :param app_version: Version of the app, as string (e.g. "1.2" or "1.2rc1").
    """
    # Declare the ctypes signature (three wide-char strings, no return value)
    # before calling into the WinSparkle DLL (`dll` is a module-level handle).
    dll.win_sparkle_set_app_details.restype = None
    dll.win_sparkle_set_app_details.argtypes = [c_wchar_p, c_wchar_p, c_wchar_p]
    dll.win_sparkle_set_app_details(company_name, app_name, app_version)
def run_saliency():
    """Method called when click saliency button.

    Runs the XAI saliency computation and pushes the resulting heatmaps into
    the UI state.  The classification and detection branches were previously
    duplicated verbatim; they now share one code path that differs only in
    the state key used.
    """
    print("Exec saliency code for explanation")
    output = XAI.run_saliency()
    print("run_saliency")
    out_type = output.get("type")
    update_state("xai_type", out_type)
    # classification/detection both produce a per-class stack of saliency
    # maps; they only differ in which state key receives the heatmaps.
    per_class_state_keys = {
        "classification": "xai_class_heatmaps",
        "detection": "xai_detection_heatmaps",
    }
    if out_type in per_class_state_keys:
        saliency = output.get("saliency")
        heat_maps = {
            f"heatmap_{i}": saliency[i].ravel().tolist()
            for i in range(saliency.shape[0])
        }
        update_state(per_class_state_keys[out_type], heat_maps)
    elif out_type == "similarity":
        # Similarity yields a single map.
        saliency = output.get("saliency")
        update_state("xai_similarity_heatmaps",
                     {"heatmap_0": saliency.ravel().tolist()})
    else:
        # Unknown type: dump shapes/dtypes for debugging.
        print(out_type)
        for key, value in output.items():
            if key != "type":
                print(f"{key}: {value.shape} | {value.dtype}")
def main():
    """
    This method allows the script to be run in stand alone mode.

    @return Exit code from running the script
    """
    return Record().Run()
def stamp_pixcov_from_theory(N,cmb2d_TEB,n2d_IQU=0.,beam2d=1.,iau=False,return_pow=False):
    """Return the pixel covariance for a stamp N pixels across given the 2D IQU CMB power spectrum,
    2D beam template and 2D IQU noise power spectrum.

    cmb2d_TEB is expected to have shape (ncomp, ncomp, Ny, Nx) with ncomp 1 (T)
    or 3 (IQU) and to carry a .wcs attribute (enmap-style array -- confirm).
    When return_pow is True the (possibly rotated) CMB power is returned too.
    """
    n2d = n2d_IQU
    cmb2d = cmb2d_TEB
    assert cmb2d.ndim==4
    ncomp = cmb2d.shape[0]
    assert cmb2d.shape[1]==ncomp
    assert ncomp==3 or ncomp==1
    wcs = cmb2d.wcs
    shape = cmb2d.shape[-2:]
    # Rotate the polarization power from TEB to IQU before combining with noise.
    if ncomp==3: cmb2d = rotate_pol_power(shape,wcs,cmb2d,iau=iau,inverse=True)
    # Total 2D power: beam-convolved CMB plus noise.
    p2d = cmb2d*beam2d**2.+n2d
    if not(return_pow): return fcov_to_rcorr(shape,wcs,p2d,N)
    return fcov_to_rcorr(shape,wcs,p2d,N), cmb2d
def format(): # noqa
    """Formats raw data

    Thin wrapper around format_data() (defined elsewhere).
    Note: the name intentionally shadows the builtin ``format`` -- hence
    the ``noqa``; renaming would break whatever registers this callable.

    Returns:
        None
    """
    format_data()
def make_data_parallel(module, expose_methods=None):
    """Wraps `nn.Module object` into `nn.DataParallel` and links methods whose name is listed in `expose_methods`

    When `expose_methods` is None, the wrapped module's own
    `expose_methods` attribute (if any) is used instead.
    """
    wrapped = nn.DataParallel(module)
    if expose_methods is None:
        expose_methods = getattr(module, 'expose_methods', None)
    # Re-bind each listed method from the inner module onto the wrapper so
    # callers can keep using it directly on the DataParallel object.
    for method_name in expose_methods or ():
        setattr(wrapped, method_name, getattr(wrapped.module, method_name))
    return wrapped
def model_creator(model_dict, X_train, y_train, rd=None, rev=None):
    """Returns a SVM classifier

    Tries to load a previously saved model first; trains a new one only
    when no saved model exists.
    """
    classifier = model_loader(model_dict, rd, rev)
    if classifier is not None:
        return classifier
    # No saved model found -- train a fresh SVM instead.
    return model_trainer(model_dict, X_train, y_train, rd, rev)
async def folder2azureblob(container_client_instance=None, src_folder=None, dst_blob_name=None,
                           overwrite=False, max_concurrency=8, timeout=None
                           ):
    """
    Asynchronously upload a local folder (including its content) to Azure blob container

    :param container_client_instance: instance of azure.storage.blob.aio.ContainerClient
    :param src_folder: str, full abs path to the folder to be uploaded
    :param dst_blob_name: str the name of the blob prefix under which the content of the folder will be uploaded
    :param overwrite: bool, defaults to false, specify if an existing blob will be overwritten
    :param max_concurrency: int, maximum number of parallel connections to use when the blob size exceeds 64MB
    :param timeout: timeout in seconds applied to each 100-file chunk of uploads.
    :return:
    """
    assert src_folder not in [None, '', '/' ], f'src_folder={src_folder} is invalid'
    assert os.path.exists(src_folder), f'src_folder={src_folder} does not exist'
    assert os.path.isabs(src_folder), f'src_folder={src_folder} is not a an absolute path'
    assert os.path.isdir(src_folder), f'src_folder={src_folder} is not a directory'
    assert len(src_folder)>1, f'src_folder={src_folder} is invalid'
    try:
        async with container_client_instance:
            # Blob prefix: explicit dst_blob_name, or the folder's own name.
            prefix = os.path.split(src_folder)[-1] if dst_blob_name is None else dst_blob_name
            # First pass over the tree only counts files (scantree is a
            # generator, so it is re-created below for the real iteration).
            r = scantree(src_folder)
            nfiles = count(r)
            nchunks = nfiles//100 + 1
            n = 0
            r = scantree(src_folder)
            with tqdm(total=nchunks, desc="Uploading ... ", initial=0, unit_scale=True,
                      colour='green') as pbar:
                # Upload in chunks of 100 files; each chunk is awaited as a
                # batch with the shared timeout.
                for chunk in slicer(r,100):
                    ftrs = list()
                    #logger.info(f'Uploading file chunk no {n} from {nchunks} - {n / nchunks * 100:.2f}%')
                    await asyncio.sleep(1)
                    for local_file in chunk:
                        if not local_file.is_file():continue
                        # Blob path mirrors the file's path relative to src_folder.
                        blob_path = os.path.join(prefix, os.path.relpath(local_file.path, src_folder))
                        #print(e.path, blob_path)
                        fut = asyncio.ensure_future(
                            upload_file(container_client_instance=container_client_instance,
                                        src=local_file.path, dst_blob_name=blob_path, overwrite=overwrite,
                                        max_concurrency=max_concurrency)
                        )
                        ftrs.append(fut)
                    done, pending = await asyncio.wait(ftrs, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
                    # Exceptions are collected rather than raised so one bad
                    # file does not abort the whole chunk.
                    results = await asyncio.gather(*done, return_exceptions=True)
                    for res in results:
                        if type(res) == tuple:
                            blob_client, file_path_to_upload = res
                        else: #error
                            logger.error(f'{dst_blob_name} was not uploaded successfully')
                            logger.error(res)
                    # NOTE(review): awaiting a future still pending after the
                    # timeout blocks until it finishes -- confirm intended.
                    for failed in pending:
                        blob_client, file_path_to_upload = await failed
                        logger.debug(f'Uploading {file_path_to_upload} to {container_client_instance.url} has timed out.')
                    pbar.update(1)
                    n+=1
    except Exception as err:
        logger.error(f'Failed to upload {src_folder} to {container_client_instance.url}')
        raise
def bert_predict(model, loader):
    """Perform a forward pass on the trained BERT model to predict probabilities
    on the test set.

    :param model: trained model; called as model(input_ids, attention_mask).
    :param loader: iterable of batches whose first two tensors are input ids
        and attention masks (extra tensors, e.g. labels, are ignored).
    :return: numpy array of per-class probabilities, shape (n_samples, n_classes).

    Note: relies on a module-level ``device`` for tensor placement.
    """
    # Put the model into the evaluation mode. The dropout layers are disabled during
    # the test time.
    model.eval()
    all_logits = []
    # For each batch in our test set...
    for batch in loader:
        # Load batch to GPU (or whatever module-level `device` points at)
        b_input_ids, b_attn_mask = tuple(t.to(device) for t in batch)[:2]
        # Compute logits
        with torch.no_grad():
            logits = model(b_input_ids, b_attn_mask)
        all_logits.append(logits)
    # Concatenate logits from each batch
    all_logits = torch.cat(all_logits, dim=0)
    # Apply softmax to calculate probabilities
    probs = F.softmax(all_logits, dim=1).cpu().numpy()
    return probs
def write_v3dtxt(fname, trc, forces, freq=0, show_msg=True):
    """Write Visual3d text file from .trc and .forces files or dataframes.

    The .trc and .forces data are assumed to correspond to the same time
    interval. If the data have different number of samples (different
    frequencies), the data will be resampled to the highest frequency (or to
    the inputed frequency if it is higher than the former two) using the tnorm
    function.

    Parameters
    ----------
    fname : string
        Full file name of the Visual3d text file to be saved.
    trc : pandas dataframe or string
        If string, it is a full file name of the .trc file to read.
        If dataframe, data of the .trc file has shape (nsamples, 2 + 3*nmarkers)
        where the first two columns are from the Frame and Time values.
        Input an empty string '' if there is no .trc file/dataframe (in this
        case there must be forces and the input freq is the forces frequency).
    forces : pandas dataframe or string
        If string, it is a full file name of the .forces file to read.
        If dataframe, data of the .forces file has shape (nsamples, 7*nforceplates)
        Input an empty string '' if there is no forces file/dataframe (in this
        case there must be a trc file/dataframe).
    freq : float (optional, dafault=0)
        Sampling frequency in Hz to resample data if desired.
        Data will be resampled to the highest frequency between freq, trc, forces.
    show_msg : bool (default = True)
        Whether to print messages about the execution of the intermediary steps
        (True) or not (False).
    """
    # Normalize the two inputs: a path string is read from disk; an empty
    # string becomes an empty dataframe.
    if isinstance(trc, str):
        if trc:
            _, trc = read_trc(trc, fname2='', units='', df_multi=False)
        else:
            trc = pd.DataFrame()
    if isinstance(forces, str):
        if forces:
            _, forces = read_forces(forces)
        else:
            forces = pd.DataFrame()
    # Resample when the sample counts disagree or an explicit freq was given.
    if trc.shape[0] != forces.shape[0] or freq:
        from tnorm import tnorm
        # trc sampling rate estimated from the Time column spacing.
        freq_trc = 0 if trc.empty else 1/np.nanmean(np.diff(trc.iloc[:, 1].values))
        if freq_trc:
            freq_forces = 0 if forces.empty else freq_trc*(forces.shape[0]/trc.shape[0])
        else:
            freq_forces = freq
        freq = np.max([freq, freq_trc, freq_forces])
        nsample = np.max([trc.shape[0], forces.shape[0]]) * freq/(np.max([freq_trc, freq_forces]))
        # New Frame#/Time columns for the resampled data.
        frame_time = np.vstack((np.arange(1, nsample+1, 1), np.arange(0, nsample, 1)/freq)).T
        if freq_trc:
            trc2, _, _ = tnorm(trc.iloc[:, 2:].values, step=-nsample)
            trc2 = np.hstack((frame_time, trc2))
            trc = pd.DataFrame(trc2, index=None, columns=trc.columns)
        else:
            trc = pd.DataFrame(frame_time, index=None, columns=['Frame#', 'Time'])
        if freq_forces:
            forces2, _, _ = tnorm(forces.values, step=-nsample)
            forces = pd.DataFrame(forces2, index=None, columns=forces.columns)
    ntrc = trc.shape[1]
    nforces = forces.shape[1]
    if nforces:
        data = pd.concat([trc, forces], axis=1)
    else:
        data = trc
    with open(file=fname, mode='wt', encoding='utf-8', newline='') as f:
        if show_msg:
            print('Saving file "{}" ... '.format(fname), end='')
        # Five-row Visual3d header: defaults, column names, signal types
        # (TARGET for markers / ANALOG for forces), processing level, and
        # the X/Y/Z component row.
        rows = [[''] + ['default']*(ntrc + nforces - 1),
                [''] + data.columns.tolist()[1:],
                [''] + ['FRAME_NUMBERS'] + ['TARGET']*(ntrc - 2) + ['ANALOG']*nforces,
                [''] + ['ORIGINAL']*(ntrc + nforces -1),
                [data.columns[0]] + ['0'] + ['X', 'Y', 'Z']*int((ntrc - 2)/3) + ['0']*nforces]
        write = csv.writer(f, delimiter='\t')
        write.writerows(rows)
        write.writerows(data.values)
        if show_msg:
            print('done.')
def eval_input_fn(training_dir, params):
    """Returns input function that feeds the model during evaluation.

    Both arguments are accepted for input-fn API compatibility
    (presumably the SageMaker/Estimator contract -- confirm) but are
    unused; the shared _input_fn helper is dispatched in 'eval' mode.
    """
    return _input_fn('eval')
def GNIs(features, labels, mode, params, config):
    """Builds the model function for use in an estimator.

    Arguments:
        features: The input features for the estimator.
        labels: The labels, unused here.
        mode: Signifies whether it is train or test or predict.
        params: Some hyperparameters as a dictionary.
        config: The RunConfig, unused here.

    Returns:
        EstimatorSpec: A tf.estimator.EstimatorSpec instance.

    NOTE(review): the original file had its indentation stripped; block
    nesting below was reconstructed from the code's semantics -- the
    ambiguous spots are marked with review notes.
    """
    del config
    N, H = params["N"], params["H"]
    n_samples = params["n_samples"]
    params["non_targeted_layers"] = []
    if params["input_inject"]:
        # Inject noise only at the input: exclude all later layers.
        # NOTE(review): the `+= [N + 1]` line is assumed to belong inside
        # this branch -- confirm against the original source.
        params["non_targeted_layers"] = list(range(1, N + 1))
        params["non_targeted_layers"] += [N + 1]
    image_tile_summary("input", features, rows=1, cols=16)
    # --- Ensure input data is flat
    features = tf.reshape(features, (-1, np.prod(params['image_shape'])))
    features = tf.cast(features, dtype=tf.float32)
    if labels is not None:
        labels = tf.cast(labels, dtype=tf.float32)
    else:
        labels = tf.ones_like(features[:, :10], dtype=None)
    B = int_shape(labels)[0]
    n_output = int_shape(labels)[-1]
    if params['activation'] != 'linear':
        activation = getattr(tf.nn, params['activation'])
    else:
        activation = None
    # --- Make discriminator
    if params["disc_type"] == 'mlp':
        mlp = make_mlp(activation, np.prod(params['image_shape']), N, H,
                       n_output)
    if params["disc_type"] == 'convnet':
        mlp = make_convnet(activation, params['image_shape'], n_output)
    if params["disc_type"] == 'vgg':
        mlp = make_vgg13(activation, params['image_shape'], n_output)
    # --- Retrieve intermediate activations, and layer output
    # --- we don't want to mask the final layer so activations doesn't include the output layer
    p_phi_y = mlp(features)
    sel_layer_shapes = [p_phi_y['layer_shapes'][i] for i in range(N + 1)]
    # --- Get Predictions using log(p(y|x))
    preds = p_phi_y['activations'][-1]
    # --- Classification loss, log(p(y|x))
    if params["loss"] == 'cross_entropy':
        loss = cross_entropy(labels, preds)
        pred_class = tf.argmax(input=preds, axis=-1)
        true_class = tf.argmax(input=labels, axis=-1)
        acc = tf.cast(tf.equal(pred_class, true_class), tf.float32)
        tf.compat.v1.summary.scalar("accuracy", tf.reduce_mean(acc))
    elif params["loss"] == 'mse':
        loss = square_error(labels, preds)
    global_step = tf.compat.v1.train.get_or_create_global_step()
    # Second forward pass with noise injected into the non-excluded layers.
    p_phi_y_noisy = replace_mask_layer(
        features,
        p_phi_y,
        non_targeted_layers=params['non_targeted_layers'],
        var=params["var"],
        n_samples=n_samples,
        mode=params["noise_mode"])
    preds_noisy = p_phi_y_noisy['activations'][-1]
    # --- Classification loss, log(p(y|x))
    if params["loss"] == 'cross_entropy':
        noisy_loss = cross_entropy(labels, preds_noisy)
    elif params["loss"] == 'mse':
        noisy_loss = square_error(labels, preds_noisy)
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(
        params["learning_rate"])
    gradients, variables = [], []
    tf.compat.v1.summary.scalar("learning_rate", params["learning_rate"])
    tf.compat.v1.summary.scalar("batch_size", B)
    # --- Enumerate over activation layers, zip automatically removes final
    # --- logit layer
    layers = [
        l for l in p_phi_y['net'].layers
        if ('dense' in l.name or 'conv' in l.name)
    ]
    noises = [
        tf.reshape(n, (B, n_samples, -1)) for n in p_phi_y_noisy['noise'][:-1]
    ]
    weights = [layers[i].trainable_weights[0] for i in range(N + 1)]
    acts = p_phi_y['activations'][:-1]
    # Per-layer Jacobians of the predictions w.r.t. each activation.
    Js = [
        tf.reshape(batch_jacobian(preds, a, use_pfor=True), (B, -1, n_output))
        for a in acts
    ]
    print(Js)  # debug output left in place
    # NOTE: this rebinds H (was the hidden width) to the Taylor term.
    G, C, H = calc_taylor_expansion(Js, loss, preds, noises, B, n_samples)
    EC = calc_tikhonov_reg(Js, acts, preds, params["noise_mode"],
                           params["var"], params["loss"])
    H_sig = heavy_tail_variance(Js, loss, preds)
    l_noise = 0
    # Select how the noisy loss is estimated from the analytic terms.
    if params["noise_type"] is None:
        noisy_loss_estimate = loss
    elif params["noise_type"] == 'input':
        noisy_loss_estimate = noisy_loss
    elif 'full' in params["noise_type"]:
        # --- This is the Gaussian stuff
        assert n_samples == 1
        l_noise += H + G + C
        noisy_loss_estimate = loss + l_noise
    elif 'marginal' in params["noise_type"]:
        # --- Don't ever noise final layer
        assert n_samples == 1
        l_noise = EC
        if 'H' in params["noise_type"]:
            l_noise += H
        if 'C' in params["noise_type"]:
            # alpha, beta, sigma, mu = tf.py_func(
            #     estimate_all_params,
            #     inp=[(C - EC)],
            #     Tout=[tf.float32, tf.float32, tf.float32, tf.float32])
            #
            # tf.compat.v1.summary.scalar('C/alpha', alpha)
            # tf.compat.v1.summary.scalar('C/beta', beta)
            # tf.compat.v1.summary.scalar('C/sigma', sigma)
            # tf.compat.v1.summary.scalar('C/mu', mu)
            # tf.compat.v1.summary.scalar('C', tf.reduce_mean(C - EC))
            # tf.compat.v1.summary.histogram('C', C)
            l_noise += (C - EC)
        if 'G' in params["noise_type"]:
            l_noise += G
        noisy_loss_estimate = loss + l_noise
    # Compare the analytic estimate against the actually sampled noise.
    actual_noise = tf.reduce_mean(noisy_loss - loss)
    estimated_noise = tf.reduce_mean(noisy_loss_estimate - loss)
    tf.compat.v1.summary.scalar('loss/actual_noise', actual_noise)
    tf.compat.v1.summary.scalar('loss/estimated_noise', estimated_noise)
    tf.compat.v1.summary.scalar("loss/noisy_" + params["loss"],
                                tf.reduce_mean(noisy_loss))
    tf.compat.v1.summary.scalar("loss/og_" + params["loss"],
                                tf.reduce_mean(loss))
    noise_err = tf.reduce_mean(estimated_noise - actual_noise)
    tf.compat.v1.summary.scalar(
        'loss/noise_est_pe',
        tf.abs(noise_err / tf.reduce_mean(actual_noise + 1e-8)))
    tf.compat.v1.summary.scalar('loss/noise_est_mse',
                                tf.abs(tf.reduce_mean(noise_err**2)))
    loss_err = tf.reduce_mean(noisy_loss_estimate - noisy_loss)
    tf.compat.v1.summary.scalar(
        'loss/loss_est_pe',
        tf.abs(loss_err / tf.reduce_mean(noisy_loss + 1e-8)))
    tf.compat.v1.summary.scalar('loss/loss_est_mse',
                                tf.abs(tf.reduce_mean(loss_err**2)))
    if params["L2"] > 0:
        # Optional L2 weight decay added to the estimated loss.
        vars = tf.trainable_variables()
        l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in vars]) * params["L2"]
        noisy_loss_estimate += l2_reg
        tf.compat.v1.summary.scalar("loss/L2_reg", l2_reg)
        loss_err = tf.reduce_mean(noisy_loss_estimate - noisy_loss)
    # tf.compat.v1.summary.image('activations_covariance', activation_covariance)
    # g_noise =
    # Per-layer diagnostics and gradient collection for the training step.
    for i, w in enumerate(weights):
        layer_name = "layer_" + str(i)
        num_params = np.prod(int_shape(w))
        a = p_phi_y['activations'][i]
        noisy_a = p_phi_y_noisy['activations'][i]
        inj_noise = noisy_a - a
        print(noisy_a, a)  # debug output left in place
        # --- Display in tensorboard -- Injected noise stats
        tf.compat.v1.summary.histogram(layer_name + '/injected_noise',
                                       inj_noise)
        n_neurons = int_shape(a)[1]
        tf.compat.v1.summary.histogram(layer_name + '/w', w)
        corr = tfp.stats.correlation(a)
        tf.compat.v1.summary.scalar(layer_name + '/corr', tf.reduce_mean(corr))
        sparsity = tf.reduce_sum(tf.cast(a <= 1e-6, tf.float32))
        # tf.compat.v1.summary.scalar(layer_name + '/lifetime_sparsity',
        #                             sparsity / B)
        tf.compat.v1.summary.scalar(layer_name + '/population_sparsity',
                                    sparsity / (B * n_neurons))
        # --- Retrieve the noise of the gradient of each layer
        # --- = noisy gradients - gradients, this corresponds to
        # --- n_t * gradients where n_t is our noise matrix
        # --- W gradients
        og_W_n = tf.gradients([tf.reduce_mean(noisy_loss)], [w])[0]
        g_W_n = tf.gradients([tf.reduce_mean(noisy_loss_estimate)], [w])[0]
        g = tf.gradients(tf.reduce_mean(loss), w)[0]
        err = -g_W_n + og_W_n
        g_noise = g_W_n - g
        tf.compat.v1.summary.scalar(layer_name + '/mean_grad_noise',
                                    tf.reduce_mean(g_noise))
        tf.compat.v1.summary.histogram(layer_name + '/grad_noise', g_noise)
        tf.compat.v1.summary.scalar(layer_name + '/weights_l2/',
                                    tf.reduce_mean(tf.norm(w)))
        tf.compat.v1.summary.scalar(layer_name + '/grad_est_mse',
                                    tf.reduce_mean((og_W_n - g_W_n)**2))
        tf.compat.v1.summary.scalar(layer_name + '/grad_est_pe',
                                    tf.reduce_mean((-og_W_n + g_W_n) / og_W_n))
        gradients.extend([g_W_n])
        variables.extend([w])
    # NOTE(review): `i` here is the leftover loop index, so this fires when
    # there is more than one weight layer -- confirm the original nesting.
    if i > 0 and params['calc_hessian']:
        # --- Number of parameters does not include batch_size
        hessians = trace_hessian([noisy_loss], weights)
        h_trace = tf.reduce_sum(tf.concat(hessians, axis=1)) / (B * n_samples)
        for i, h in enumerate(hessians):
            layer_name = "layer_" + str(i)
            tf.compat.v1.summary.scalar(layer_name + '/H_trace',
                                        tf.reduce_sum(h) / (B * n_samples))
        tf.compat.v1.summary.scalar('network/H_trace', h_trace)
    # --- Sum all them losses
    loss = tf.reduce_mean(loss)
    noisy_loss = tf.reduce_mean(noisy_loss)
    train_step = optimizer.apply_gradients(zip(gradients, variables),
                                           global_step=global_step)
    if mode == tf.estimator.ModeKeys.PREDICT:
        eval_metrics = {}
        predictions = {
            'preds': tf.nn.softmax(p_phi_y['activations'][-1], axis=1)
        }
        predictions['GCH'] = G + C + H - EC
        for i, J in enumerate(Js):
            predictions['J' + str(i)] = J
        # for i, w in enumerate(weights):
        #     predictions['dGCH' + str(i)] = tf.gradients(
        #         [predictions['GCH']], [w])[0]
        if params['calc_hessian']:
            # --- Number of parameters does not include batch_size
            hessians = trace_hessian([noisy_loss], weights[1:3])
            h_trace = tf.reduce_sum(tf.concat(hessians,
                                              axis=1)) / (B * n_samples)
            predictions['h_trace'] = h_trace
    else:
        predictions = {}
        eval_metrics = {
            "loss/og": tf.compat.v1.metrics.mean(loss),
        }
        if params["loss"] == 'cross_entropy':
            eval_metrics["accuracy"] = tf.compat.v1.metrics.mean(acc)
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      predictions=predictions,
                                      train_op=train_step,
                                      eval_metric_ops=eval_metrics)
def b58decode(v, length):
    """ decode v into a string of len bytes

    NOTE(review): Python 2 only -- the ``0L`` long literal below is a
    syntax error under Python 3, and ``chr``/``''`` here build a byte
    string, not unicode.

    :param v: base58-encoded string to decode
    :param length: expected byte length of the result, or None to skip
        the length check
    :return: decoded byte string, or None if the length check fails
    """
    long_value = 0L
    # Accumulate the big integer; the rightmost character is the least
    # significant base58 digit.
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)
    result = ''
    # Emit the integer as base-256 bytes, most significant first.
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result
    # Each leading zero-digit character of the input encodes one leading
    # zero byte of the output.
    nPad = 0
    for c in v:
        if c == __b58chars[0]: nPad += 1
        else: break
    result = chr(0)*nPad + result
    if length is not None and len(result) != length:
        return None
    return result
def create_sample_meta_info(args):
    """
    Load and parse the samples json for templating.

    :param args: parsed CLI arguments; ``args.samples_json`` is the path
        to a JSON file containing a list of sample dicts.
    :return: dict mapping each sample's ``cmoSampleName`` to its record,
        with every column in ``SAMPLE_META_COLS`` guaranteed present
        (missing columns are filled with "").

    Exits the process with status 1 if the file does not exist.
    """
    if not os.path.exists(args.samples_json):
        print('could not find file: {}!'.format(args.samples_json))
        sys.exit(1)
    # Use a context manager so the file handle is closed deterministically
    # (the original json.load(open(...)) leaked the handle).
    with open(args.samples_json) as fh:
        sample_info = json.load(fh)
    for i, sample_data in enumerate(sample_info):
        # Backfill any expected metadata columns the record is missing.
        for col in set(SAMPLE_META_COLS).difference(sample_data.keys()):
            sample_info[i][col] = ""
    return {record['cmoSampleName']: record for record in sample_info}
def has_remove_arg(args):
    """
    Check whether the "remove" argument is present.

    :param args: Argument list (any container supporting ``in``)
    :return: True if "remove" is found, False otherwise
    """
    # Membership test already yields the boolean; no if/else needed.
    return "remove" in args
def supported_locales(prefix, parsed_args, **kwargs):
    """
    Returns all supported locales.

    Completer-style signature: ``prefix`` and ``parsed_args`` are accepted
    for API compatibility with shell-completion callbacks but are not used
    to filter the result.

    :param prefix: The prefix text of the last word before the cursor on the command line.
    :param parsed_args: The result of argument parsing so far.
    :param kwargs: keyword arguments.
    :returns list: list of all supported locales.
    """
    return constants.locales()
def test_less_than_or_equal(valOne, unitOne, valTwo, unitTwo):
    """Check that ``<=`` compares two distinct instances by VALUE, not identity."""
    lhs = ComplesTypeStub(valOne, unitOne)
    rhs = ComplesTypeStub(valTwo, unitTwo)
    # Distinct objects, yet ordering must hold between them.
    assert lhs is not rhs
    assert lhs <= rhs
def printmat(name, matrix):
    """Pretty-print a matrix with its label and shape.

    Parameters
    ----------
    name : str
        Label shown in the header line.
    matrix : numpy.ndarray
        Array whose shape and contents are printed.
    """
    header = "matrix " + name + ":"
    print(header, matrix.shape)
    print(matrix, "\n")
def pre_process_nli_df(data):
    """
    Apply text preprocessing to both sentence columns of an NLI dataframe.

    :param data: data frame with "premise" and "hypothesis" columns,
        preprocessed in place.
    :type data: pd.DataFrame
    """
    for column in ("premise", "hypothesis"):
        simple_pre_process_text_df(data, text_column=column)
def display_screen_data(ticker: str, export: str = ""):
    """FinViz ticker screener

    Fetches fundamental data for *ticker*, prints it (tabulated when the
    feature flag is on), and optionally exports it.

    Parameters
    ----------
    ticker : str
        Stock ticker
    export : str
        Format to export data
    """
    fund_data = finviz_model.get_data(ticker)
    print("")
    if gtff.USE_TABULATE_DF:
        rendered = tabulate(fund_data, tablefmt="fancy_grid", showindex=True)
    else:
        rendered = fund_data.to_string(header=False)
    print(rendered)
    print("")
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "data", fund_data)
def generate_tool_panel_dict_for_tool_config( guid, tool_config, tool_sections=None ):
    """
    Build the tool panel dictionary entry for one tool config file. The intent is to call
    this per tool config in a repository and merge the results, so each tool can land in a
    different tool panel section.
    {<Tool guid> : [{ tool_config : <tool_config_file>, id: <ToolSection id>, version : <ToolSection version>, name : <TooSection name>}]}
    """
    file_name = suc.strip_path( tool_config )
    section_dicts = generate_tool_section_dicts( tool_config=file_name, tool_sections=tool_sections )
    return { guid: section_dicts }
def finalize_client(client):
    """Stops and closes a client, even if it wasn't started.

    :param client: object exposing ``stop()`` and ``close()``; both are
        called unconditionally, stop before close.
    """
    client.stop()
    client.close()
def test_load_current_directory(create_config, monkeypatch):
    """Init the config using charmcraft.yaml in current directory."""
    # create_config is a fixture that writes the given YAML to a tmp dir.
    tmp_path = create_config(
        """
        type: charm
    """
    )
    monkeypatch.chdir(tmp_path)
    # Freeze datetime so the recorded start timestamp is deterministic.
    with patch("datetime.datetime") as mock:
        mock.utcnow.return_value = "test_timestamp"
        config = load(None)
    assert config.type == "charm"
    assert config.project.dirpath == tmp_path
    assert config.project.config_provided
    assert config.project.started_at == "test_timestamp"
def get_tmp_dir():
    """Return the shared tmp directory under ``result_dir``, creating it if absent."""
    path = result_dir / "tmp"
    path.mkdir(exist_ok=True)
    return path
def get_logs(job_id, user, index):
    """Fetch logs for *job_id* on behalf of *user*, starting at log *index*."""
    service = instance()
    # index may arrive as a string from the request layer; coerce to int.
    return service.get_logs(job_id=job_id, user=user, log_index=int(index))
def repeat_as_list(x: TensorType, n: int):
    """
    :param x: Array/Tensor to be repeated
    :param n: Integer with the number of repetitions
    :return: List of n repetitions of Tensor x (the same object, not copies)
    """
    return [x] * n
def transition(x, concat_axis, nb_filter, dropout_rate=None, weight_decay=1E-4):
    """Apply BatchNorm, Relu 1x1Conv2D, optional dropout and Maxpooling2D
    :parameter x: keras model
    :parameter concat_axis: int -- index of contatenate axis
    :parameter nb_filter: int -- number of filters
    :parameter dropout_rate: int -- dropout rate
    :parameter weight_decay: int -- weight decay factor
    :returns: model
    :return type: keras model, after applying batch_norm, relu-conv, dropout, maxpool

    NOTE(review): despite the summary, the final pooling layer is
    AveragePooling2D, not MaxPooling2D.
    """
    # BN -> ReLU -> 1x1 conv (bias-free; BN supplies the shift) is the
    # DenseNet transition-layer ordering.
    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1), kernel_initializer="he_uniform", padding="same", use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    # Halve the spatial resolution between dense blocks.
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
def test_get_file():
    """Verify that an image is writable to a file after modifying its EXIF metadata.
    Assert the produced file is equivalent to a known baseline.
    """
    image = Image(os.path.join(os.path.dirname(__file__), "noise.jpg"))
    image.software = "Python"
    # Compare hex dumps wrapped at 90 chars so the diff against the
    # checked-in baseline stays line-oriented and readable.
    file_hex = binascii.hexlify(image.get_file()).decode("utf-8")
    assert "\n".join(textwrap.wrap(file_hex, 90)) == MODIFIED_NOISE_FILE_HEX_BASELINE
def _generate(args: list):
    """Handle the mission-generation command.

    :param args: positional command arguments; forwarded to ``generate``
        when non-empty, otherwise the command help is shown.
    """
    # The original also computed args[0].lower() into an unused local;
    # that dead code is removed here.
    if args:
        generate(*args)
    else:
        show_help('generate')
def apply_defaults(conf):
    """
    applies default values for select configurations
    This call will populate default values for various configuration options.
    This method is used in alternative to the default values provided in the
    `add_config_value` call, which allows this extension to apply defaults at
    a more controlled time.
    Args:
        conf: the configuration to modify
    """
    # Scalar defaults: only fill options the user left unset (None).
    if conf.confluence_add_secnumbers is None:
        conf.confluence_add_secnumbers = True
    if conf.confluence_adv_ignore_nodes is None:
        conf.confluence_adv_ignore_nodes = []
    if conf.confluence_adv_restricted is None:
        conf.confluence_adv_restricted = []
    # Normalize a bare cert path into the (cert, key) tuple requests expects.
    if conf.confluence_client_cert is not None:
        if not isinstance(conf.confluence_client_cert, tuple):
            conf.confluence_client_cert = (conf.confluence_client_cert, None)
    # A suffix ending in '.' would produce file names like "page..";
    # fall back to the default suffix in that case too.
    if (not conf.confluence_file_suffix or
            conf.confluence_file_suffix.endswith('.')):
        conf.confluence_file_suffix = '.conf'
    if conf.confluence_jira_servers is None:
        conf.confluence_jira_servers = {}
    if conf.confluence_remove_title is None:
        conf.confluence_remove_title = True
    if conf.confluence_secnumber_suffix is None:
        conf.confluence_secnumber_suffix = '. '
    # Options that may arrive as strings (e.g. from the command line) and
    # must be coerced to booleans.
    config2bool = [
        'confluence_add_secnumbers',
        'confluence_adv_aggressive_search',
        'confluence_adv_hierarchy_child_macro',
        'confluence_adv_permit_raw_html',
        'confluence_adv_trace_data',
        'confluence_adv_writer_no_section_cap',
        'confluence_ask_password',
        'confluence_ask_user',
        'confluence_asset_force_standalone',
        'confluence_disable_autogen_title',
        'confluence_disable_notifications',
        'confluence_disable_ssl_validation',
        'confluence_ignore_titlefix_on_index',
        'confluence_master_homepage',
        'confluence_page_hierarchy',
        'confluence_publish_dryrun',
        'confluence_publish_onlynew',
        'confluence_purge',
        'confluence_purge_from_master',
        'confluence_remove_title',
        'confluence_watch',
    ]
    for key in config2bool:
        if getattr(conf, key) is not None:
            # Only convert truthy non-bool values; empty strings stay as-is.
            if not isinstance(getattr(conf, key), bool) and conf[key]:
                conf[key] = str2bool(conf[key])
    # Same coercion for integer-valued options.
    config2int = [
        'confluence_max_doc_depth',
        'confluence_parent_page_id_check',
        'confluence_publish_root',
        'confluence_timeout',
    ]
    for key in config2int:
        if getattr(conf, key) is not None:
            if not isinstance(getattr(conf, key), int) and conf[key]:
                conf[key] = int(conf[key])
def _bug_data_diff_plot(
    project_name: str, project_repo: pygit2.Repository,
    bugs_left: tp.FrozenSet[PygitBug], bugs_right: tp.FrozenSet[PygitBug]
) -> gob.Figure:
    """Creates a chord diagram representing the diff between two sets of bugs as
    relation between introducing/fixing commits."""
    # Map every commit hash to a node index and place the nodes evenly.
    commits_to_nodes_map = _map_commits_to_nodes(project_repo)
    commit_occurrences: tp.Dict[pygit2.Commit, DiffOccurrence] = {}
    commit_count = len(commits_to_nodes_map.keys())
    commit_coordinates = _compute_node_placement(commit_count)
    # Start every commit as NONE; the diff walk below upgrades entries
    # for commits that appear on either side.
    for commit in project_repo.walk(
        project_repo.head.target.hex, pygit2.GIT_SORT_TIME
    ):
        commit_occurrences[commit] = DiffOccurrence.NONE
    # Chord lines connecting introducing and fixing commits of diffed bugs.
    lines: tp.List[gob.Scatter] = _generate_diff_line_data(
        _diff_raw_bugs(bugs_left, bugs_right), commits_to_nodes_map,
        commit_coordinates, commit_occurrences
    )
    commit_types = {
        commit: __DIFF_TO_NODE_TYPE[do]
        for commit, do in commit_occurrences.items()
    }
    nodes: tp.List[gob.Scatter] = _generate_node_data(
        project_repo, commit_coordinates, commits_to_nodes_map, commit_types
    )
    data = lines + nodes
    layout = _create_layout(f'szz_diff {project_name}')
    return gob.Figure(data=data, layout=layout)
def test_elias_gamma(stream_encoder, data):
    """Test that elias gama encoder and decoder are inverse of each other."""
    # data is a (type-name, value) pair supplied by a parametrized fixture.
    int_type, value = data
    encoded = stream_encoder.call("elias_gamma", str(int_type), value)
    # Round-trip: decoding the encoded bit stream must recover the value.
    decoded = decoder_utils.decode_elias_gamma(_br(encoded))
    assert decoded == value
async def test_client_handles_network_issues_unexpected_close(
    event_loop: asyncio.AbstractEventLoop,
    hyperion_fixture: HyperionFixture,
) -> None:
    """Verify an unexpected close causes a reconnection."""
    # == Verify an empty read causes a disconnect and reconnect.
    (rw, _) = hyperion_fixture.rw, hyperion_fixture.hc
    # == Read returns empty, connection closed, but instantly re-established.
    # Patch open_connection so the reconnect attempt reuses the same mock
    # reader/writer; the scripted flow below is: empty read -> close ->
    # client re-sends serverinfo (tan bumped to 2) -> server responds.
    with patch("asyncio.open_connection", return_value=(rw, rw)):
        await rw.add_flow(
            [
                ("read", ""),
                ("close", None),
                ("write", {**SERVERINFO_REQUEST, **{"tan": 2}}),
                (
                    "read",
                    {**_read_file(FILE_SERVERINFO_RESPONSE), **{"tan": 2}},
                ),
            ]
        )
        await _block_until_done(rw)
def scale_params(cfg):
    """
    Scale:
        * learning rate,
        * weight decay,
        * box_loss_gain,
        * cls_loss_gain,
        * obj_loss_gain
    according to:
        * effective batch size
        * DDP world size
        * image size
        * num YOLO output layers
        * num classes

    Returns the mutated cfg. Mirrors the Ultralytics YOLOv5 hyperparameter
    scaling scheme.
    """
    logger = get_logger(__name__)
    # Scale LR and weight decay
    is_ddp = cfg.sg_model.multi_gpu == MultiGPUMode.DISTRIBUTED_DATA_PARALLEL and torch.distributed.is_initialized()
    world_size = torch.distributed.get_world_size() if is_ddp else 1
    # Scale LR and WD for DDP due to gradients being averaged between devices
    # Equivalent to loss * WORLD_SIZE in ultralytics
    cfg.training_params.initial_lr *= world_size
    cfg.training_params.warmup_bias_lr *= world_size
    cfg.training_params.optimizer_params.weight_decay /= world_size
    # Scale WD with a factor of [effective batch size]/64.
    batch_size, batch_accumulate = cfg.dataset_params.batch_size, cfg.training_params.batch_accumulate
    batch_size_factor = cfg.sg_model.num_devices if is_ddp else cfg.sg_model.dataset_interface.batch_size_factor
    effective_batch_size = batch_size * batch_size_factor * batch_accumulate
    cfg.training_params.optimizer_params.weight_decay *= effective_batch_size / 64.
    # Scale EMA beta to match Ultralytics update
    cfg.training_params.ema_params.beta = cfg.training_params.max_epochs * len(cfg.sg_model.train_loader) / 2000.
    log_msg = \
        f"""
        IMPORTANT:\n
        Training with world size of {world_size}, {'DDP' if is_ddp else 'no DDP'}, effective batch size of {effective_batch_size},
        scaled:
            * initial_lr to {cfg.training_params.initial_lr};
            * warmup_bias_lr to {cfg.training_params.warmup_bias_lr};
            * weight_decay to {cfg.training_params.optimizer_params.weight_decay};
            * EMA beta to {cfg.training_params.ema_params.beta};
        """
    if cfg.training_params.loss == 'yolo_v5_loss':
        # Scale loss gains
        model = cfg.sg_model.net
        model = model.module if hasattr(model, 'module') else model
        num_levels = model._head._modules_list[-1].detection_layers_num
        train_image_size = cfg.dataset_params.train_image_size
        # Normalize against the YOLOv5 reference setup: 3 detection levels,
        # 80 COCO classes, 640px images.
        num_branches_norm = 3. / num_levels
        num_classes_norm = len(cfg.sg_model.classes) / 80.
        image_size_norm = train_image_size / 640.
        cfg.training_params.criterion_params.box_loss_gain *= num_branches_norm
        cfg.training_params.criterion_params.cls_loss_gain *= num_classes_norm * num_branches_norm
        cfg.training_params.criterion_params.obj_loss_gain *= image_size_norm ** 2 * num_branches_norm
        log_msg += \
            f"""
            * box_loss_gain to {cfg.training_params.criterion_params.box_loss_gain};
            * cls_loss_gain to {cfg.training_params.criterion_params.cls_loss_gain};
            * obj_loss_gain to {cfg.training_params.criterion_params.obj_loss_gain};
            """
    logger.info(log_msg)
    return cfg
async def test_loading_non_existing(hass, store):
    """Test that loading a missing store file yields None instead of raising."""
    # Simulate the backing file being absent.
    with patch("homeassistant.util.json.open", side_effect=FileNotFoundError):
        data = await store.async_load()
    assert data is None
def get_model(args):
    """
    Load model and move tensors to a given devices.

    :param args: config namespace; ``args.model`` selects the architecture
        and ``args.device`` is the target device.
    :return: the instantiated model, already moved to ``args.device``.
    :raises ValueError: if ``args.model`` is not a known architecture
        (the original raised an opaque UnboundLocalError in that case).
    """
    model_classes = {
        "lstm": LSTM,
        "lstmattn": LSTMATTN,
        "bert": Bert,
        "lqt": LastQuery,
    }
    try:
        model_cls = model_classes[args.model]
    except KeyError:
        raise ValueError(f"Unknown model name: {args.model!r}") from None
    model = model_cls(args)
    model.to(args.device)
    return model
def convert_to_boolean(value):
    """Turn strings to bools if they look like them
    Truthy things should be True
    >>> for truthy in ['true', 'on', 'yes', '1']:
    ...   assert convert_to_boolean(truthy) == True
    Falsey things should be False
    >>> for falsey in ['false', 'off', 'no', '0']:
    ...   assert convert_to_boolean(falsey) == False
    Other things should be unchanged
    >>> for value in ['falsey', 'other', True, 0]:
    ...   assert convert_to_boolean(value) == value
    """
    if isinstance(value, str):
        lowered = value.lower()
        if lowered in ('t', 'true', 'on', 'yes', '1'):
            return True
        if lowered in ('f', 'false', 'off', 'no', '0'):
            return False
    # Non-strings and unrecognized strings pass through untouched.
    return value
def set_default_input(input):
    """
    Set the default `Input` class.
    (Used for instance, for the telnet submodule.)

    :param input: an ``Input`` instance; anything else trips the assert.
    """
    assert isinstance(input, Input)
    # _default_input is a context-local holder, so the default can differ
    # per execution context.
    _default_input.set(input)
def progress_plots(histories, filename, start_epoch: int = 0, title: str = None,
                   moving_avg: bool = False, beta: float = 0.9, plot_folds: bool = False):
    """
    Plot various metrics as a function of training epoch.
    Writes one PDF per metric, named ``<filename><metric>.pdf``.
    :param histories: list
        List of history objects (each corresponding to a validation fold,
        captured from the output of the tf.keras.model.fit() method)
    :param filename: str
        Prefix for the name of the output figure file.
    :param start_epoch: int
        The first training epoch to be plotted.
    :param title: str
        Title of the figure.
    :param moving_avg: boolean
        If True, compute exponentially weighted moving averages (EWMA)
    :param beta: float
        Parameter for computing EWMA.
    :param plot_folds: boolean
        If True, individual cross-validation folds will be plotted in addition to their mean.
    """
    # Extract the list of metrics from histories:
    # (validation entries are prefixed "val_" and handled alongside their
    # training counterpart, so they are skipped here)
    metrics = []
    for metric, _ in histories[0].history.items():
        if "val_" not in metric:
            metrics.append(metric)
    for metric in metrics:
        fig = plt.figure(figsize=(5, 4))
        ax = fig.add_subplot(111)
        fig.subplots_adjust(bottom=0.13, top=0.87, hspace=0.3, left=0.05, right=0.85, wspace=0)
        if title is not None:
            plt.title(title, fontsize=8)
        if metric == 'loss':
            plt.ylabel("log loss")
        else:
            plt.ylabel(metric)
        plt.xlabel('training epoch')
        train_seq = np.array([h.history[metric] for h in histories], dtype=object)
        if len(train_seq.shape) == 2:  # if there are the same number of epochs for each fold
            n_epochs = train_seq.shape[1]
            mean_train_seq = np.mean(train_seq, axis=0)
            epochs = np.linspace(1, n_epochs, n_epochs, endpoint=True).astype(int)
            # Plot training sequence:
            # (loss curves are drawn in log10 space, matching the axis label)
            if metric == 'loss':
                train_seq = np.log10(train_seq)
                mean_train_seq = np.log10(mean_train_seq)
            line_train, = plt.plot(epochs[start_epoch:], mean_train_seq[start_epoch:], '-', c='darkred', linewidth=1,
                                   zorder=9, label='TR')
            if plot_folds:
                plt.plot(epochs[start_epoch:], train_seq.T[start_epoch:], 'r-', linewidth=1, zorder=8, alpha=0.3)
            if moving_avg:
                ewma_train = ewma(mean_train_seq, beta=beta)  # compute exponentially weighted moving averages
                plt.plot(epochs[start_epoch:], ewma_train[start_epoch:], 'k-', linewidth=1, zorder=10)
            if 'val_' + metric in histories[0].history:
                val_seq = np.array([h.history['val_' + metric] for h in histories])
                mean_val_seq = np.mean(val_seq, axis=0)
                if metric == 'loss':
                    val_seq = np.log10(val_seq)
                    mean_val_seq = np.log10(mean_val_seq)
                line_val, = plt.plot(epochs[start_epoch:], mean_val_seq[start_epoch:], 'g-', linewidth=1, zorder=11,
                                     label='CV')
                if plot_folds:
                    plt.plot(epochs[start_epoch:], val_seq.T[start_epoch:], 'g-', linewidth=1, zorder=12, alpha=0.3)
                if moving_avg:
                    ewma_val = ewma(mean_val_seq, beta)  # compute exponentially weighted moving averages
                    plt.plot(epochs[start_epoch:], ewma_val[start_epoch:], 'k-', linewidth=1, zorder=12)
                plt.legend(handles=[line_train, line_val], loc='upper left')
            else:
                plt.legend(handles=[line_train], loc='upper left')
        else:  # if each fold can have a different number of epochs
            # No meaningful mean exists; draw every fold individually.
            # Plot training sequence:
            for tseq in train_seq:
                if metric == 'loss':
                    tseq = np.log10(np.array(tseq))
                else:
                    tseq = np.array(tseq)
                epochs = np.linspace(1, len(tseq), len(tseq), endpoint=True).astype(int)
                line_train, = plt.plot(epochs[start_epoch:], tseq[start_epoch:], 'r-', linewidth=1,
                                       zorder=8, alpha=0.5, label='TR')
            if 'val_' + metric in histories[0].history:
                val_seq = np.array([h.history['val_' + metric] for h in histories], dtype=object)
                for vseq in val_seq:
                    if metric == 'loss':
                        vseq = np.log10(np.array(vseq))
                    else:
                        vseq = np.array(vseq)
                    epochs = np.linspace(1, len(vseq), len(vseq), endpoint=True).astype(int)
                    line_val, = plt.plot(epochs[start_epoch:], vseq[start_epoch:], 'g-', linewidth=1,
                                         zorder=8, alpha=0.5, label='CV')
                plt.legend(handles=[line_train, line_val], loc='upper left')
            else:
                plt.legend(handles=[line_train], loc='upper left')
        ax.tick_params(axis='both', direction='in', labelleft=False, labelright=True)
        ax.yaxis.tick_right()
        # if 'root_mean_squared_error' in metric:
        #     ax.set_ylim((0.1, 0.3))
        #     # plt.yticks(np.arange(0.0, 0.6, 0.1))
        # if 'loss' in metric:
        #     ax.set_ylim((-0.2, 0.25))
        #     # ax.set_yticks(np.arange(0.9, 1.005, 0.005))
        plt.grid(True)
        plt.savefig(filename + metric + '.pdf', format='pdf')
        plt.close(fig)
def getSampleBandPoints(image, region, **kwargs):
    """
    Function to perform sampling of an image over a given region, using ee.Image.sample(image, region, **kwargs)
    Args:
        image (ee.Image): an image to sample
        region (ee.Geometry): the geometry over which to sample
        **kwargs: forwarded to ee.Image.sample; may override the defaults
            below (numPixels=1000, region=region)
    Returns:
        An ee.FeatureCollection of sampled points along with coordinates
    """
    # Defaults first, then caller kwargs so callers can override them.
    dargs = {
        'numPixels': 1000,
        'region': region
    }
    dargs.update(kwargs)
    sample = image.sample(**dargs)
    return sample
def updateRIPCount(idx,RIPtracker,addRev=0,addFwd=0,addNonRIP=0):
    """Increment the RIP counters of row *idx* and return the tracker.

    The tracker rows are namedtuples, so the row is rebuilt via _replace
    and stored back in place.
    """
    row = RIPtracker[idx]
    RIPtracker[idx] = row._replace(
        revRIPcount=row.revRIPcount + addRev,
        RIPcount=row.RIPcount + addFwd,
        nonRIPcount=row.nonRIPcount + addNonRIP,
    )
    return RIPtracker
def sync(dir_path, archive_pass, client_id, client_secret):
    """
    archives all files(not dirs) in a given location and uploads them to onedrive
    :param dir_path: str
        path to directory with files desired to be synced
    :param archive_pass:
        archive password
    :param client_id:
        one drive client id
    :param client_secret:
        one drive secret
    :return: None
    """
    logger.info('synchronizing data to one drive (not downloading yet)')
    client = onedrive_api.get_onedrive_client(client_id, client_secret)
    # The archive is named after the directory itself.
    archive_name = os.path.basename(dir_path)
    archive_path = archiver.archive_files(dir_path, archive_name, archive_pass)
    archive_cloud_name = archive_name + ".zip"
    onedrive_api.upload(client, archive_path, archive_cloud_name)
    # Keep a local copy of the archive as well.
    backuper.backup_locally(archive_path)
def serialize(obj):
    """ Return a JSON-serializable representation of an object

    Resolution order: the object's own ``to_serializable()``, then its
    ``__dict__``, then a registered custom serializer from ``_serializers``
    keyed by (module, class). The result is wrapped with enough metadata
    to reconstruct an instance on deserialization.
    """
    cls = obj.__class__
    cls_name = cls.__name__
    module_name = cls.__module__
    serializer = None
    if hasattr(obj, "to_serializable"):
        # The object implements its own serialization
        s = obj.to_serializable()
    elif hasattr(obj, "__dict__"):
        # Use the object's __dict__ if it's there
        s = obj.__dict__
    else:
        # Use a custom serializer
        serializer = _serializers.get((module_name, cls_name))
        # If we don't have one, that's a problem
        assert serializer is not None
        # Apply the serializer to the object
        s = serializer[0](obj)
    # Do some sanity checks: we must be able to recreate
    # an instance of this class during de-serialization
    # (classes defined in __main__ cannot be re-imported later)
    assert module_name and module_name != "__main__"
    assert serializer is not None or hasattr(cls, "from_serializable")
    # Return a serialization wrapper dict with enough info
    # for deserialization
    return dict(
        __cls__=cls_name,
        __module__=module_name,
        __obj__=s
    )
def download_file(url, offset=0, filename='tmp', verbosity=True):
    """
    Intended for simulating the wget linux command

    NOTE(review): Python 2 only -- ``except HTTPError, e:`` and the
    ``urllib2`` / ``u.info().getheaders`` API are not valid Python 3.

    :param url: The URL for the resource to be downloaded
    :param offset: Number of bytes to be skipped
    :param filename: Name of file where the content downloaded will be stored
    :param verbosity: Boolean value that indicates the verbosity in logger
    :return: True on success, False on invalid URL; NOTE(review): the
        HTTPError path (e.g. 416 range-not-satisfiable) falls through and
        implicitly returns None -- confirm whether that is intended.
    """
    logger.setLevel(logging.DEBUG) if verbosity else logger.setLevel(logging.INFO)
    # Range header implements the resume-from-offset behavior.
    headers = {'Range': "bytes=%s-" % offset, 'Accept': '*/*', 'Connection': 'keep-alive',
               'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) '
                             'Chrome/23.0.1271.64 Safari/537.11'}
    logger.debug("Setting Range Header for HTTP Request")
    if offset != 0:
        logger.info("This download is being resumed")
    req = urllib2.Request(url, headers=headers)
    try:
        logger.debug("Opening URL")
        u = urllib2.urlopen(req)
        to_download = int(u.info().getheaders("Content-Length")[0])
        logger.debug("The program will download %s bytes" % to_download)
        # Append when resuming, otherwise start a fresh file.
        f = open(filename, 'ab') if offset != 0 else open(filename, 'wb')
        logger.debug("The file is going to be downloaded with a block size of %s bytes" % BLOCK_SIZE)
        buffer_ = u.read(BLOCK_SIZE)
        downloaded = 0
        while buffer_:
            downloaded += len(buffer_)
            logger.debug("%d  %3.2f%%" % (downloaded, downloaded * 100. / to_download))
            f.write(buffer_)
            buffer_ = u.read(BLOCK_SIZE)
        f.close()
        logger.info("The download has finished")
        return True
    except HTTPError, e:
        # 416: requested range past EOF -- the file is already complete.
        if e.code == 416:
            logger.info("This file has been downloaded already")
    except ValueError:
        logger.exception("The string %s is not a valid url" % url)
        return False
def GenerateAggregatorReduceStore(emitter, registers, aggregators, result_type,
                                  lhs_add, rhs_add, left, right, lhs, rhs,
                                  results, results_stride):
  """Emit code that reduces 4 lane aggregators to 1 value, and stores them.

  Fix: the original compared ``result_type is 'float'`` -- identity
  comparison with a string literal only works by accident of interning;
  replaced with ``==`` in both places.
  """
  # Optionally load per-row (lhs) and per-column (rhs) zero-point offsets.
  if lhs_add:
    left_offset = ReadParams(emitter, registers, lhs, left, 4)
    left_offsets = Duplicate(emitter, registers, left, right, 4, left_offset)
  else:
    left_offsets = None
  if rhs_add:
    right_offset = ReadParams(emitter, registers, rhs, right, 4)
  else:
    right_offset = None
  if result_type == 'float':
    result_scale = DuplicateGeneralRegister(
        emitter, registers, right, registers.MapParameter('result_scale'), 4)
  else:
    result_scale = None
  emitter.EmitNewline()
  emitter.EmitComment('Reduce rows.')
  row_temps = []
  # Collapse each row's `right` aggregators down to one register.
  for i in range(left):
    row_temps.append(ReduceAggregators(emitter, registers, aggregators[
        i * right:(i + 1) * right]))
  if lhs_add:
    emitter.EmitNewline()
    emitter.EmitComment('Add lhs offsets to aggregated rows.')
    for (row_temp, left_offset) in zip(row_temps, left_offsets):
      emitter.EmitVAdd('s32', row_temp, row_temp, left_offset)
  if rhs_add:
    emitter.EmitNewline()
    emitter.EmitComment('Add rhs offset to aggregated rows.')
    for row_temp in row_temps:
      emitter.EmitVAdd('s32', row_temp, row_temp, right_offset)
  if result_type == 'float':
    emitter.EmitNewline()
    emitter.EmitComment('Convert to float. Multiply by result scale.')
    for row_temp in row_temps:
      emitter.EmitVCvt('f32', 's32', row_temp, row_temp)
    for row_temp in row_temps:
      emitter.EmitVMul('f32', row_temp, row_temp, result_scale)
  emitter.EmitNewline()
  emitter.EmitComment('Store reduced rows.')
  for row_temp in row_temps:
    emitter.EmitVStoreOffsetE(32, right, row_temp, results, results_stride)
async def async_unload_entry(hass, config_entry):
    """Unload a config entry."""
    # Rebuild the same key used at setup time to locate the controller.
    controller_id = CONTROLLER_ID.format(
        host=config_entry.data[CONF_CONTROLLER][CONF_HOST],
        site=config_entry.data[CONF_CONTROLLER][CONF_SITE_ID]
    )
    # pop() removes the controller from hass.data so it cannot be reused.
    controller = hass.data[DOMAIN].pop(controller_id)
    return await controller.async_reset()
def convert_1D_to_lists(file='j234420m4245_00615.1D.fits'):
    """
    Convert 1D spectral data to lists suitable for putting into dataframes
    and sending to the databases.

    Returns an OrderedDict keyed by lowercased grism name ('g102', 'g141',
    'g800l'), each value a flat dict of prefixed columns, or False when the
    file is missing or contains no spectra.
    """
    from collections import OrderedDict
    import astropy.io.fits as pyfits
    from .. import utils
    if not os.path.exists(file):
        print('Spectrum file not found')
        return False
    im = pyfits.open(file)
    obj_id = im[0].header['ID']
    obj_root = im[0].header['TARGET']
    # R30 (low-resolution) products drop line/cont columns and use a
    # distinct prefix so both resolutions can coexist in one table.
    if '.R30.' in file:
        skip_columns = ['line', 'cont']
        pref = 'spec1d_r30'
    else:
        skip_columns = []
        pref = 'spec1d'
    spectra = OrderedDict()
    has_spectra = False
    for gr in ['G102', 'G141', 'G800L']:
        if gr in im:
            has_spectra = True
            sp = utils.GTable.read(file, hdu=gr)
            prefix = '{0}_{1}_'.format(pref, gr.lower())
            spd = {prefix+'id': obj_id, prefix+'root': obj_root}
            for c in sp.colnames:
                if c in skip_columns:
                    continue
                # tolist() converts numpy columns to JSON-friendly lists.
                spd[prefix+c] = sp[c].tolist()
            spectra[gr.lower()] = spd
    if has_spectra:
        return spectra
    else:
        return False
def get_farthest_three_shots(gps_shots):
    """Return the triple of GPS-tagged shots spanning the largest triangle area."""
    def triangle_area(triple):
        s1, s2, s3 = triple
        return area(np.array(s1.metadata.gps_position),
                    np.array(s2.metadata.gps_position),
                    np.array(s3.metadata.gps_position))
    # max() keeps the first maximal triple, matching the original's
    # dict-insertion-order tie-breaking.
    return max(combinations(gps_shots, 3), key=triangle_area)
def __parse_entry(entry_line):
    """Parse the SOFT file entry name line that starts with '^', '!' or '#'.

    :param entry_line: str -- line from SOFT file
    :returns: tuple -- (entry type, entry value); the value is '' when the
        line carries no '='-separated value.
    """
    # '!' lines embed the entry type after a "<prefix>_" chunk; other
    # markers ('^', '#') are simply stripped off the front.
    if entry_line.startswith("!"):
        cleaned = sub(r"!\w*?_", '', entry_line)
    else:
        cleaned = entry_line.strip()[1:]
    head, sep, tail = cleaned.partition("=")
    if sep:
        return head.strip(), tail.strip()
    return head.strip(), ''
def test_generate_validation_message():
    """Test for static method to generate a ValidationMessage
    Test for a basic message without a kind set.
    """
    # Minimal input: only message text and error flag.
    message = {
        'message': 'message',
        'error': False
    }
    # Everything else should be filled in with defaults by the helper.
    expected = {
        'message': 'message',
        'error': False,
        'kind': 'ValidationMessage',
        'level': 'Info',
        'documents': [],
        'diagnostic': None,
        'name': None,
        'source': None
    }
    generated = configdocs_helper._generate_validation_message(message)
    assert generated == expected
def _mut_insert_is_applied(original, mutated):
    """ Checks if mutation was caused by `mut_insert`.
    :param original: the pre-mutation individual
    :param mutated: the post-mutation individual
    :return: (bool, str). If mutation was caused by function, True. False otherwise.
        str is a message explaining why mutation is not caused by function.
    """
    n_before = len(list(original.primitives))
    n_after = len(list(mutated.primitives))
    # mut_insert always adds at least one primitive.
    if n_before >= n_after:
        reason = "Number of primitives should be strictly greater, was {} is {}.".format(
            n_before, n_after
        )
        return False, reason
    return True, None
def add_base(this, base):
    """ Add a base class to a class.
    :params class this: a class instance
    :params class base: a class to add as a base class
    """
    # Rebuild the instance's class on the fly with `base` appended to
    # its bases, preserving the original class namespace.
    cls = this.__class__
    namespace = this.__class__.__dict__.copy()
    this.__class__ = cls.__class__(cls.__name__, (cls, base), namespace)
    # NOTE(review): this constructs a *new* throwaway `base` instance and
    # re-initializes it -- it does not initialize `this`. Looks like a
    # no-op; confirm intent.
    base().__init__()
def remap_ids(
    mapping_table: Dict[Any, int] = None, default: int = 0, dtype: DTypes = "i"
) -> Model[InT, OutT]:
    """Remap string or integer inputs using a mapping table, usually as a
    preprocess before embeddings. The mapping table can be passed in on input,
    or updated after the layer has been created. The mapping table is stored in
    the "mapping_table" attribute.
    """
    # A mutable default ({}) would be one shared dict across every model
    # created without an explicit table, so updating one model's table
    # would silently update all of them. Use None as the sentinel and
    # allocate a fresh dict per call instead.
    if mapping_table is None:
        mapping_table = {}
    return Model(
        "remap_ids",
        forward,
        attrs={"mapping_table": mapping_table, "dtype": dtype, "default": default},
    )
def test_ant():
    """
    Exercise pushdown_field against the Apache Ant 1.7.0 source tree.
    Expected files affected:
    target_files = [
      "tests/apache-ant/main/org/apache/tools/ant/types/ArchiveFileSet.java",
      "tests/apache-ant/main/org/apache/tools/ant/types/TarFileSet.java",
      "tests/apache-ant/main/org/apache/tools/ant/types/ZipFileSet.java"
    ]
    """
    ant_dir = "tests/apache-ant-1-7-0"
    # Push the 'src' field from ArchiveFileSet down into its subclasses,
    # rewriting files under tests/pushdown_field_ant/.
    print("Success!" if pushdown_field(
        utils.get_filenames_in_dir(ant_dir),
        "org.apache.tools.ant.types",
        "ArchiveFileSet",
        "src",
        [],
        lambda x: "tests/pushdown_field_ant/" + x[len(ant_dir):]
    ) else "Cannot refactor.")
def test_fold_along_delay_mismatched_uncertainty_shape():
    """Test fold_along_delay errors if inputs are a different sizes."""
    delays = np.arange(20) * units.s
    array = np.ones((1, 10, 20)) * units.mK ** 2 * units.Mpc ** 3
    # Deliberately one element longer on the delay axis (21 vs 20) to
    # trigger the shape check.
    errs = np.ones((1, 10, 21)) * units.mK ** 2 * units.Mpc ** 3
    axis = 2
    pytest.raises(ValueError, utils.fold_along_delay, delays, array, errs, axis=axis)
def list_repos_command():
    """List source code repos that have been discovered by the checkoutproject command.

    CLI entry point: parses its own arguments, prints a formatted table of
    repos (optionally filtered), and exits via sys.exit.
    """
    # NOTE(review): dunder metadata defined inside the function body --
    # only visible to the argparse help text below, not at module level.
    __author__ = "Shawn Davis <shawn@develmaycare.com>"
    __date__ = "2017-03-20"
    __help__ = """FILTERING
Use the -f/--filter option to by most project attributes:
- name (partial, case insensitive)
- project
- host (bitbucket, bb, github, gh)
- type (git, hg, svn)
- user
"""
    __version__ = "0.2.1-d"
    # Define options and arguments.
    parser = ArgumentParser(description=__doc__, epilog=__help__, formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        "-a",
        "--all",
        action="store_true",
        dest="show_all",
        help="List all (even remote) repos."
    )
    parser.add_argument(
        "-f=",
        "--filter=",
        action="append",
        dest="criteria",
        help="Specify filter in the form of key:value. This may be repeated. Use ? to list available values."
    )
    # parser.add_argument(
    #     "--hold",
    #     action="store_true",
    #     dest="list_on_hold",
    #     help="Only list projects that are on hold."
    # )
    # parser.add_argument(
    #     "-p=",
    #     "--path=",
    #     default=PYPROJECTUTILS_CONFIG,
    #     dest="pyprojectutils_config",
    #     help="Path to where repo meta data is stored. Defaults to %s" % PYPROJECTUTILS_CONFIG
    # )
    # Access to the version number requires special consideration, especially
    # when using sub parsers. The Python 3.3 behavior is different. See this
    # answer: http://stackoverflow.com/questions/8521612/argparse-optional-subparser-for-version
    # parser.add_argument('--version', action='version', version='%(prog)s 2.0')
    parser.add_argument(
        "-v",
        action="version",
        help="Show version number and exit.",
        version=__version__
    )
    parser.add_argument(
        "--version",
        action="version",
        help="Show verbose version information and exit.",
        version="%(prog)s" + " %s %s by %s" % (__version__, __date__, __author__)
    )
    # Parse arguments. Help, version, and usage errors are automatically handled.
    args = parser.parse_args()
    # print args
    # TODO: Get the path to where repo meta data is stored.
    path = REPO_META_PATH
    # Capture (and validate) filtering options.
    criteria = dict()
    if args.criteria:
        for c in args.criteria:
            # We need to test for the proper format of the each filter given.
            try:
                key, value = c.split(":")
            except ValueError:
                print_warning('Filter must be given in "key:value" format: %s' % c)
                sys.exit(EXIT_INPUT)
            # TODO: Handle requests to display available values by which filtering may occur. Otherwise, set criteria.
            # if value == "?":
            #     print(key)
            #     print("-" * 80)
            #
            #     d = get_distinct_project_attributes(key, path=project_home)
            #     for name, count in d.items():
            #         print("%s (%s)" % (name, count))
            #
            #     print("")
            #
            #     sys.exit(EXIT_OK)
            # else:
            #     criteria[key] = value
            criteria[key] = value
    # Print the report heading.
    heading = "Repos"
    if "type" in criteria:
        heading += " (%s)" % criteria['type']
    print("=" * 130)
    print(heading)
    print("=" * 130)
    # Print the column headings.
    print(
        "%-30s %-30s %-5s %-15s %-20s %-15s %-8s %-5s"
        % ("Name", "Project", "Type", "Host", "User", "Private", "Location", "")
    )
    print("-" * 130)
    # Print the rows.
    repos, errors = get_repos(criteria=criteria, path=path, show_all=args.show_all)
    if len(repos) == 0:
        print("")
        print("No results.")
        sys.exit(EXIT_OK)
    error_count = 0
    for r in repos:
        # Truncate long names to keep the columns aligned.
        if len(r.name) > 30:
            name = r.name[:27] + "..."
        else:
            name = r.name
        if r.project:
            project_title = r.project.truncated_title()
        else:
            project_title = r.name
        if r.has_error:
            error = "(e)"
            error_count += 1
        else:
            error = ""
        if r.is_private in (True, "True", "yes"):
            private = "yes"
        else:
            private = "no"
        print(
            "%-30s %-30s %-5s %-15s %-20s %-15s %-8s %-5s"
            % (name, project_title, r.type, r.host, r.user, private, r.location, error)
        )
    if len(repos) == 1:
        label = "result"
    else:
        label = "results"
    print("-" * 130)
    print("")
    print("%s %s." % (len(repos), label))
    if error_count >= 1:
        print("(e) indicates an error.")
    if len(errors) > 0:
        print("\n".join(errors))
    # Quit.
    sys.exit(EXIT_OK)
def get_text(cell):
    """Return the whitespace-normalised text content of a BeautifulSoup td.

    Every text fragment inside the cell is stripped, the fragments are
    joined with single spaces, and leading/trailing whitespace is removed.
    """
    fragments = (fragment.strip() for fragment in cell.findAll(text=True))
    return ' '.join(fragments).strip()
async def test_dispense_implementation(
    decoy: Decoy,
    mock_cmd_handlers: CommandHandlers,
) -> None:
    """A DispenseRequest should have an execution implementation."""
    # NOTE(review): the original docstring said "PickUpTipRequest" — this
    # test exercises DispenseRequest; corrected above.
    # Request: dispense 50 uL at 1 mm above the bottom of well A3.
    location = WellLocation(origin=WellOrigin.BOTTOM, offset=(0, 0, 1))
    request = DispenseRequest(
        pipetteId="abc",
        labwareId="123",
        wellName="A3",
        wellLocation=location,
        volume=50,
    )
    # Stub the pipetting handler to report 42 uL actually dispensed when
    # called with exactly the request's parameters.
    decoy.when(
        await mock_cmd_handlers.pipetting.dispense(
            pipette_id="abc",
            labware_id="123",
            well_name="A3",
            well_location=location,
            volume=50,
        )
    ).then_return(42)
    impl = request.get_implementation()
    result = await impl.execute(mock_cmd_handlers)
    # The result must echo the volume reported by the pipetting handler,
    # not the requested volume.
    assert result == DispenseResult(volume=42)
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:
        $ python -m googlesamples.assistant
      Run the sample with file input and speaker output:
        $ python -m googlesamples.assistant -i <input file>
      Run the sample with file input and output:
        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)
    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)
    # Configure audio source and sink.  A single SoundDeviceStream is shared
    # between source and sink when neither file argument is given.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )
    # Resolve/register the device BEFORE creating the request handler.
    # BUG FIX: the handler was previously created with the (possibly unset)
    # device_id parameter, so a freshly registered id was never used.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            os.makedirs(os.path.dirname(device_config), exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return
        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        # Initial quiz state passed to assist(); a fresh user trigger resets
        # to this state (quiz off, quiz continuation allowed).
        quiz, continue_quiz = False, True
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
                quiz, continue_quiz = False, True
            # BUG FIX: the follow-up branch previously assigned to
            # continue_conversation2/quiz2/continue_quiz2, leaving the real
            # loop state stale — and `quiz` was unbound on the first pass
            # when --once was given.
            continue_conversation, quiz, continue_quiz = assistant.assist(
                quiz, continue_quiz)
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def parse_args():
    """
    Parse command-line arguments for Faster R-CNN training.

    Returns:
        argparse.Namespace with dataset/model selection, optimization
        settings, checkpointing options and mimicking/decoupling flags.
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset',
                        default='pascal_voc', type=str)
    parser.add_argument('--net', dest='net',
                        help='vgg16, res101',
                        default='vgg16', type=str)
    parser.add_argument('--start_epoch', dest='start_epoch',
                        help='starting epoch',
                        default=1, type=int)
    parser.add_argument('--epochs', dest='max_epochs',
                        help='number of epochs to train',
                        default=20, type=int)
    parser.add_argument('--disp_interval', dest='disp_interval',
                        help='number of iterations to display',
                        default=100, type=int)
    # BUG FIX: help text previously duplicated --disp_interval's
    # ("number of iterations to display") by copy-paste.
    parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                        help='number of iterations between checkpoints',
                        default=10000, type=int)
    parser.add_argument('--save_dir', dest='save_dir',
                        help='directory to save models', default="models",
                        type=str)
    parser.add_argument('--nw', dest='num_workers',
                        help='number of worker to load data',
                        default=0, type=int)
    parser.add_argument('--cuda', dest='cuda',
                        help='whether use CUDA',
                        action='store_true')
    # Typo fix: "imag" -> "image".
    parser.add_argument('--ls', dest='large_scale',
                        help='whether use large image scale',
                        action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether use multiple GPUs',
                        action='store_true')
    parser.add_argument('--bs', dest='batch_size',
                        help='batch_size',
                        default=1, type=int)
    parser.add_argument('--cag', dest='class_agnostic',
                        help='whether perform class_agnostic bbox regression',
                        action='store_true')
    # config optimization
    parser.add_argument('--o', dest='optimizer',
                        help='training optimizer',
                        default="sgd", type=str)
    parser.add_argument('--lr', dest='lr',
                        help='starting learning rate',
                        default=0.001, type=float)
    parser.add_argument('--lr_decay_step', dest='lr_decay_step',
                        help='step to do learning rate decay, unit is epoch',
                        default=5, type=int)
    parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                        help='learning rate decay ratio',
                        default=0.1, type=float)
    # set training session
    parser.add_argument('--s', dest='session',
                        help='training session',
                        default=1, type=int)
    # resume trained model
    parser.add_argument('--r', dest='resume',
                        help='resume checkpoint or not',
                        action='store_true')
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load model',
                        default=1, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load model',
                        default=0, type=int)
    # log and diaplay
    parser.add_argument('--use_tfb', dest='use_tfboard',
                        help='whether use tensorboard',
                        action='store_true')
    parser.add_argument('--name', dest='name',
                        help='name of models', default="faster_rcnn_curr.pth",
                        type=str)
    parser.add_argument('--mm', dest='mimic',
                        help='whether perform mimicking',
                        action='store_true')
    parser.add_argument('--layers', dest='layers',
                        help='tiny network layers',
                        default=101, type=int)
    parser.add_argument('--save_model', dest='save_model',
                        help='name to save', default="my_faster_rcnn_curr.pth",
                        type=str)
    parser.add_argument('--recall', dest='evl_rec',
                        help='whether evaluate recall',
                        action='store_true')
    parser.add_argument('--decouple', dest='decouple',
                        help='whether to use decouple roi pooling',
                        action='store_true')
    parser.add_argument('--scale', dest='scale',
                        help='scale of sigma with respect to ROI',
                        default=1.0, type=float)

    args = parser.parse_args()
    return args
def test_augmentation(text, text_lengths, augmentation_class):
"""
test_augmentation method is written for augment input text in evaluation
:param text: input text
:param text_lengths: text length
:param augmentation_class: augmentation class
:return:
"""
augmentation_text = augmentation_class.test_augment(text, text_lengths)
augmentation_text.append(text)
augmentation_text = torch.FloatTensor(augmentation_text).long()
return augmentation_text | 5,332,276 |
def selfintersection(linear_ring: Points):
    """
    not support warp polygon.
    """
    validate.linear_ring(linear_ring)
    if len(linear_ring) == 4:
        # Triangle ring (3 vertices + closing point): it can only
        # "self-intersect" in the degenerate case where all three vertices
        # are collinear, i.e. twice the signed triangle area is ~zero.
        p0, p1, p2 = linear_ring[0], linear_ring[1], linear_ring[2]
        doubled_area = (
            p0[1] * (p1[0] - p2[0])
            + p1[1] * (p2[0] - p0[0])
            + p2[1] * (p0[0] - p1[0])
        )
        return abs(doubled_area) < EPSILON
    segments = [
        [linear_ring[k], linear_ring[k + 1]]
        for k in range(len(linear_ring) - 1)
    ]
    count = len(segments)
    # Test every pair of non-adjacent segments.  The first and the last
    # segment are adjacent through the ring closure, so that pairing is
    # skipped as well (hence the reduced stop bound when a == 0).
    for a in range(count - 2):
        stop = count - 1 if a == 0 else count
        for b in range(a + 2, stop):
            if intersection(*segments[a], *segments[b]):
                return True
    return False
def ToMercPosition(lat_deg, num_tiles):
    """Calculate position of a given latitude on qt grid.

    LOD is log2(num_tiles)

    Args:
      lat_deg: (float) Latitude in degrees.
      num_tiles: (integer) Number of tiles in the qt grid.
    Returns:
      Floating point position of latitude in tiles relative to equator.
    """
    radians = lat_deg / 180.0 * math.pi
    # Mercator y of the latitude; +/-pi corresponds to the edges of the
    # square Mercator world.
    mercator_y = math.log(math.tan(radians / 2.0 + math.pi / 4.0))
    return num_tiles / 2.0 * (1 + mercator_y / math.pi)
def test_list_disks():
    """Validate disk list"""
    # Placeholder: intentionally failing until the disk-list validation is
    # implemented.  TODO(owner): replace with real assertions.
    assert False
def conv3x3(in_planes, out_planes, stride=1, dilation=1, groups=1, bias=False):
    """Build a 3x3 2D convolution layer.

    Padding is set equal to the dilation rate, which preserves the spatial
    size for stride 1 (a "same"-style 3x3 convolution).

    Args:
        in_planes (int): number of input channels.
        out_planes (int): number of output channels.
        stride (int): stride of the operation.
        dilation (int): dilation rate of the operation.
        groups (int): number of groups in the operation.
        bias (bool): whether to add learnable bias parameter.

    Returns:
        `nn.Conv2d` instance.
    """
    conv_kwargs = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=bias,
    )
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def assert_dataframes_equal(abseil_testcase_instance,
                            actual,
                            expected,
                            sort_by_column=None,
                            nan_equals_nan=False):
    """Assert two dataframes are equal up to row and column reordering.

    Handles non-indexable field types such as `set` and `np.ndarray`.

    Args:
      abseil_testcase_instance: absltest.TestCase (or parameterized.TestCase).
        E.g. pass 'self' from within an absltest.TestCase.
      actual: pd.DataFrame.
      expected: pd.DataFrame.
      sort_by_column: optional string name of a column. This column must be
        sortable (e.g. an int, not an np.array).
      nan_equals_nan: bool. If true, then allow nan == nan.
    """
    tc = abseil_testcase_instance
    tc.assertEqual(
        len(actual), len(expected),
        'Lengths were not equal: {}\nand\n{}'.format(actual, expected))
    err_msg = 'actual:\n{}\nexpected:\n{}'.format(actual, expected)
    tc.assertSetEqual(set(actual.columns), set(expected.columns), err_msg)
    if not set(actual.columns):
        # Both dataframes are empty: nothing left to compare.
        return
    # Align row order of both frames via the same sortable column.
    if sort_by_column:
        actual = actual.sort_values(by=sort_by_column)
        expected = expected.sort_values(by=sort_by_column)
    for act_row, exp_row in zip(actual.to_dict('records'),
                                expected.to_dict('records')):
        tc.assertCountEqual(act_row.keys(), exp_row.keys(), err_msg)
        for col, act_val in act_row.items():
            exp_val = exp_row[col]
            if isinstance(act_val, np.ndarray):
                # Arrays are compared approximately, elementwise.
                np.testing.assert_allclose(act_val, exp_val, err_msg=err_msg)
            elif isinstance(act_val, float) and np.isnan(act_val):
                if nan_equals_nan and np.isnan(exp_val):
                    continue
                raise AssertionError(
                    act_val, exp_val,
                    'Actual value is nan, and nan is not equal to anything. '
                    '{} != {}. {}'.format(act_val, exp_val, err_msg))
            else:
                tc.assertEqual(act_val, exp_val, err_msg)
def extract_current_alarm(strValue):
    """Extract alarm entries from the output of ``show alarm current``.

    Args:
        strValue (str): text printed by the ``show alarm current`` command.

    Returns:
        list: one dict per alarm row, mapping column titles to values.
            Until the value-row pattern is implemented (see FIXME below),
            no rows are matched and an empty list is returned.
    """
    # BUG FIX: the previous placeholder pattern '???' was an invalid regex
    # ("nothing to repeat"), so re.compile() raised re.error on every call
    # and the function could never run.  The pattern is now a None
    # placeholder that is skipped until implemented.
    titleExpr = re.compile(
        r'\s*(Item Description)\s+(Code vOLT)\s+(Object)\s+(Begintime)\s+(Endtime)\s*')
    # FIXME: define the regex that matches one alarm value row.
    valueExpr = None
    ret = []
    titles = None
    for line in strValue.splitlines():
        match = titleExpr.match(line)
        if match is not None:
            titles = match.groups()
        if valueExpr is None or titles is None:
            continue
        match = valueExpr.match(line)
        if match is not None:
            # Pair each captured value with its column title.
            ret.append(dict(zip(titles, match.groups())))
    return ret
def parse_hostnames(filename, hostnames):
    """Parses host names from a comma-separated list or a filename.

    Fails if neither filename nor hostnames provided.

    :param filename: filename with host names (one per line)
    :type filename: string

    :param hostnames: comma-separated list of host names
    :type hostnames: string

    :rtype: list of host names
    """
    # Exactly one of the two sources must be provided (xor check).
    if bool(filename) == bool(hostnames):
        die('Please specify either --filename or --hosts')
    if filename:
        parsed = _parse_hostname_file(filename)
    else:
        parsed = _parse_hostname_list(hostnames)
    if not parsed:
        die('No valid hosts found.')
    return parsed
def ssh(host, command, stdin=None):
    """Run ``command`` (a list of arguments) on ``host`` via ssh.

    ``stdin`` is an optional string piped to the remote process.
    Returns whatever ``run`` returns.
    """
    argv = list(SSH_COMMAND)
    argv.append(ssh_user_host(host))
    argv.extend(command)
    return run(argv, stdin=stdin)
def getScope(parameters, nonConstantParameters):
    """
    Asks user to define the ranges for the parameters.
    The parameters can be constant aswell, in that case, the user will only define one value.

    :param parameters: List containing all parameters, the ranges will be added to each parameter inside this list
    :param nonConstantParameters: Single-element list used as a mutable counter;
        element 0 is incremented once per non-constant parameter.
    :return:
    """
    for parameter in parameters:
        print("please provide a range and resolution for ", parameter.name, " (min:max:step):")
        scope = []
        # Keep prompting until the user supplies either one value (constant)
        # or three values (min:max:step).
        while len(scope) not in (1, 3):
            string = input()
            try:
                scope = list(map(float, string.split(":")))
            except ValueError:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; only malformed numbers
                # should be retried.
                pass
            if len(scope) not in (1, 3):
                print("invalid amount of arguments")
        parameter.scope = scope
        if len(parameter.scope) == 1:
            print("parameter will be kept constant at:", parameter.scope[0])
        else:
            nonConstantParameters[0] += 1
def start_south_north(clean_setup_foglamp_packages, add_south, start_north_pi_server_c_web_api, remove_data_file,
                      foglamp_url, pi_host, pi_port, pi_admin, pi_passwd, asset_name=ASSET):
    """ This fixture
        clean_setup_foglamp_packages: purge the foglamp* packages and install latest for given repo url
        add_south: Fixture that adds a south service with given configuration
        start_north_pi_server_c_web_api: Fixture that starts PI north task
        remove_data_file: Fixture that remove data file created during the tests """
    # Define the template file for fogbench
    fogbench_template_path = os.path.join(
        os.path.expandvars('${FOGLAMP_ROOT}'), 'data/{}'.format(TEMPLATE_NAME))
    # Write a single-sensor template: one numeric datapoint whose min == max,
    # which forces fogbench to emit a constant reading.
    with open(fogbench_template_path, "w") as f:
        f.write(
            '[{"name": "%s", "sensor_values": '
            '[{"name": "%s", "type": "number", "min": %d, "max": %d, "precision": 0}]}]' % (
                asset_name, DATAPOINT, DATAPOINT_VALUE, DATAPOINT_VALUE))
    south_plugin = "coap"
    # south_branch does not matter as these are archives.dianomic.com version install
    add_south(south_plugin, None, foglamp_url, service_name="CoAP FOGL-2964", installation_type='package')
    start_north_pi_server_c_web_api(foglamp_url, pi_host, pi_port, pi_user=pi_admin, pi_pwd=pi_passwd)
    # Hand control to the test; teardown below runs afterwards.
    yield start_south_north
    # Cleanup code that runs after the caller test is over
    remove_data_file(fogbench_template_path)
def _bgzip_file(finput, config, work_dir, needs_bgzip, needs_gunzip, needs_convert, data):
    """Handle bgzip of input file, potentially gunzipping an existing file.

    Handles cases where finput might be multiple files and need to be concatenated.
    Returns the path of the bgzipped output file inside work_dir.
    """
    if isinstance(finput, six.string_types):
        in_file = finput
    else:
        # Multiple input files go through the concatenation path; quality
        # conversion is not supported there yet.
        assert not needs_convert, "Do not yet handle quality conversion with multiple inputs"
        return _bgzip_multiple_files(finput, work_dir, data)
    # Output name: strip any .bz2 suffix and ensure a trailing .gz.
    out_file = os.path.join(work_dir, os.path.basename(in_file).replace(".bz2", "") +
                            (".gz" if not in_file.endswith(".gz") else ""))
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            bgzip = tools.get_bgzip_cmd(config)
            is_remote = objectstore.is_remote(in_file)
            # Fetch/unpack remote inputs when any further processing
            # (gunzip, quality conversion, bgzip, end-trimming) is needed.
            in_file = objectstore.cl_input(in_file, unpack=needs_gunzip or needs_convert or
                                           needs_bgzip or dd.get_trim_ends(data))
            if needs_convert or dd.get_trim_ends(data):
                # Replace the input with a streaming conversion pipe command.
                in_file = fastq_convert_pipe_cl(in_file, data)
            if needs_gunzip and not (needs_convert or dd.get_trim_ends(data)):
                # Decompress on the fly and feed bgzip via stdin.
                if in_file.endswith(".bz2"):
                    gunzip_cmd = "bunzip2 -c {in_file} |".format(**locals())
                else:
                    gunzip_cmd = "gunzip -c {in_file} |".format(**locals())
                bgzip_in = "/dev/stdin"
            else:
                gunzip_cmd = ""
                bgzip_in = in_file
            if needs_bgzip:
                do.run("{gunzip_cmd} {bgzip} -c {bgzip_in} > {tx_out_file}".format(**locals()),
                       "bgzip input file")
            elif is_remote:
                # Remote file already bgzipped: just stream it locally,
                # re-bgzipping only if a conversion pipe was inserted.
                bgzip = "| bgzip -c" if (needs_convert or dd.get_trim_ends(data)) else ""
                do.run("cat {in_file} {bgzip} > {tx_out_file}".format(**locals()), "Get remote input")
            else:
                raise ValueError("Unexpected inputs: %s %s %s %s" % (in_file, needs_bgzip,
                                                                     needs_gunzip, needs_convert))
    return out_file
def test_event_add_maintenance_view_get(client, authenticated_user):
    """test the get method on add maintenance event view"""
    response = client.get("/add-maintenance-event")
    # Page renders successfully for an authenticated user...
    assert response.status_code == 200
    # ...with the maintenance form present in the template context.
    assert "add_maintenance_form" in response.context
    assertTemplateUsed("event/add_maintenance_event_form.html")
def parse_period(data, out):
    """Parse the period int-strings into ints ('OT' => 4).

    Args:
        data: mapping whose 'period' entry is a pandas Series of period
            strings ('1'..'3' or 'OT').
        out: mapping that receives the integer periods under key 'per'.
    """
    period = data['period']
    # Overtime is recorded as the string 'OT'; normalise it to period 4.
    period.replace('OT', '4', inplace=True)
    # Convert once and reuse — previously astype(int) ran twice
    # (once for the debug log, once for the assignment).
    per = period.astype(int)
    log.debug(per)
    out['per'] = per
def cart_step1_choose_type_of_order(request):
    """
    This view is not login required because we want to display some summary of
    ticket prices here as well.
    """
    # "Special" fares (TicketType.other) are shown only when at least one is
    # currently available; the template toggles on show_special.
    special_fares = get_available_fares_for_type(TicketType.other)
    context = {"show_special": bool(special_fares)}
    return TemplateResponse(
        request, "conference/cart/step_1_choose_type_of_order.html", context
    )
def buildAndTrainModel(model, learningRate, batchSize, epochs, trainingData, validationData, testingData, trainingLabels, validationLabels, testingLabels, MODEL_NAME, isPrintModel=True):
    """Take the model and model parameters, build and train the model.

    The model is compiled with Adam/MSE, trained one epoch at a time, and
    after every epoch evaluated on the validation and testing sets.  Metric
    values are appended to the module-level history lists.

    Returns:
        The metric history lists plus the test predictions from the epoch
        with the lowest validation loss (None when epochs == 0).
    """
    # Build and compile model
    # To use other optimizers, refer to: https://keras.io/optimizers/
    # Please do not change the loss function
    optimizer = tf.keras.optimizers.Adam(lr=learningRate)
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.MeanSquaredError())
    if isPrintModel:
        print(model.summary())
    # BUG FIX: previously max_predictions was only ever assigned inside the
    # loop, so epochs == 0 raised UnboundLocalError at the return statement.
    max_predictions = None
    for epoch in range(0, epochs):
        model.fit(trainingData, trainingLabels,
                  epochs=1,
                  verbose=0,
                  batch_size=batchSize,
                  shuffle=False)
        # Evaluate model on the validation split.
        valLoss = model.evaluate(validationData, validationLabels, verbose=False)
        # Collect metrics on the held-out test set.
        predictions = model.predict(testingData)
        MSE, MAE, MAPE, RMSE, PR = getMetrics(testingLabels, predictions)
        MeanSquaredError.append(MSE)
        RootMeanSquaredError.append(RMSE)
        MeanAbsoluteError.append(MAE)
        MeanAbsolutePercentageError.append(MAPE)
        PearsonR.append(PR)
        ValMSE.append(valLoss)
        Epoch.append(epoch)
        # Keep the predictions from the best (lowest) validation loss so far.
        if valLoss <= min(ValMSE):
            max_predictions = predictions
    return MeanSquaredError, RootMeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, ValMSE, PearsonR, Epoch, max_predictions
def vflip():
    """Toggle vertical flipping of camera image."""
    # Catch ajax request with form data
    vflip_val = 'error'
    if request.method == 'POST':
        vflip_val = request.form.get('vflip')
        if vflip_val is not None:
            # BUG FIX: log message and response key previously said
            # 'brightness' — a copy-paste from the brightness handler.
            app.logger.info('Form vflip submitted: %s', vflip_val)
            camera.set_vflip(vflip_val == 'true')
    return {'vflip': vflip_val}
def draw_map(console: 'Curses_Window', map: '2D_Numpy_Array', px: 'float', py: 'float', depth: 'integer'):
    """Render the portion of the map around the player into the console.

    Args:
        console (Curses_Window): A window defined using the curses library
        map (2D_Numpy_Array): 2D Array representation of the map
        px (float): Player's x position
        py (float): Player's y position
        depth (integer): Player's depth of vision
    """
    # Clamp the visible window around the player to the map bounds.
    x_min = max(int(px) - depth, 0)
    x_max = min(int(px) + depth, len(map))
    y_min = max(int(py) - depth, 0)
    y_max = min(int(py) + depth, len(map[0]))
    for row in range(y_min, y_max):
        chars = []
        for col in range(x_min, x_max):
            cell = map[col, row]
            if cell == 1:
                # Wall tile.
                chars.append(u'\u2590')
            elif cell == 2:
                # Player marker.
                chars.append(u'\u25C8')
            else:
                # Empty space.
                chars.append(' ')
        line = ''.join(chars)
        if row != len(map[0]) - 1:
            line += '\n'
        console.addstr(row - y_min, 0, line, curses.color_pair(17))
def getGoalHistogramData(responses):
    """
    Goal Completion histogram chart on project detail page.
    Return: {obj} Counts and % of each Goal Completion rating across given
    responses, or None if the aggregation fails.
    """
    try:
        snapshotResponses = responses.exclude(Q(primary_goal__isnull=True) | Q(primary_goal__name=''))
        respsnapshotResponsesCount = snapshotResponses.count()
        # Get unique list of primary goals and count each primary goal occurance.
        # Then clean up names and change counts to percents
        goals = list(snapshotResponses.values(goalName=F('primary_goal__name')).annotate(goalTotal=Count('primary_goal')).order_by('-goalTotal'))
        # For each unique goal and count found:
        for goal in goals:
            goalResponses = snapshotResponses.filter(primary_goal__name=goal['goalName']).select_related('goal_completed')
            responseYesCount = goalResponses.filter(goal_completed__name__iexact='yes').count()
            # BUG FIX: the lookup previously matched 'yartially' (typo), so
            # the "Partially" bucket was always zero.
            responsePartiallyCount = goalResponses.filter(goal_completed__name__iexact='partially').count()
            responseNoCount = goalResponses.filter(goal_completed__name__iexact='no').count()
            goal['Yes'] = responseYesCount
            goal['Partially'] = responsePartiallyCount
            goal['No'] = responseNoCount
            goal['YesPercent'] = round((responseYesCount/goal['goalTotal'])*100)
            goal['NoPercent'] = round((responseNoCount/goal['goalTotal'])*100)
            goal['PartiallyPercent'] = round((responsePartiallyCount/goal['goalTotal'])*100)
            goal['goalName'] = goal['goalName'].replace('_', ' ').capitalize()
            goal['goalPercent'] = round((goal['goalTotal']/respsnapshotResponsesCount)*100)
    except Exception as ex:
        # NOTE(review): all aggregation errors are swallowed and None is
        # returned; the template presumably handles a missing chart —
        # consider logging `ex` instead of discarding it.
        goals = None
    return goals
def hookes_law(receiver_nodes, sender_nodes, k, x_rest):
    """Applies Hooke's law to springs connecting some nodes.

    Args:
      receiver_nodes: Ex5 tf.Tensor of [x, y, v_x, v_y, is_fixed] features for
        the receiver node of each edge.
      sender_nodes: Ex5 tf.Tensor of [x, y, v_x, v_y, is_fixed] features for
        the sender node of each edge.
      k: Spring constant for each edge.
      x_rest: Rest length of each edge.

    Returns:
      Nx2 Tensor of the force [f_x, f_y] acting on each edge.
    """
    # Displacement vector between the endpoint positions of each spring.
    displacement = receiver_nodes[..., 0:2] - sender_nodes[..., 0:2]
    spring_length = tf.norm(displacement, axis=-1, keepdims=True)
    # F = -k * (|x| - x_rest), directed along the (normalised) displacement.
    magnitude = tf.multiply(k, (spring_length - x_rest) / spring_length)
    return -1 * magnitude * displacement
def test_make_inverse_operator():
    """Test MNE inverse computation (precomputed and non-precomputed)
    """
    # Test old version of inverse computation starting from forward operator
    evoked = _get_evoked()
    noise_cov = read_cov(fname_cov)
    inverse_operator = read_inverse_operator(fname_inv)
    fwd_op = read_forward_solution_meg(fname_fwd, surf_ori=True)
    # NOTE(review): limit_depth_chs=False presumably mirrors how the stored
    # reference operator was built; depth is excluded from the comparison
    # below (check_depth=False) — confirm against the reference data.
    my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
                                      loose=0.2, depth=0.8,
                                      limit_depth_chs=False)
    _compare_io(my_inv_op)
    assert_true(inverse_operator['units'] == 'Am')
    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 1e-2, 1e-2,
                             check_depth=False)
    # Test MNE inverse computation starting from forward operator
    my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
                                      loose=0.2, depth=0.8)
    _compare_io(my_inv_op)
    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 1e-2, 1e-2)
    # The computed operator must carry the coordinate transforms.
    assert_true('dev_head_t' in my_inv_op['info'])
    assert_true('mri_head_t' in my_inv_op)
def check_output_filepath(filepath):
    """
    Check and return an appropriate output_filepath parameter.

    Ensures the file is a csv file. Ensures a value is set. If
    a value is not set or is not a csv, it will return a
    default value.

    :param filepath: string filepath name (may be None or empty)
    :returns: a string representing a filepath location.
    """
    # BUG FIX: a None/empty filepath previously raised AttributeError on
    # .endswith even though the docstring promises the default value.
    if filepath and filepath.endswith('.csv'):
        return filepath
    return "clean_rules_report.csv"
def mock_open_url(url, allow_local=False, timeout=None, verify_ssl=True, http_headers=None):
    """Open local files instead of URLs.

    If it's a local file path, leave it alone; otherwise, open the URL's
    basename as a file under ./files/mock/.

    This is meant as a side effect for unittest.mock.Mock.  The extra
    keyword arguments mirror the real open_url signature and are ignored.
    """
    looks_like_url = re.match(r'https?:', url) is not None
    if looks_like_url:
        # Map the URL's basename onto the local mock-files directory.
        basename = re.sub(r'^.*/([^/]+)$', '\\1', url)
        path = resolve_path('files/mock/' + basename)
    else:
        # Assume it's a file
        path = url
    return (open(path, 'rb'), None, None, None)
def compile_and_install(source, target, platform, arch, flag_mapping):
    """Compiles the given source directories into the mapped target directories.

    - source directory must be configure/make/make install-able
    - target is the directory where the products will be installed
    - platform directory is searched for 'CONFIG_NAME' files and patches with
      the same name as the source directory plus ".patch"
    - arch is the architecture to build for
    - flag_mapping may contain additional flags that will be applied when
      configuring

    NOTE: this is Python 2 code (print statements).  On any build-step
    failure the function restores the working directory, reports via
    _compile_failed, and exits the process.
    """
    # Track the module being built so failure handlers can report it.
    global CURRENTLY_BUILDING
    CURRENTLY_BUILDING = os.path.abspath(source)
    current_dir = os.getcwd()
    parent, module_name = os.path.split(source)
    abs_prefix = os.path.abspath(target)
    # apply patches and find the config script
    config_script = None
    for filename in os.listdir(platform):
        if len(filename) > 6 and '.patch' == filename[-6:]:
            apply_patch(os.path.join(platform, filename), source)
        elif CONFIG_NAME == filename:
            config_script = os.path.join(platform, filename)
    # copy the config script to the source dir, if we have a script
    has_conf = False
    if os.path.isfile(config_script):
        try:
            shutil.copy2(config_script, os.path.join(source, CONFIG_NAME))
            has_conf = True
        except OSError:
            pass
    # prepare to configure; a platform-supplied script replaces ./configure
    config_name = './%s' % CONFIG_NAME if has_conf else './configure'
    config = [config_name, '-arch', arch, '--prefix=%s' % abs_prefix]
    if has_conf and SDK_VERSION:
        config.extend(['-sdk', SDK_VERSION])
    if has_conf and platform in PLATFORM_NAMES:
        config.extend(['-platform', PLATFORM_NAMES[platform]])
    # find additional flags: generic ('*'), per-platform and per-arch entries
    poss_flags = flag_mapping[module_name] if module_name in flag_mapping else None
    if poss_flags and '*' in poss_flags:
        config.extend(poss_flags['*'])
    if poss_flags and platform in poss_flags:
        config.extend(poss_flags[platform])
    if poss_flags and arch in poss_flags:
        config.extend(poss_flags[arch])
    os.chdir(source)
    # configure
    print "---> Configuring %s" % source
    c = subprocess.Popen(config, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    out = c.communicate()[0]
    if 0 != c.returncode:
        os.chdir(current_dir)
        _compile_failed(config, out)
        sys.exit(1)
    # make
    print "---> Building %s" % source
    m = subprocess.Popen(['make'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    out = m.communicate()[0]
    if 0 != m.returncode:
        os.chdir(current_dir)
        _compile_failed(config, out)
        sys.exit(1)
    # make install
    print "---> Installing %s" % source
    i = subprocess.Popen(['make', 'install'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    out = i.communicate()[0]
    if 0 != i.returncode:
        os.chdir(current_dir)
        _compile_failed(config, out)
        sys.exit(1)
    os.chdir(current_dir)
    CURRENTLY_BUILDING = None
    # help libtool by fixing dependency paths
    if FIX_DEP_LIBS:
        for fix_platform in FIX_DEP_LIBS:
            if platform == fix_platform:
                for fix_module in FIX_DEP_LIBS[platform]:
                    if module_name == fix_module:
                        fix_las = FIX_DEP_LIBS[platform][module_name]
                        # loop the .la-files we need to fix
                        for fix_la in fix_las:
                            pat_from, pat_to = fix_las[fix_la]
                            fix_path = os.path.join(target, 'lib')
                            fix = os.path.join(fix_path, fix_la)
                            # edit in-place (fileinput redirects stdout into the file)
                            if os.path.exists(fix):
                                print shell_color('---> Fixing dependency_libs in %s' % fix, 'yellow')
                                for line in fileinput.input(fix, inplace=1):
                                    # like this: https://dev.openwrt.org/attachment/ticket/2233/glib2-dependencies.patch
                                    if line.startswith('dependency_libs'):
                                        print re.sub(pat_from, pat_to, line),
                                    else:
                                        print line,
                            else:
                                print shell_color("I'm told to fix an .la file that does not exist: %s" % fix, 'red', True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.