| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def bad_request(message):
    """Return a JSON response describing a "bad request" error."""
    response = jsonify({'error': 'bad request', 'message': message})
    response.status_code = 400
    return response
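A minimal usage sketch (an assumption, not from the original source): returning the helper's response from a hypothetical Flask view.

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/items', methods=['POST'])
def create_item():
    if not request.is_json:
        # Delegate error formatting to the helper above.
        return bad_request('expected a JSON body')
    return jsonify(request.get_json()), 201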
| 22,600
|
def get_valid_identifier(prop, replacement_character='', allow_unicode=False):
"""Given a string property, generate a valid Python identifier
    Parameters
    ----------
    prop: string
        The property name to convert into a valid identifier.
    replacement_character: string, default ''
        The character to replace invalid characters with.
allow_unicode: boolean, default False
If True, then allow Python 3-style unicode identifiers.
Examples
--------
>>> get_valid_identifier('my-var')
'myvar'
>>> get_valid_identifier('if')
'if_'
>>> get_valid_identifier('$schema', '_')
'_schema'
>>> get_valid_identifier('$*#$')
'_'
"""
# First substitute-out all non-valid characters.
flags = re.UNICODE if allow_unicode else re.ASCII
    valid = re.sub(r'\W', replacement_character, prop, flags=flags)
# If nothing is left, use just an underscore
if not valid:
valid = '_'
# first character must be a non-digit. Prefix with an underscore
# if needed
    if re.match(r'^[\d\W]', valid):
valid = '_' + valid
# if the result is a reserved keyword, then add an underscore at the end
if keyword.iskeyword(valid):
valid += '_'
return valid
| 22,601
|
def main() -> None:
"""
direct program entry point
"""
argp = argparse.ArgumentParser(
description="use regular expressions to rename files",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
argp.add_argument(
"-d",
"--debug",
action="store_true",
help="enable debugging output",
)
argp.add_argument(
"-v",
"--verbose",
action="store_true",
help="print a description of all files renamed or skipped",
)
argp.add_argument(
"-n",
"--dry-run",
action="store_true",
dest="dryrun",
help="don't actually rename anything, just print what would be renamed",
)
argp.add_argument(
"-f",
"--force",
action="store_true",
help="renaming will be performed even if the new filename will overwrite an existing file",
)
argp.add_argument(
"-i",
"--ignorecase",
action="store_true",
help="perform case-insensitive matching",
)
argp.add_argument(
"-m",
"--maxreplace",
type=int,
default=0,
help="the number of replacements to perform per filename; 0 == replace all instances",
)
argp.add_argument(
"-s",
"--search",
type=str,
required=True,
help="search pattern, a regular expression to apply to each filename",
)
argp.add_argument(
"-r",
"--replace",
type=str,
required=True,
help="replacement text, in python string.format-style",
)
argp.add_argument(
"path",
nargs="+",
type=pathlib.Path,
help="the paths of files to consider for renaming",
)
args = argp.parse_args()
# build up the flags for the search regex
regex_flags: int = 0
regex_flags |= re.IGNORECASE if args.ignorecase else 0
regex_flags |= re.DEBUG if args.debug else 0
# compile the regex
search = re.compile(args.search, regex_flags)
# create a partial wrapper around the replacer function so we can conveniently
# use it with re.subn
replace = functools.partial(
replacer,
args.replace if not args.replace.startswith("\\-") else args.replace[1:],
counter=Counter(),
debug=args.debug,
)
vprint = functools.partial(cond_print, enable=args.verbose)
# the operations list will hold our rename task objects so they can be
# reviewed by the user before we perform them
operations: List[Rename] = []
for path in args.path:
new_name, match_count = search.subn(replace, path.name, args.maxreplace)
if not match_count:
vprint("non-match " + str(path))
continue
if new_name == path.name:
vprint("unchanged " + str(path))
continue
operations.append(Rename(path, new_name))
if not operations:
print("Nothing to do")
sys.exit(0)
if args.dryrun:
for operation in operations:
operation.perform(dry_run=True, force=args.force, verbose=args.verbose)
sys.exit(0)
# print the list of operations to perform, ask if the user wants to proceed
print("\n".join([str(op) for op in operations]))
for _ in range(3):
response = input("Perform these operations? (y/N)? >").strip().lower()
if response == "y":
for operation in operations:
operation.perform(force=args.force, verbose=args.verbose)
sys.exit(0)
elif response in ("n", ""):
sys.exit(0)
else:
print("Invalid response")
sys.exit(1)
| 22,602
|
def temporal_padding(x, paddings=(1, 0), pad_value=0):
"""Pad the middle dimension of a 3D tensor
with `padding[0]` values left and `padding[1]` values right.
Modified from keras.backend.temporal_padding
https://github.com/fchollet/keras/blob/3bf913d/keras/backend/theano_backend.py#L590
"""
if not isinstance(paddings, (tuple, list, np.ndarray)):
paddings = (paddings, paddings)
output = T.zeros(x.size(0), x.size(1) + sum(paddings), x.size(2)).to(dev)
output[:, :paddings[0], :] = pad_value
    output[:, paddings[0] + x.size(1):, :] = pad_value
output[:, paddings[0]: paddings[0]+x.size(1), :] = x
return output
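A short usage sketch, assuming `T` is an alias for `torch` and `dev` is a module-level device, as the snippet implies (both names are assumptions here):

import numpy as np
import torch as T

dev = T.device("cpu")  # assumed device; the original module defines its own

x = T.ones(2, 5, 3)                  # (batch, time, features)
out = temporal_padding(x, (1, 2))    # one step of padding on the left, two on the right
print(out.shape)                     # torch.Size([2, 8, 3])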
| 22,603
|
def install_dmm_test(node):
"""Prepare the DMM test envrionment.
Raise errors when failed.
:param node: Dictionary created from topology.
:type node: dict
:returns: nothing.
:raises RuntimeError: If install dmm failed.
"""
arch = Topology.get_node_arch(node)
logger.console('Install DMM on {0} ({1})'.format(node['host'], arch))
ssh = SSH()
ssh.connect(node)
(ret_code, _, stderr) = ssh.exec_command(
'cd {0}/{1} && ./install_prereq.sh {2} 2>&1 | tee '
'log_install_prereq.txt'
.format(con.REMOTE_FW_DIR, con.DMM_SCRIPTS, arch), timeout=600)
if ret_code != 0:
logger.error('Install DMM error: {0}'.format(stderr))
raise RuntimeError('Install prereq failed')
else:
logger.console('Install DMM on {0} success!'.format(node['host']))
| 22,604
|
def get_args_and_hdf5_file(cfg):
"""
Assembles the command line arguments for training and the filename for the hdf5-file
with the results
    :return: args, hdf5_file, filename
"""
common_parameters = [
"--train:mode", "world",
"--train:samples", "256**3",
"--train:batchsize", "64*64*128",
"--train:sampler_importance", "0.01",
'--rebuild_dataset', '51',
"--val:copy_and_split",
"--outputmode", "density:direct",
"--lossmode", "density",
"--activation", BEST_ACTIVATION,
"-l1", "1",
"--lr_step", "100",
"-i", "500",
"--logdir", 'volnet/results/eval_TimeVolumetricFeatures/log',
"--modeldir", 'volnet/results/eval_TimeVolumetricFeatures/model',
"--hdf5dir", 'volnet/results/eval_TimeVolumetricFeatures/hdf5',
'--save_frequency', '50'
]
def getNetworkParameters(network):
channels, layers, params = network
return ["--layers", ':'.join([str(channels)] * (layers - 1))]
def getFourierParameters(network, fourier):
channels, layers, params = network
std, count = fourier
return ['--fouriercount', str(count), '--fourierstd', str(std)]
config, network, fourier, volumetric, time, filename = cfg
launcher = [sys.executable, "volnet/train_volnet.py"]
args = launcher + config + \
common_parameters + \
getNetworkParameters(network) + \
getFourierParameters(network, fourier) + \
volumetric + time + ['--name', filename]
hdf5_file = os.path.join(BASE_PATH, 'hdf5', filename + ".hdf5")
return args, hdf5_file, filename
| 22,605
|
def click_monitoring(e):
"""
monitoring click event
@input: event body
@output: None
"""
global click_count
global prev_dot_x
global prev_dot_y
dot = GOval(SIZE, SIZE, x=e.x - SIZE/2, y=e.y-SIZE/2)
dot.filled = True
dot.fill_color = "#00FFFF"
if click_count % 2 == 0:
        # first click of a pair (click_count is even): place a dot
window.add(dot)
else:
        # second click of a pair (click_count is odd): remove the previous dot and draw a line
prev_dot = window.get_object_at(prev_dot_x, prev_dot_y)
window.remove(prev_dot)
create_line(prev_dot_x, prev_dot_y, e.x, e.y)
click_count += 1
prev_dot_x = e.x
prev_dot_y = e.y
| 22,606
|
def make_hidden_file(file_path: pathlib.Path) -> None:
"""Make hidden file."""
if not file_path.name.startswith('.') and not is_windows():
file_path = file_path.parent / ('.' + file_path.name)
file_path.touch()
if is_windows():
atts = win32api.GetFileAttributes(str(file_path))
win32api.SetFileAttributes(str(file_path), win32con.FILE_ATTRIBUTE_HIDDEN | atts)
| 22,607
|
def get_orientation(y, num_classes=8, encoding='one_hot'):
"""
Args:
y: [B, T, H, W]
"""
# [H, 1]
idx_y = np.arange(y.shape[2]).reshape([-1, 1])
# [1, W]
idx_x = np.arange(y.shape[3]).reshape([1, -1])
# [H, W, 2]
idx_map = np.zeros([y.shape[2], y.shape[3], 2])
idx_map[:, :, 0] += idx_y
idx_map[:, :, 1] += idx_x
# [1, 1, H, W, 2]
idx_map = idx_map.reshape([1, 1, y.shape[2], y.shape[3], 2])
# [B, T, H, W, 1]
y2 = np.expand_dims(y, 4)
# [B, T, H, W, 2]
y_map = idx_map * y2
# [B, T, 1]
    y_sum = np.expand_dims(y.sum(axis=2).sum(axis=2), 2) + 1e-7
# [B, T, 2]
centroids = y_map.sum(axis=2).sum(axis=2) / y_sum
# [B, T, 1, 1, 2]
centroids = centroids.reshape([y.shape[0], y.shape[1], 1, 1, 2])
# Orientation vector
# [B, T, H, W, 2]
ovec = (y_map - centroids) * y2
# Normalize orientation [B, T, H, W, 2]
ovec = (ovec + 1e-8) / \
(np.sqrt((ovec * ovec).sum(axis=-1, keepdims=True)) + 1e-7)
# [B, T, H, W]
angle = np.arcsin(ovec[:, :, :, :, 0])
xpos = (ovec[:, :, :, :, 1] > 0).astype('float')
ypos = (ovec[:, :, :, :, 0] > 0).astype('float')
# [B, T, H, W]
angle = angle * xpos * ypos + (np.pi - angle) * (1 - xpos) * ypos + \
angle * xpos * (1 - ypos) + \
(-np.pi - angle) * (1 - xpos) * (1 - ypos)
angle = angle + np.pi / 8
# [B, T, H, W]
angle_class = np.mod(
np.floor((angle + np.pi) * num_classes / 2 / np.pi), num_classes)
if encoding == 'one_hot':
angle_class = np.expand_dims(angle_class, 4)
clazz = np.arange(num_classes).reshape(
[1, 1, 1, 1, -1])
angle_one_hot = np.equal(angle_class, clazz).astype('float32')
angle_one_hot = (angle_one_hot * y2).max(axis=1)
return angle_one_hot.astype('uint8')
elif encoding == 'class':
# [B, H, W]
return (angle_class * y).max(axis=1).astype('uint8')
else:
raise Exception('Unknown encoding type: {}'.format(encoding))
| 22,608
|
def plot_tempo_curve(f_tempo, t_beat, ax=None, figsize=(8, 2), color='k', logscale=False,
                     xlabel='Time (beats)', ylabel='Tempo (BPM)', xlim=None, ylim=None,
label='', measure_pos=[]):
"""Plot a tempo curve
Notebook: C3/C3S3_MusicAppTempoCurve.ipynb
Args:
f_tempo: Tempo curve
t_beat: Time axis of tempo curve (given as sampled beat axis)
ax: Plot either as figure (ax==None) or into axis (ax==True) (Default value = None)
figsize: Size of figure (Default value = (8, 2))
color: Color of tempo curve (Default value = 'k')
        logscale: Use linear (logscale==False) or logarithmic (logscale==True) tempo axis (Default value = False)
        xlabel: Label for x-axis (Default value = 'Time (beats)')
        ylabel: Label for y-axis (Default value = 'Tempo (BPM)')
        xlim: Limits for x-axis (Default value = None)
        ylim: Limits for y-axis (Default value = None)
        label: Figure labels when plotting into axis (ax==True) (Default value = '')
        measure_pos: Plot measure positions as specified (Default value = [])
Returns:
fig: figure handle
ax: axes handle
"""
fig = None
if ax is None:
fig = plt.figure(figsize=figsize)
ax = plt.subplot(1, 1, 1)
ax.plot(t_beat, f_tempo, color=color, label=label)
ax.set_title('Tempo curve')
if xlim is None:
xlim = [t_beat[0], t_beat[-1]]
if ylim is None:
ylim = [np.min(f_tempo) * 0.9, np.max(f_tempo) * 1.1]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True, which='both')
if logscale:
ax.set_yscale('log')
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.yaxis.set_minor_formatter(ScalarFormatter())
# ax.set_yticks([], minor=True)
# yticks = np.arange(ylim[0], ylim[1]+1, 10)
# ax.set_yticks(yticks)
plot_measure(ax, measure_pos)
return fig, ax
| 22,609
|
def not_pathology(data):
"""Return false if the node is a pathology.
:param dict data: A PyBEL data dictionary
:rtype: bool
"""
return data[FUNCTION] != PATHOLOGY
| 22,610
|
def remove_index_fastqs(fastqs,fastq_attrs=IlluminaFastqAttrs):
"""
Remove index (I1/I2) Fastqs from list
Arguments:
fastqs (list): list of paths to Fastq files
fastq_attrs (BaseFastqAttrs): class to use for
extracting attributes from Fastq names
(defaults to IlluminaFastqAttrs)
Returns:
List: input Fastq list with any index read
Fastqs removed.
"""
return list(filter(lambda fq:
not fastq_attrs(fq).is_index_read,
fastqs))
| 22,611
|
def max(name: "Union[str, List[Expr]]") -> "Expr":
"""
Get maximum value
"""
if isinstance(name, list):
def max_(acc: Series, val: Series) -> Series:
            mask = acc > val
return acc.zip_with(mask, val)
return fold(lit(0), max_, name).alias("max")
return col(name).max()
| 22,612
|
def initialize(Lx, Ly,
solutes, restart_folder,
field_to_subspace,
concentration_init, rad,
enable_NS, enable_EC,
dx,
surface_charge,
permittivity,
**namespace):
""" Create the initial state. """
w_init_field = dict()
if not restart_folder:
if enable_EC:
for solute in ["c_p", "c_m"]:
w_init_field[solute] = df.interpolate(
df.Constant(1e-4), field_to_subspace[solute].collapse())
c_init = df.interpolate(
df.Expression("1./(2*DOLFIN_PI*rad*rad)*exp("
"- (pow(x[0], 2) + pow(x[1], 2))/(2*rad*rad))",
Lx=Lx, Ly=Ly, rad=rad, degree=2),
field_to_subspace["c_n"].collapse())
C_tot = df.assemble(c_init*dx)
c_init.vector()[:] *= concentration_init*Lx*Ly/C_tot
w_init_field["c_n"] = c_init
V_0 = -surface_charge*Ly/permittivity[0]
w_init_field["V"] = df.interpolate(
df.Expression("V_0*(x[1]/Ly-0.5)", Ly=Ly, V_0=V_0, degree=1),
field_to_subspace["V"].collapse())
return w_init_field
| 22,613
|
def main() -> None:
"""Watch files for changes and rebuild."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
add_parser_arguments(parser)
args = parser.parse_args()
path_to_log, event_handler, exclude_list = watch_setup(
default_build_targets=args.default_build_targets,
build_directories=args.build_directories,
patterns=args.patterns,
ignore_patterns_string=args.ignore_patterns_string,
exclude_list=args.exclude_list,
restart=args.restart,
jobs=args.jobs,
serve_docs=args.serve_docs,
serve_docs_port=args.serve_docs_port,
serve_docs_path=args.serve_docs_path,
fullscreen=args.fullscreen,
banners=args.banners,
)
if args.fullscreen:
watch_logfile = (pw_console.python_logging.create_temp_log_file(
prefix=__package__))
pw_cli.log.install(
level=logging.DEBUG,
use_color=True,
hide_timestamp=False,
log_file=watch_logfile,
)
pw_console.python_logging.setup_python_logging(
last_resort_filename=watch_logfile)
watch_thread = Thread(target=watch,
args=(path_to_log, event_handler, exclude_list),
daemon=True)
watch_thread.start()
watch_app = WatchApp(event_handler=event_handler,
debug_logging=args.debug_logging,
log_file_name=watch_logfile)
event_handler.watch_app = watch_app
watch_app.run()
else:
pw_cli.log.install(
level=logging.DEBUG if args.debug_logging else logging.INFO,
use_color=True,
hide_timestamp=False,
)
watch(Path(path_to_log), event_handler, exclude_list)
| 22,614
|
def _get_sentry_sdk():
"""Creates raven.Client instance configured to work with cron jobs."""
# NOTE: this function uses settings and therefore it shouldn't be called
# at module level.
try:
sentry_sdk = __import__("sentry_sdk")
DjangoIntegration = __import__(
"sentry_sdk.integrations.django"
).integrations.django.DjangoIntegration
except ImportError:
raise MissingDependency(
"Unable to import sentry_sdk. "
"Sentry monitor requires this dependency."
)
for setting in (
"CRONMAN_SENTRY_CONFIG",
"SENTRY_CONFIG",
"RAVEN_CONFIG",
):
client_config = getattr(settings, setting, None)
if client_config is not None:
break
else:
client_config = app_settings.CRONMAN_SENTRY_CONFIG
sentry_sdk.init(integrations=[DjangoIntegration()], **client_config)
return sentry_sdk
| 22,615
|
def handle_network() -> None:
"""
Helper for handling network events on all backends
"""
for backend in dict(BACKENDS).values():
backend.handle_network()
| 22,616
|
def get_memos():
"""
Returns all memos in the database, in a form that
can be inserted directly in the 'session' object.
"""
records = [ ]
for record in collection.find( { "type": "dated_memo" } ):
record['date'] = arrow.get(record['date']).isoformat()
del record['_id']
records.append(record)
return sorted(records, key=lambda entry : entry['date'])
| 22,617
|
def filepath(folder, *args, ext='pkl'):
"""Returns the full path of the file with the calculated results
for the given dataset, descriptor, descriptor of the given dataset
Parameters
----------
folder : string
Full path of the folder where results are saved.
args : list or tuple
Instances of `TextureDataset`, `HEP`, `KNeighborsClassifier`, etc.
ext : string
File extension (default pkl).
Returns
-------
fullpath : string
The complete path of the file where features corresponding to the
given dataset and descriptor (and estimator) are stored.
"""
lst = []
for obj in args:
if hasattr(obj, 'acronym'):
item = obj.acronym
else:
item = obj.__name__
lst.append(item)
lst[-1] = lst[-1] + '.' + ext
fullpath = os.path.join(folder, '--'.join(lst))
return fullpath
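A small illustration of how the path is assembled, using hypothetical stand-in objects in place of real dataset/descriptor/estimator instances:

import os

class _Stub:
    """Stand-in exposing the `acronym` attribute the function looks for."""
    def __init__(self, acronym):
        self.acronym = acronym

print(filepath('results', _Stub('KylbergU'), _Stub('LBP')))
# e.g. results/KylbergU--LBP.pkl (the separator depends on the OS)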
| 22,618
|
def assert_type(name: str, val: Any, cls: Any):
"""
Raise an informative error if the passed value isn't of the given type.
Args:
name: User-friendly name of the value to be printed in an exception if raised.
val: The value to check type of.
cls: The type to compare the value's type against.
"""
if not isinstance(val, cls):
raise TypeError(f"{name} must be type '{cls.__name__}'; got '{type(val)}'")
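For example:

assert_type("batch_size", 32, int)          # passes silently
try:
    assert_type("batch_size", "32", int)    # wrong type
except TypeError as exc:
    print(exc)                              # batch_size must be type 'int'; got '<class 'str'>'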
| 22,619
|
def run_vacuum_tasks_table():
"""Run vacuum to reclaim storage."""
logger.info("Start vacuum on tasks table.")
p_con = auth.postgresDB()
# isolation_level 0 will move you out of a transaction block
old_isolation_level = p_con._db_connection.isolation_level
p_con._db_connection.set_isolation_level(0)
query = """
VACUUM tasks
"""
p_con.query(query)
# set isolation_level back to initial value
p_con._db_connection.set_isolation_level(old_isolation_level)
logger.info("Finish vacuum on tasks table.")
| 22,620
|
def psplit(df, idx, label):
"""
    Split the participants with a positive label in df into two sets, and do the same for the
    participants with a negative label. Return two numpy arrays of participant ids; each array
    holds the ids to remove from one of two dataframes so that no participant appears in both
    sets, while keeping half of all participants in df and preserving the prevalence of
    event-positive participants.
"""
pos = np.unique(df.loc[df[label] == 1].index.get_level_values(idx))
all_id = np.unique(df.index.get_level_values(idx))
neg = np.setdiff1d(all_id, pos)
np.random.shuffle(pos)
np.random.shuffle(neg)
rmv_1 = np.concatenate((pos[:len(pos)//2], neg[:len(neg)//2]))
rmv_2 = np.concatenate((pos[len(pos)//2:], neg[len(neg)//2:]))
return rmv_1, rmv_2
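A minimal sketch of a call, using a toy DataFrame with a participant-id index level (the column and level names are made up for illustration):

import numpy as np
import pandas as pd

df = pd.DataFrame(
    {"event": [1, 0, 0, 1, 0, 0]},
    index=pd.MultiIndex.from_tuples(
        [(1, 0), (1, 1), (2, 0), (3, 0), (4, 0), (5, 0)], names=["pid", "visit"]
    ),
)
rmv_1, rmv_2 = psplit(df, "pid", "event")
# each array holds half of the positive and half of the negative participant ids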
| 22,621
|
def se_block(inputs, out_node, scope=None):
# TODO: check feature shape and dimension
"""SENet"""
with tf.variable_scope(scope, "se_block", reuse=tf.AUTO_REUSE):
channel = inputs.get_shape().as_list()[3]
net = tf.reduce_mean(inputs, [1,2], keep_dims=False)
net = fc_layer(net, [channel, out_node], _std=1, scope="fc1")
net = tf.nn.relu(net)
net = fc_layer(net, [out_node, channel], _std=1, scope="fc2")
net = tf.nn.sigmoid(net)
net = inputs * net
return net
| 22,622
|
def deri_cie_ionfrac(Zlist, condifile='adia.exp_phy.info', \
condi_index=False, appfile=False, outfilename=False, rootpath=False):
"""
Derive the CIE ionic fraction based on the physical conditions in
an ASCII file. The only input parameter is the index (array) of
the elements.
Parameters
----------
Zlist: [int]
list of element nuclear charges
Keywords
--------
condifile: string or dictionary
the ASCII file containing the physical condition array. can also
pass in a structure read from the ASCII file;
condi_index: [int]
index array of at which condition position to derive the spectrum;
appfile: str or dictionary of ionic fraction
the pickle file that the new calculation will be appended into.
Could be the dictionary of loaded from the pickle file;
outfilename: str
the name of output pickle file recording the ionic fraction.
The name of the output file is adopted as following sequence:
1. specified by <outfilename>;
2. adopted from <appfile>, if applicable;
3. "tionfrac_Element.List.pkl".
Returns
-------
    Returns 0 on success and -1 on error. A pickle file containing the derived
    CIE ionic fraction at the specified condition positions is created/updated.
"""
# System parameter
atomdbpath = os.environ['ATOMDB']
ionbalfile = atomdbpath+'APED/ionbal/v3.0.7_ionbal.fits'
if not pyatomdb.util.keyword_check(rootpath):
rootpath = os.getcwd()+'/'
NZlist = len(Zlist)
# Output file name
if not pyatomdb.util.keyword_check(outfilename):
if pyatomdb.util.keyword_check(appfile):
if isinstance(appfile, str):
outfilename = appfile
else:
outfilename = 'tciefrac_'
for Z in Zlist:
outfilename += pyatomdb.atomic.Ztoelsymb(Z)
outfilename += '.pkl'
# Check the setting of the condition array
if pyatomdb.util.keyword_check(condifile):
# If it is a string, look for the file name and read it if exists
if isinstance(condifile, str):
confile = os.path.expandvars(rootpath+condifile)
if not os.path.isfile(confile):
print("*** ERROR: no such condition file %s. Exiting ***" \
%(confile))
return -1
conditions = ascii.read(confile)
elif isinstance(condifile, astropy.table.table.Table):
conditions = condifile
else:
print("Unknown data type for condition file. Please pass a " \
"string or an ASCIIList")
return -1
ncondi = len(conditions)
if not pyatomdb.util.keyword_check(condi_index):
condi_index = range(0,ncondi)
else:
if max(condi_index) >= ncondi:
return -1
te_arr = conditions['kT']/pyatomdb.const.KBOLTZ #in K
ionfrac = {}
for Z in Zlist:
ionfrac[Z] = np.zeros([Z+1,ncondi],dtype=float)
for l in condi_index:
print('For Zone-%03d...' % l)
for Z in Zlist:
ionfrac[Z][:,l] = pyatomdb.atomdb.get_ionfrac(ionbalfile, \
Z, te_arr[l])
print('finished.')
# Save calculated ionic fraction as pickle file
tmp = open(outfilename,'wb')
pickle.dump(ionfrac,tmp)
tmp.close()
return 0
| 22,623
|
def get_numpy_include_dirs(sconscript_path):
"""Return include dirs for numpy.
The paths are relatively to the setup.py script path."""
from numscons import get_scons_build_dir
scdir = pjoin(get_scons_build_dir(), pdirname(sconscript_path))
n = scdir.count(os.sep)
dirs = _incdir()
rdirs = []
for d in dirs:
rdirs.append(pjoin(os.sep.join([os.pardir for i in range(n+1)]), d))
return rdirs
| 22,624
|
def invU(U):
"""
Calculate inverse of U Cell.
"""
nr, nc = U.getCellsShape()
mshape = U.getMatrixShape()
assert (nr == nc), "U Cell must be square!"
nmat = nr
u_tmp = admmMath.copyCell(U)
u_inv = admmMath.Cells(nmat, nmat)
for i in range(nmat):
for j in range(nmat):
if (i == j):
u_inv[i,j] = numpy.identity(mshape[0])
else:
u_inv[i,j] = numpy.zeros_like(U[0,0])
for j in range(nmat-1,0,-1):
for i in range(j-1,-1,-1):
tmp = u_tmp[i,j]
for k in range(nmat):
u_tmp[i,k] = u_tmp[i,k] - numpy.matmul(tmp, u_tmp[j,k])
u_inv[i,k] = u_inv[i,k] - numpy.matmul(tmp, u_inv[j,k])
return u_inv
| 22,625
|
def liq_g(drvt,drvp,temp,pres):
"""Calculate liquid water Gibbs energy using F03.
Calculate the specific Gibbs free energy of liquid water or its
derivatives with respect to temperature and pressure using the
Feistel (2003) polynomial formulation.
:arg int drvt: Number of temperature derivatives.
:arg int drvp: Number of pressure derivatives.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:returns: Gibbs free energy in units of
(J/kg) / K^drvt / Pa^drvp.
:raises ValueError: If drvt or drvp are negative.
:Examples:
>>> liq_g(0,0,300.,1e5)
-5.26505056073e3
>>> liq_g(1,0,300.,1e5)
-393.062597709
>>> liq_g(0,1,300.,1e5)
1.00345554745e-3
>>> liq_g(2,0,300.,1e5)
-13.9354762020
>>> liq_g(1,1,300.,1e5)
2.75754520492e-7
>>> liq_g(0,2,300.,1e5)
-4.52067557155e-13
"""
if drvt < 0 or drvp < 0:
errmsg = 'Derivatives {0} cannot be negative'.format((drvt,drvp))
raise ValueError(errmsg)
TRED, PRED = _C_F03[0]
y = (temp - _TCELS)/TRED
z = (pres - _PATM)/PRED
g = 0.
for (j,k,c) in _C_F03[1:]:
if y==0:
if j==drvt:
pwrt = 1.
else:
pwrt = 0.
else:
pwrt = y**(j-drvt)
for l in range(drvt):
pwrt *= j-l
if z==0:
if k==drvp:
pwrp = 1.
else:
pwrp = 0.
else:
pwrp = z**(k-drvp)
for l in range(drvp):
pwrp *= k-l
g += c * pwrt * pwrp
g /= TRED**drvt * PRED**drvp
return g
| 22,626
|
def read(fname):
"""
Utility function to read the README file.
Used for the long_description. It's nice, because now 1) we have a top
level README file and 2) it's easier to type in the README file than to put
a raw string in below ...
"""
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
| 22,627
|
def get_tcp_client(tcp_server_address: str, tcp_server_port: int, session_handler: SessionHandler):
"""Returns the TCP client used in the 15118 communication.
:param tcp_server_address: The TCP server address.
:param tcp_server_port: The TCP server port.
:param session_handler: The session handler that manages the sessions.
:return: transport, protocol -- the objects associated with the TCP connection.
"""
loop = asyncio.get_event_loop()
logger.info("Starting TCP client.")
task = loop.create_connection(lambda: TCPClientProtocol(session_handler), tcp_server_address,
tcp_server_port, ssl=get_ssl_context())
# TODO: set tcp client port using config file
transport, protocol = loop.run_until_complete(task)
return transport, protocol
| 22,628
|
def extract_dates(obj):
"""extract ISO8601 dates from unpacked JSON"""
if isinstance(obj, dict):
new_obj = {} # don't clobber
for k,v in iteritems(obj):
new_obj[k] = extract_dates(v)
obj = new_obj
elif isinstance(obj, (list, tuple)):
obj = [ extract_dates(o) for o in obj ]
elif isinstance(obj, string_types):
obj = _parse_date(obj)
return obj
| 22,629
|
def construct_covariates(states, model_spec):
"""Construct a matrix of all the covariates
that depend only on the state space.
Parameters
---------
states : np.ndarray
Array with shape (num_states, 8) containing period, years of schooling,
the lagged choice, the years of experience in part-time, and the
years of experience in full-time employment, type, age of the youngest child,
indicator for the presence of a partner.
Returns
-------
covariates : np.ndarray
Array with shape (num_states, number of covariates) containing all additional
covariates, which depend only on the state space information.
"""
# Age youngest child
# Bins of age of youngest child based on kids age
# bin 0 corresponds to no kid, remaining bins as in Blundell
# 0-2, 3-5, 6-10, 11+
age_kid = pd.Series(states[:, 6])
bins = pd.cut(
age_kid,
bins=[-2, -1, 2, 5, 10, 11],
labels=[0, 1, 2, 3, 4],
).to_numpy()
# Male wages based on age and education level of the woman
# Wages are first calculated as hourly wages
log_wages = (
model_spec.partner_cf_const
+ model_spec.partner_cf_age * states[:, 0]
+ model_spec.partner_cf_age_sq * states[:, 0] ** 2
+ model_spec.partner_cf_educ * states[:, 1]
)
# Male wages
# Final input of male wages / partner income is calculated on a weekly
# basis. Underlying assumption that all men work full time.
male_wages = np.where(states[:, 7] == 1, np.exp(log_wages) * HOURS[2], 0)
# Equivalence scale
# Depending on the presence of a partner and a child each state is
# assigned an equivalence scale value following the modernized OECD
# scale: 1 for a single woman HH, 1.5 for a woman with a partner,
# 1.8 for a woman with a partner and a child and 1.3 for a woman with
# a child and no partner
equivalence_scale = np.full(states.shape[0], np.nan)
equivalence_scale = np.where(
(states[:, 6] == -1) & (states[:, 7] == 0), 1.0, equivalence_scale
)
equivalence_scale = np.where(
(states[:, 6] == -1) & (states[:, 7] == 1), 1.5, equivalence_scale
)
equivalence_scale = np.where(
(states[:, 6] != -1) & (states[:, 7] == 1), 1.8, equivalence_scale
)
equivalence_scale = np.where(
(states[:, 6] != -1) & (states[:, 7] == 0), 1.3, equivalence_scale
)
assert (
np.isnan(equivalence_scale).any() == 0
), "Some HH were not assigned an equivalence scale"
# Child benefits
# If a woman has a child she receives child benefits
child_benefits = np.where(states[:, 6] == -1, 0, model_spec.child_benefits)
# Collect in covariates vector
covariates = np.column_stack((bins, male_wages, equivalence_scale, child_benefits))
return covariates
| 22,630
|
def clone(pkgbase):
"""Clone or update a git repo.
.. versionadded:: 4.0.0
"""
if os.path.exists('./{0}/'.format(pkgbase)):
if os.path.exists('./{0}/.git'.format(pkgbase)):
# git repo, pull
try:
os.chdir(pkgbase)
subprocess.check_call(['git', 'pull', '--no-rebase'])
except subprocess.CalledProcessError as e:
raise pkgbuilder.exceptions.CloneError(e.returncode)
finally:
os.chdir('..')
else:
raise pkgbuilder.exceptions.ClonePathExists(pkgbase)
else:
repo_url = pkgbuilder.aur.AUR.base + '/' + pkgbase + '.git'
if DS.deepclone:
cloneargs = []
else:
cloneargs = ['--depth', '1']
try:
subprocess.check_call(['git', 'clone'] + cloneargs +
[repo_url, pkgbase])
except subprocess.CalledProcessError as e:
raise pkgbuilder.exceptions.CloneError(e.returncode)
| 22,631
|
def target_ok(target_file, *source_list):
"""Was the target file created after all the source files?
If so, this is OK.
If there's no target, or the target is out-of-date,
it's not OK.
"""
try:
mtime_target = datetime.datetime.fromtimestamp(
target_file.stat().st_mtime)
except FileNotFoundError:
logger.debug("File %s not found", target_file)
return False
logger.debug("Compare %s %s >ALL %s",
target_file, mtime_target, source_list)
# If a source doesn't exist, we have bigger problems.
times = (
datetime.datetime.fromtimestamp(source_file.stat().st_mtime)
for source_file in source_list
)
return all(mtime_target > mtime_source for mtime_source in times)
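Usage sketch (file names are hypothetical; the module-level `logger` and the `datetime` import from the original source are assumed):

import pathlib

target = pathlib.Path("build/report.html")
sources = [pathlib.Path("data/input.csv"), pathlib.Path("templates/report.tpl")]
if not target_ok(target, *sources):
    print("target missing or older than a source; rebuild needed")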
| 22,632
|
def check_oblique_montante(grille, x, y):
"""Alignements diagonaux montants (/) : allant du coin bas gauche au coin haut droit"""
symbole = grille.grid[y][x]
    # Rising diagonal alignment of the form XXX., with node (x, y) being the lowest and leftmost
if grille.is_far_from_top(y) and grille.is_far_from_right(x):
if all(symbole == grille.grid[y - i - 1][x + i + 1] for i in range(2)):
my_play = grille.play_if_possible(x + 3, y - 2)
if my_play is not None:
return my_play
    # Rising diagonal alignments, with node (x, y) being the highest and rightmost
if grille.is_far_from_bottom(y) and grille.is_far_from_left(x):
        # Diagonal alignment of the form .XXX
if all(symbole == grille.grid[y + i + 1][x - i - 1] for i in range(2)):
if grille.is_very_far_from_bottom(y):
my_play = grille.play_if_possible(x - 3, y + 3)
if my_play is not None:
return my_play
if symbole == grille.grid[y + 3][x - 3]:
            # Diagonal alignment of the form X.XX
if symbole == grille.grid[y + 2][x - 2]:
my_play = grille.play_if_possible(x - 1, y + 1)
if my_play is not None:
return my_play
            # Diagonal alignment of the form XX.X
if symbole == grille.grid[y + 1][x - 1]:
my_play = grille.play_if_possible(x - 2, y + 2)
if my_play is not None:
return my_play
return None
| 22,633
|
def main(argv, name):
"""wrapper to mkmasks"""
ntasks = -1
ncores = -1
groupsize = -1
htcores = False
readable = False
try:
opts, _ = getopt.getopt(argv, "ht:c:g:xv")
except getopt.GetoptError:
print_help(name)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help(name)
sys.exit()
elif opt in "-c":
ncores = int(arg)
elif opt in "-t":
ntasks = int(arg)
elif opt in "-g":
groupsize = int(arg)
if opt == '-x':
htcores = True
if opt == '-v':
readable = True
if groupsize == -1:
groupsize = ntasks
if ntasks == -1:
print('Please, specify number of tasks!')
sys.exit(3)
if ncores == -1:
print('Please, specify number of cores!')
sys.exit(4)
mkmasks(ntasks, ncores, groupsize, htcores, readable)
| 22,634
|
def _rotate_the_grid(lon, lat, rot_1, rot_2, rot_3):
"""Rotate the horizontal grid at lon, lat, via rotation matrices rot_1/2/3
Parameters
----------
lon, lat : xarray DataArray
giving longitude, latitude in degrees of LLC horizontal grid
rot_1, rot_2, rot_3 : np.ndarray
rotation matrices
Returns
-------
xg, yg, zg : xarray DataArray
cartesian coordinates of the horizontal grid
"""
# Get cartesian of 1D view of lat/lon
xg, yg, zg = _convert_latlon_to_cartesian(lon.values.ravel(),lat.values.ravel())
# These rotations result in:
# xg = 0 at pt1
# yg = 1 at pt1
# zg = 0 at pt1 and pt2 (and the great circle that crosses pt1 & pt2)
xg, yg, zg = _apply_rotation_matrix(rot_1, (xg,yg,zg))
xg, yg, zg = _apply_rotation_matrix(rot_2, (xg,yg,zg))
xg, yg, zg = _apply_rotation_matrix(rot_3, (xg,yg,zg))
# Remake into LLC xarray DataArray
xg = llc_tiles_to_xda(xg, grid_da=lon, less_output=True)
yg = llc_tiles_to_xda(yg, grid_da=lat, less_output=True)
zg = llc_tiles_to_xda(zg, grid_da=lon, less_output=True)
return xg, yg, zg
| 22,635
|
def dict_mapper(data):
"""Mapper from `TypeValueMap` to :class`dict`"""
out = {}
for k, v in data.items():
if v.type in (iceint.TypedValueType.TypeDoubleComplex,
iceint.TypedValueType.TypeFloatComplex):
out[k] = complex(v.value.real, v.value.imag)
elif v.type in (iceint.TypedValueType.TypeDoubleComplexSeq,
iceint.TypedValueType.TypeFloatComplexSeq):
out[k] = [ complex(i.real, i.imag) for i in v.value ]
elif v.type == iceint.TypedValueType.TypeDirection:
out[k] = (v.value.coord1, v.value.coord2, str(v.value.sys))
elif v.type == iceint.TypedValueType.TypeNull:
out[k] = None
else:
out[k] = v.value
return out
| 22,636
|
def predict_walkthrough_actions():
""" Given the observation, predict the next action from the walkthrough. """
action_predictor = lambda obs: choice(['n','s','e','w'])
rom = '/home/matthew/workspace/text_agents/roms/zork1.z5'
bindings = load_bindings(rom)
env = FrotzEnv(rom, seed=bindings['seed'])
data = load_dataset()
correct = 0
for example in data:
state = pickle.load(open(example['state'],'rb'))
env.set_state(state)
gold_diff = example['walkthrough_diff']
action_prediction = action_predictor(example['obs'])
env.step(action_prediction)
actual_diff = str(env._get_world_diff())
if actual_diff == gold_diff:
correct += 1
print('Correctly predicted {} out of {} walkthrough actions'.format(correct, len(data)))
| 22,637
|
def parse_prophage_tbl(phispydir):
"""
Parse the prophage table and return a dict of objects
:param phispydir: The phispy directory to find the results
:return: dict
"""
if not os.path.exists(os.path.join(phispydir, "prophage.tbl")):
sys.stderr.write("FATAL: The file prophage.tbl does not exist\n")
sys.stderr.write("Please run create_prophage_tbl.py -d {}\n".format(phispydir))
sys.exit(-1)
    p = re.compile(r'^(.*)_(\d+)_(\d+)$')
locations = {}
with open(os.path.join(phispydir, "prophage.tbl"), 'r') as f:
for l in f:
(ppid, location) = l.strip().split("\t")
m = p.search(location)
(contig, beg, end) = m.groups()
beg = int(beg)
end = int(end)
if beg > end:
(beg, end) = (end, beg)
if contig not in locations:
locations[contig] = []
locations[contig].append((beg, end))
return locations
| 22,638
|
async def get_odds(database, params):
"""Get odds based on parameters."""
LOGGER.info("generating odds")
start_time = time.time()
players = [dict(
civilization_id=data['civilization_id'],
user_id=data['user_id'],
winner=data['winner'],
team_id=data['team_id']
) for data in params['players']]
teams = by_key(players, 'team_id')
num_unique_civs = len({p['civilization_id'] for p in players if 'civilization_id' in p})
keys = []
queries = []
map_filter = ("matches.map_name=:map_name", {'map_name': params['map_name']})
if 'teams' in params:
keys.append('teams')
queries.append(odds_query(database, teams, params['type_id'], user_filter=True))
if 'map_name' in params:
keys.append('teams_and_map')
queries.append(odds_query(database, teams, params['type_id'], match_filters=map_filter, user_filter=True))
if num_unique_civs > 1:
keys.append('teams_and_civilizations')
queries.append(odds_query(database, teams, params['type_id'], civ_filter=True, user_filter=True))
keys.append('civilizations')
queries.append(odds_query(database, teams, params['type_id'], civ_filter=True))
if 'map_name' in params and num_unique_civs > 1:
keys.append('civilizations_and_map')
queries.append(odds_query(database, teams, params['type_id'], match_filters=map_filter, civ_filter=True))
results = await asyncio.gather(*queries)
LOGGER.debug("computed all odds in %f", time.time() - start_time)
return dict(zip(keys, results))
| 22,639
|
def split_by_normal(cpy):
"""split curved faces into one face per triangle (aka split by
normal, planarize). in place"""
for name, faces in cpy.iteritems():
new_faces = []
for points, triangles in faces:
x = points[triangles, :]
normals = np.cross(x[:, 1]-x[:, 0], x[:, 2]-x[:, 0])
normals /= np.sqrt(np.sum(np.square(normals), axis=1))[:, None]
if np.allclose(normals, normals[0][None, :]):
new_faces.append((points, triangles))
else:
for triangle in triangles:
new_faces.append((points[triangle, :],
np.arange(3, dtype=np.intc).reshape((1, 3))))
cpy[name] = new_faces
return cpy
| 22,640
|
def generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params):
""" Generates a vector of indices to partition the data for training.
NO CHECKING IS DONE: it is assumed that the data could be partitioned
in the specified block quantities and that the block quantities describe a
coherent partition.
Parameters
----------
numTrain : int
Number of training data points
numTest : int
Number of testing data points
numValidation : int
Number of validation data points (may be zero)
params : dictionary with parameters
Contains the keywords that control the behavior of the function
(uq_train_bks, uq_valid_bks, uq_test_bks)
Return
----------
indexTrain : int numpy array
Indices for data in training
indexValidation : int numpy array
Indices for data in validation (if any)
indexTest : int numpy array
Indices for data in testing (if merging)
"""
# Extract required parameters
numBlocksTrain = params['uq_train_bks']
numBlocksValidation = params['uq_valid_bks']
numBlocksTest = params['uq_test_bks']
numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest
# Determine data size and block size
if numBlocksTest > 0:
# Use all data and re-distribute the partitions
numData = numTrain + numValidation + numTest
else:
# Preserve test partition
numData = numTrain + numValidation
blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal # integer division with rounding
remainder = numData - blockSize * numBlocksTotal
if remainder != 0:
print("Warning ! Requested partition does not distribute data evenly between blocks. "
"Testing (if specified) or Validation (if specified) will use different block size.")
sizeTraining = numBlocksTrain * blockSize
sizeValidation = numBlocksValidation * blockSize
# Fill partition indices
# Fill train partition
Folds = np.arange(numData)
np.random.shuffle(Folds)
indexTrain = Folds[:sizeTraining]
# Fill validation partition
indexValidation = None
if numBlocksValidation > 0:
indexValidation = Folds[sizeTraining:sizeTraining + sizeValidation]
# Fill test partition
indexTest = None
if numBlocksTest > 0:
indexTest = Folds[sizeTraining + sizeValidation:]
return indexTrain, indexValidation, indexTest
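A small sketch of the expected `params` dictionary and call (block counts chosen only for illustration):

import numpy as np

params = {'uq_train_bks': 6, 'uq_valid_bks': 2, 'uq_test_bks': 2}
idx_train, idx_val, idx_test = generate_index_distribution_from_blocks(
    numTrain=800, numTest=200, numValidation=0, params=params)
print(len(idx_train), len(idx_val), len(idx_test))  # 600 200 200 for these sizes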
| 22,641
|
def sct2e(sc, sclkdp):
"""sct2e(SpiceInt sc, SpiceDouble sclkdp)"""
return _cspyce0.sct2e(sc, sclkdp)
| 22,642
|
def test_check_flow_data17():
""" input flow out of flow index"""
with pytest.raises(AssertionError) as err_info:
PyDampCheck.check_flow_data(flow_fail_17)
assert str(err_info.value) == 'component 3 function 2 input in not in range of the flow index'
| 22,643
|
def test_main(monkeypatch, test_dict: FullTestDict):
"""
- GIVEN a list of words
- WHEN the accent dict is generated
- THEN check all the jisho info is correct and complete
"""
word_list = convert_list_of_str_to_kaki(test_dict['input'])
sections = test_dict['jisho']['expected_sections']
expected_output = test_dict['jisho']['expected_output']
def get_word_from_jisho_url(url: URL) -> Kaki:
match = re.search(r"words\?keyword=(.+)", url)
assert match is not None
return Kaki(match.group(1))
def get_api_response(url: URL) -> str:
word = get_word_from_jisho_url(url)
return json.dumps(sections[word]["api_response"])
monkeypatch.setattr("requests.get", lambda url: FakeResponse(get_api_response(url)))
assert jisho.main(word_list) == expected_output
| 22,644
|
def main():
""" Create object and call apply """
ntp_obj = NetAppOntapNTPServer()
ntp_obj.apply()
| 22,645
|
def normalizeFilename(filename):
"""Take a given filename and return the normalized version of it.
Where ~/ is expanded to the full OS specific home directory and all
relative path elements are resolved.
"""
result = os.path.expanduser(filename)
result = os.path.abspath(result)
return result
| 22,646
|
def do_plugin_create(cc, args):
"""Register a new plugin with the Iotronic service."""
field_list = ['name', 'code', 'callable', 'public', 'extra']
fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
fields = utils.args_array_to_dict(fields, 'extra')
fl = fields['code']
with open(fl, 'r') as fil:
fields['code'] = fil.read()
if args.params:
fields['parameters'] = utils.json_from_file(args.params)
plugin = cc.plugin.create(**fields)
data = dict([(f, getattr(plugin, f, '')) for f in
res_fields.PLUGIN_DETAILED_RESOURCE.fields])
cliutils.print_dict(data, wrap=72, json_flag=args.json)
| 22,647
|
def get_tn(tp, fp, fn, _all):
"""
Args:
tp (Set[T]):
fp (Set[T]):
fn (Set[T]):
_all (Iterable[T]):
Returns:
Set[T]
"""
return set(_all) - tp - fp - fn
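For example:

tp, fp, fn = {1, 2}, {3}, {4}
print(get_tn(tp, fp, fn, range(1, 7)))  # {5, 6}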
| 22,648
|
def test_posdef_symmetric3():
""" The test return 0 if the matrix has 0 eigenvalue.
Is this correct?
"""
data = np.array([[1.,1],[1,1]], dtype = theano.config.floatX)
assert mv.posdef(data) ==0
| 22,649
|
def download_cow_head():
"""Download cow head dataset."""
return _download_and_read('cowHead.vtp')
| 22,650
|
def select_tests(blocks, match_string_list, do_test):
"""Remove or keep tests from list in WarpX-tests.ini according to do_test variable"""
if do_test not in [True, False]:
raise ValueError("do_test must be True or False")
if (do_test == False):
for match_string in match_string_list:
print('Selecting tests without ' + match_string)
blocks = [ block for block in blocks if not match_string in block ]
else:
for match_string in match_string_list:
print('Selecting tests with ' + match_string)
blocks = [ block for block in blocks if match_string in block ]
return blocks
| 22,651
|
def get_last_ds_for_site(session, idescr: ImportDescription, col: ImportColumn, siteid: int):
"""
Returns the newest dataset for a site with instrument, valuetype and level fitting to the ImportDescription's column
To be used by lab imports where a site is encoded into the sample name.
"""
q = session.query(db.Dataset).filter(
db.Dataset._site == siteid,
db.Dataset._valuetype == col.valuetype,
db.Dataset._source == idescr.instrument,
)
if col.level is not None:
q = q.filter(db.Dataset.level == col.level)
return q.order_by(db.Dataset.end.desc()).limit(1).scalar()
| 22,652
|
def main():
""" Main Program """
pygame.init()
# Set the height and width of the screen
size = [SCREEN_WIDTH, SCREEN_HEIGHT]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Dino")
# Create all the levels
level_list = []
level_list.append(Level(VIEW_RECT, TILE_WIDTH, TILE_HEIGHT,
LEVEL_1_MAP, LEVEL_1_INFO))
level_list.append(Level(VIEW_RECT, TILE_WIDTH, TILE_HEIGHT,
LEVEL_2_MAP, LEVEL_2_INFO))
level_list.append(Level(VIEW_RECT, TILE_WIDTH, TILE_HEIGHT,
LEVEL_3_MAP, LEVEL_3_INFO))
level_list.append(Level(VIEW_RECT, TILE_WIDTH, TILE_HEIGHT,
LEVEL_4_MAP, LEVEL_4_INFO))
# Set the current level
current_level_no = 0
current_level = level_list[current_level_no]
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
current_level.player.go_left()
if event.key == pygame.K_RIGHT:
current_level.player.go_right()
if event.key == pygame.K_UP:
current_level.player.jump(current_level)
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT\
and current_level.player.change_x < 0:
current_level.player.stop()
if event.key == pygame.K_RIGHT\
and current_level.player.change_x > 0:
current_level.player.stop()
# Update the level
current_level.update()
if current_level.done:
current_level_no += 1
if current_level_no >= len(level_list):
done = True
else:
current_level = level_list[current_level_no]
# Draw the level
current_level.draw(screen)
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Limit to 60 frames per second
clock.tick(60)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
| 22,653
|
def test_eq_issue62_last_component_not_va():
"""
VA is not last when components are sorted alphabetically.
"""
test_tdb = """
ELEMENT VA VACUUM 0.0000E+00 0.0000E+00 0.0000E+00!
ELEMENT AL FCC_A1 2.6982E+01 4.5773E+03 2.8322E+01!
ELEMENT CO HCP_A3 5.8933E+01 4.7656E+03 3.0040E+00!
ELEMENT CR BCC_A2 5.1996E+01 4.0500E+03 2.3560E+01!
ELEMENT W BCC_A2 1.8385E+02 4.9700E+03 3.2620E+01!
PHASE FCC_A1 % 2 1 1 !
CONSTITUENT FCC_A1 :AL,CO,CR,W : VA% : !
"""
equilibrium(Database(test_tdb), ['AL', 'CO', 'CR', 'W', 'VA'], ['FCC_A1'],
{"T": 1248, "P": 101325, v.X("AL"): 0.081, v.X("CR"): 0.020, v.X("W"): 0.094})
| 22,654
|
def get_cart_from_request(request, cart_queryset=Cart.objects.all()):
"""Get cart from database or return unsaved Cart
:type cart_queryset: saleor.cart.models.CartQueryset
:type request: django.http.HttpRequest
:rtype: Cart
"""
if request.user.is_authenticated():
cart = get_user_cart(request.user, cart_queryset)
user = request.user
else:
token = request.get_signed_cookie(Cart.COOKIE_NAME, default=None)
cart = get_anonymous_cart_from_token(token, cart_queryset)
user = None
if cart is not None:
return cart
else:
return Cart(user=user)
| 22,655
|
def _is_ge(series, value):
""" Returns the index of rows from series where series >= value.
Parameters
----------
series : pandas.Series
The data to be queried
value : list-like
The values to be tested
Returns
-------
index : pandas.index
The index of series for rows where series >= value.
"""
series = series[series.ge(value)]
return series.index
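For example:

import pandas as pd

s = pd.Series([1, 5, 3, 7], index=list("abcd"))
print(_is_ge(s, 3).tolist())  # ['b', 'c', 'd']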
| 22,656
|
def preprocess_image(image, image_size, is_training=False, test_crop=True):
"""Preprocesses the given image.
Args:
image: `Tensor` representing an image of arbitrary size.
image_size: Size of output image.
is_training: `bool` for whether the preprocessing is for training.
test_crop: whether or not to extract a central crop of the images
(as for standard ImageNet evaluation) during the evaluation.
Returns:
A preprocessed image `Tensor` of range [0, 1].
"""
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if is_training:
return preprocess_for_train(image, image_size, image_size)
else:
return preprocess_for_eval(image, image_size, image_size, crop=test_crop)
| 22,657
|
async def wait_for_reaction(self, message):
""" Assert that ``message`` is reacted to with any reaction.
:param discord.Message message: The message to test with
:returns: The reaction object.
:rtype: discord.Reaction
    :raises NoResponseError:
"""
def check_reaction(reaction, user):
return (
reaction.message.id == message.id
and user == self.target
and reaction.message.channel == self.channel
)
try:
result = await self.client.wait_for(
"reaction_add", timeout=self.client.timeout, check=check_reaction
)
except TimeoutError:
raise NoResponseError
else:
return result
| 22,658
|
def test_get_active_invalid_key():
"""
gets the value of given key which is unavailable from
active section of given config store.
it should raise an error.
"""
with pytest.raises(ConfigurationStoreKeyNotFoundError):
config_services.get_active('environment', 'missing_key')
| 22,659
|
def test_isclose(func_interface):
"""Tests for numpoly.isclose."""
poly1 = numpoly.polynomial([1e10*X, 1e-7])
poly2 = numpoly.polynomial([1.00001e10*X, 1e-8])
assert_equal(func_interface.isclose(poly1, poly2), [True, False])
poly1 = numpoly.polynomial([1e10*X, 1e-8])
poly2 = numpoly.polynomial([1.00001e10*X, 1e-9])
assert_equal(func_interface.isclose(poly1, poly2), [True, True])
poly2 = numpoly.polynomial([1e10*Y, 1e-8])
assert_equal(func_interface.isclose(poly1, poly2), [False, True])
| 22,660
|
def _multivariate_normal_log_likelihood(X, means=None, covariance=None):
"""Calculate log-likelihood assuming normally distributed data."""
X = check_array(X)
n_samples, n_features = X.shape
if means is None:
means = np.zeros_like(X)
else:
means = check_array(means)
assert means.shape == X.shape
if covariance is None:
covariance = np.eye(n_features)
else:
covariance = check_array(covariance)
assert covariance.shape == (n_features, n_features)
log_likelihood = 0
for t in range(n_samples):
log_likelihood += ss.multivariate_normal.logpdf(
X[t], mean=means[t], cov=covariance)
return log_likelihood
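A quick sanity-check sketch, assuming `check_array` comes from scikit-learn and `ss` is `scipy.stats`, as the snippet implies:

import numpy as np
import scipy.stats as ss
from sklearn.utils import check_array

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
print(_multivariate_normal_log_likelihood(X))  # log-likelihood under zero mean, identity covariance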
| 22,661
|
def show_hdf5_structure(filename, filepath=''):
"""
Show madrigal hdf5 file structure in console.
Example:
fn = "/home/leicai/01_work/00_data/madrigal/DMSP/20151102/dms_20151102_16s1.001.hdf5"
        show_hdf5_structure(fn)
"""
with h5py.File(os.path.join(filepath, filename), 'r') as fh5:
pybasic.dict_print_tree(fh5, value_repr=True, dict_repr=True, max_level=None)
| 22,662
|
def morethan(kt, n):
    """
    Arguments:
    - `kt`: iterable of (word, count) pairs
    - `n`: count threshold
    """
    total = 0
    temp = 0
    r = 0
    for (word, k) in kt:
        total += k
        if k > n:
            r += 1
            temp += k
    print("Words with frequency greater than %s: %s in total" % (n, r))
    print("They account for %s of all tokens" % (1.0 * temp / total))
    print(20 * "=")
| 22,663
|
def lemmatize(text):
"""
tokenize and lemmatize english messages
Parameters
----------
text: str
text messages to be lemmatized
Returns
-------
list
list with lemmatized forms of words
"""
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
if treebank_tag.startswith('V'):
return wordnet.VERB
if treebank_tag.startswith('N'):
return wordnet.NOUN
if treebank_tag.startswith('R'):
return wordnet.ADV
# try to transfer to Noun else
# else:
return wordnet.NOUN
# lemmatize
wordpos = nltk.pos_tag(tokenize(text))
lmtzer = WordNetLemmatizer()
return [lmtzer.lemmatize(word, pos=get_wordnet_pos(pos)) for word, pos in wordpos]
| 22,664
|
def prune_non_overlapping_boxes(boxes1, boxes2, min_overlap):
"""Prunes the boxes in boxes1 that overlap less than thresh with boxes2.
For each box in boxes1, we want its IOA to be more than min_overlap with
at least one of the boxes in boxes2. If it does not, we remove it.
Arguments:
boxes1: a float tensor with shape [N, 4].
boxes2: a float tensor with shape [M, 4].
min_overlap: minimum required overlap between boxes,
to count them as overlapping.
Returns:
boxes: a float tensor with shape [N', 4].
keep_indices: a long tensor with shape [N'] indexing kept bounding boxes in the
first input tensor ('boxes1').
"""
with tf.name_scope('prune_non_overlapping_boxes'):
overlap = ioa(boxes2, boxes1) # shape [M, N]
overlap = tf.reduce_max(overlap, axis=0) # shape [N]
keep_bool = tf.greater_equal(overlap, min_overlap)
keep_indices = tf.squeeze(tf.where(keep_bool), axis=1)
boxes = tf.gather(boxes1, keep_indices)
return boxes, keep_indices
| 22,665
|
def get_namespace_from_node(node):
"""Get the namespace from the given node
Args:
node (str): name of the node
Returns:
namespace (str)
"""
parts = node.rsplit("|", 1)[-1].rsplit(":", 1)
return parts[0] if len(parts) > 1 else u":"
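For example:

print(get_namespace_from_node("|grp|rig:arm_ctrl"))  # rig
print(get_namespace_from_node("|grp|arm_ctrl"))      # :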
| 22,666
|
def PricingStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
| 22,667
|
def test_to_string():
"""test if a credit card outputs the right to str value"""
credit_card = CreditCard(
number = '4111111111111111',
exp_mo = '02',
exp_yr = '2012',
first_name = 'John',
last_name = 'Doe',
cvv = '911',
strict = False
)
# safe check
assert_true(credit_card.is_valid())
# checking if our str() method (or repr()) is ok
final_str = '<CreditCard -- John Doe, visa, ************1111, expires: 02/2012>'
assert_equals(str(credit_card), final_str)
| 22,668
|
def get_structures(defect_name: str,
output_path: str,
bdm_increment: float=0.1,
bdm_distortions: list = None,
bdm_type="BDM",
):
"""Imports all the structures found with BDM and stores them in a dictionary matching BDM distortion to final structure.
Args:
defect_name (str) :
name of defect (e.g "vac_1_Sb_0")
output_path (str) :
path where material folder is
bdm_increment (float):
Distortion increment for BDM.
(default: 0.1)
bdm_distortions (list):
List of distortions applied to nearest neighbours instead of default ones. (e.g. [-0.5, 0.5])
(default: None)
bdm_type (str):
BDM or champion
(default: BDM)
Returns:
        Dictionary matching BDM distortion to final structure."""
defect_structures = {}
try:
# Read BDM_parameters from BDM_metadata.json
with open(f"{output_path}/BDM_metadata.json") as json_file:
bdm_parameters = json.load(json_file)['BDM_parameters']
bdm_distortions = bdm_parameters['BDM_distortions']
bdm_distortions = [i*100 for i in bdm_distortions]
except: # if there's not a BDM metadata file
if bdm_distortions:
bdm_distortions = [i*100 for i in bdm_distortions]
else:
            bdm_distortions = range(-60, 70, int(bdm_increment * 100))  # if user didn't specify BDM distortions
rattle_dir_path = output_path + "/"+ defect_name + "/" + bdm_type + "/" + defect_name +"_" + "only_rattled"
if os.path.isdir(rattle_dir_path): #check if rattle folder exists (if so, it means we only applied rattle (no BDM as 0 change in electrons),
# hence grab the rattle & Unperturbed, not BDM distortions)
try:
path= rattle_dir_path + "/vasp_gam/CONTCAR"
defect_structures['rattle'] = grab_contcar(path)
except:
print("Problems in get_structures")
defect_structures['rattle'] = "Not converged"
else:
for i in bdm_distortions:
key = i / 100 #key used in dictionary. Using the same format as the one in dictionary that matches distortion to final energy
i = '{:.1f}'.format(i)
if i == "0.0":
i = "-0.0" #this is the format used in defect file name
path = output_path + "/"+ defect_name + "/" + bdm_type + "/" + defect_name +"_" + str(i) + "%_BDM_Distortion/vasp_gam/CONTCAR"
try :
defect_structures[key] = grab_contcar(path)
            except (FileNotFoundError, IndexError, ValueError):
print("Error grabbing structure.")
print("Your defect path is: ", path)
defect_structures[key] = "Not converged"
except:
print("Problem in get_structures")
print("Your defect path is: ", path)
defect_structures[key] = "Not converged"
try:
defect_structures["Unperturbed"] = grab_contcar(output_path + "/"+ defect_name + "/" + bdm_type + "/" + defect_name +"_" + "Unperturbed_Defect" + "/vasp_gam/CONTCAR")
except FileNotFoundError:
print("Your defect path is: ", path)
defect_structures[key] = "Not converged"
return defect_structures
| 22,669
|
def create_whimsy_value_at_clients(number_of_clients: int = 3):
"""Returns a Python value and federated type at clients."""
value = [float(x) for x in range(10, number_of_clients + 10)]
type_signature = computation_types.at_clients(tf.float32)
return value, type_signature
| 22,670
|
def format_task_numbers_with_links(tasks):
"""Returns formatting for the tasks section of asana."""
project_id = data.get('asana-project', None)
def _task_format(task_id):
if project_id:
asana_url = tool.ToolApp.make_asana_url(project_id, task_id)
return "[#%d](%s)" % (task_id, asana_url)
else:
return "#%d" % task_id
return "\n".join([_task_format(tid) for tid in tasks])
| 22,671
|
def get_tick_indices(tickmode, numticks, coords):
"""
Ticks on the axis are a subset of the axis coordinates
This function returns the indices of y coordinates on which a tick should be displayed
:param tickmode: should be 'auto' (automatically generated) or 'all'
:param numticks: minimum number of ticks to display, only applies to 'auto' mode
:param coords: list of coordinates along the axis
:return indices: ticks indices in the input list of y coordinates
:return numchar: maximum number of characters required to display ticks, this is useful to preserve alignments
"""
if tickmode == 'all' or (tickmode == 'auto' and numticks >= len(coords)):
# Put a tick in front of each row
indices = list(range(len(coords)))
else:
        # If tickmode is 'auto', put at least 'numticks' ticks
tick_spacing = 5 # default spacing between ticks
# Decrease the tick spacing progressively until we get the desired number of ticks
indices = []
while len(indices) < numticks:
indices = list(range(0, len(coords), tick_spacing))
tick_spacing -= 1
# Compute the number of characters required to display ticks
numchar = max(len(str(NiceNumber(coords[i]))) for i in indices)
return indices, numchar
| 22,672
|
def fpIsNormal(a, ctx=None):
"""Create a Z3 floating-point isNormal expression.
"""
return _mk_fp_unary_pred(Z3_mk_fpa_is_normal, a, ctx)
| 22,673
|
def test_bench():
"""Runs benchmarks before and after and compares the results."""
os.chdir(get_repo_root_path())
# Get numbers for current HEAD.
return_code, stdout, stderr = _run_cargo_bench(PR_BENCH_RESULTS_FILE)
# Even if it is the first time this test is run, the benchmark tests should pass.
# For this purpose, we need to explicitly check the return code.
assert return_code == 0, "stdout: {}\n stderr: {}".format(stdout, stderr)
# Get numbers from upstream tip, without the changes from the current PR.
_git_checkout_upstream_branch()
return_code, stdout, stderr = _run_cargo_bench(UPSTREAM_BENCH_RESULTS_FILE)
# Before checking any results, let's just go back to the PR branch.
# This way we make sure that the cleanup always happens even if the test fails.
_git_checkout_pr_branch()
if return_code == 0:
# In case this benchmark also ran successfully, we can call critcmp and compare the results.
_run_critcmp()
else:
# The benchmark did not run successfully, but it might be that it is because a benchmark does not exist.
# In this case, we do not want to fail the test.
if "error: no bench target named `main`" in stderr:
# This is a bit of a &*%^ way of checking if the benchmark does not exist.
# Hopefully it will be possible to check it in another way...soon
print("There are no benchmarks in master. No comparison can happen.")
else:
assert return_code == 0, "stdout: {}\n stderr: {}".format(stdout, stderr)
| 22,674
|
def test_extract_command():
"""
Test the command-line script extract feature
"""
with mock.patch('uflash.extract') as mock_extract:
uflash.main(argv=['-e', 'hex.hex', 'foo.py'])
mock_extract.assert_called_once_with('hex.hex', ['foo.py'])
| 22,675
|
def existingFile(filename):
""" 'type' for argparse - check that filename exists """
if not os.path.exists(filename):
raise argparse.ArgumentTypeError("{0} does not exist".format(filename))
return filename
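# Hypothetical usage sketch (not part of the original snippet): existingFile is intended to
# be passed as the type= callable of an argparse argument, so missing paths are rejected
# while parsing.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("config", type=existingFile, help="path to an existing config file")
# parser.parse_args(["missing.txt"]) would exit with the error "missing.txt does not exist"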
| 22,676
|
def test_get_by_id() -> None:
"""Tests fetching a match by ID."""
match = Match()
assert Match.get_by_id(match.id) is None
match.put_in_pool()
assert Match.get_by_id(match.id) is match
| 22,677
|
def app():
"""
    Set up our Flask test app; this only gets executed once.
:return: Flask app
"""
_app = create_app("testing")
# Establish an application context before running the tests.
ctx = _app.app_context()
ctx.push()
yield _app
ctx.pop()
| 22,678
|
def siqs_find_next_poly(n, factor_base, i, g, B):
"""Compute the (i+1)-th polynomials for the Self-Initialising
Quadratic Sieve, given that g is the i-th polynomial.
"""
v = lowest_set_bit(i) + 1
z = -1 if math.ceil(i / (2 ** v)) % 2 == 1 else 1
b = (g.b + 2 * z * B[v - 1]) % g.a
a = g.a
b_orig = b
if (2 * b > a):
b = a - b
assert ((b * b - n) % a == 0)
g = Polynomial([b * b - n, 2 * a * b, a * a], a, b_orig)
h = Polynomial([b, a])
for fb in factor_base:
if a % fb.p != 0:
fb.soln1 = (fb.ainv * (fb.tmem - b)) % fb.p
fb.soln2 = (fb.ainv * (-fb.tmem - b)) % fb.p
return g, h
| 22,679
|
def get_column(value):
"""Convert column number on command line to Python index."""
if value.startswith("c"):
# Ignore c prefix, e.g. "c1" for "1"
value = value[1:]
try:
col = int(value)
    except ValueError:
stop_err("Expected an integer column number, not %r" % value)
if col < 1:
stop_err("Expect column numbers to be at least one, not %r" % value)
return col - 1
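# Hypothetical usage sketch (not part of the original snippet): both plain numbers and the
# "c"-prefixed form are accepted, and the result is a zero-based index.
assert get_column("c3") == 2
assert get_column("1") == 0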
| 22,680
|
def sdi(ts_split, mean=False, keys=None):
"""
Compute the Structural Decoupling Index (SDI).
    i.e. the ratio between the norm of the "high" and the norm of the "low"
"graph-filtered" timeseries.
If the given dictionary does not contain the keywords "high" and "low",
the SDI is computed as the ratio between the norm of the second and
the norm of the first dictionary entry.
"keys" can be used to indicate the order of the two keys, or to select two
elements of a bigger dictionary.
Parameters
----------
    ts_split : dict of numpy.ndarray
A dictionary containing two entries. If the two entries are "low" and
"high", then SDI will be computed as the norm of the high vs the norm
        of the low, otherwise as the ratio between the second (second key in
sorted keys) and the first.
mean : bool, optional
If True, compute mean over the last axis (e.g. between subjects)
keys : None or list of strings, optional
Can be used to select two entries from a bigger dictionary
and/or to specify the order in which the keys should be read (e.g.
        forcing a different order from the sorted keys).
Returns
-------
numpy.ndarray
Returns the structural decoupling index
Raises
------
ValueError
If keys are provided but not contained in the dictionary
If keys are not provided and the dictionary has more than 2 entries
"""
# #!# Implement acceptance of two matrices and not only dictionary
if keys is None:
keys = list(ts_split.keys())
else:
if all(item in list(ts_split.keys()) for item in keys) is False:
raise ValueError(f'The provided keys {keys} do not match the '
'keys of the provided dictionary '
f'({list(ts_split.keys())})')
if len(keys) != 2:
raise ValueError('`structural_decoupling_index` function requires '
'a dictionary with exactly two timeseries as input.')
check_keys = [item.lower() for item in keys]
if all(item in ['low', 'high'] for item in check_keys):
# Case insensitively reorder the items of dictionary as ['low', 'high'].
keys = [keys[check_keys.index('low')], keys[check_keys.index('high')]]
norm = dict.fromkeys(keys)
for k in keys:
norm[k] = np.linalg.norm(ts_split[k], axis=1)
LGR.info('Computing Structural Decoupling Index.')
sdi = norm[keys[1]] / norm[keys[0]]
if sdi.ndim >= 2 and mean:
sdi = sdi.mean(axis=1)
return sdi
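# Hypothetical usage sketch (not part of the original snippet; it assumes the module
# defining sdi above, including its LGR logger, is importable): with "low"/"high" keys the
# result is norm(high) / norm(low) along axis 1, i.e. one SDI value per row.
import numpy as np

rng = np.random.default_rng(0)
ts_split = {"low": rng.random((10, 100)), "high": rng.random((10, 100))}
index = sdi(ts_split)
assert index.shape == (10,)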
| 22,681
|
def Class_Property (getter) :
"""Return a descriptor for a property that is accessible via the class
and via the instance.
::
>>> from _TFL._Meta.Property import *
>>> from _TFL._Meta.Once_Property import Once_Property
>>> class Foo (object) :
... @Class_Property
... def bar (cls) :
... "Normal method bar"
... print ("Normal method bar called")
... return 42
... @Class_Property
... @classmethod
... def baz (cls) :
... "classmethod baz"
... print ("classmethod baz called")
... return "Frozz"
... @Class_Property
... @Class_Method
... def foo (cls) :
... "Class_Method foo"
... print ("Class_Method foo called")
... return "Hello world"
... @Class_Property
... @Once_Property
... def qux (cls) :
... "Once property qux"
... print ("Once property qux")
... return 42 * 42
...
>>> foo = Foo ()
>>> Foo.bar
Normal method bar called
42
>>> foo.bar
Normal method bar called
42
>>> foo.bar = 137
>>> Foo.bar
Normal method bar called
42
>>> foo.bar
137
>>> Foo.bar = 23
>>> Foo.bar
23
>>> print (Foo.baz)
classmethod baz called
Frozz
>>> print (foo.baz)
classmethod baz called
Frozz
>>>
>>> print (Foo.foo)
Class_Method foo called
Hello world
>>> print (foo.foo)
Class_Method foo called
Hello world
>>>
>>> Foo.qux
Once property qux
1764
>>> foo.qux
1764
>>> foo2 = Foo ()
>>> foo2.qux
1764
>>> Foo.qux
1764
"""
if hasattr (getter, "__func__") :
return _Class_Property_Descriptor_ (getter)
else :
return _Class_Property_Function_ (getter)
| 22,682
|
def initialize_third_party():
"""Load common dependencies."""
spdlog()
catch2()
fmt()
| 22,683
|
def naive_act_norm_initialize(x, axis):
"""Compute the act_norm initial `scale` and `bias` for `x`."""
x = np.asarray(x)
axis = list(sorted(set([a + len(x.shape) if a < 0 else a for a in axis])))
min_axis = np.min(axis)
reduce_axis = tuple(a for a in range(len(x.shape)) if a not in axis)
var_shape = [x.shape[a] for a in axis]
var_shape_aligned = [x.shape[a] if a in axis else 1
for a in range(min_axis, len(x.shape))]
mean = np.reshape(np.mean(x, axis=reduce_axis), var_shape)
bias = -mean
scale = 1. / np.reshape(
np.sqrt(np.mean((x - np.reshape(mean, var_shape_aligned)) ** 2,
axis=reduce_axis)),
var_shape
)
return scale, bias, var_shape_aligned
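# Hypothetical usage sketch (not part of the original snippet): initialising act-norm over
# the channel axis of an NHWC batch; applying (x + bias) * scale then yields roughly zero
# mean and unit variance per channel.
import numpy as np

x = np.random.randn(16, 8, 8, 3)
scale, bias, var_shape_aligned = naive_act_norm_initialize(x, axis=[-1])
normalized = (x + bias) * scale
assert np.allclose(normalized.mean(axis=(0, 1, 2)), 0.0, atol=1e-6)
assert np.allclose(normalized.std(axis=(0, 1, 2)), 1.0, atol=1e-6)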
| 22,684
|
def cmpTensors(t1, t2, atol=1e-5, rtol=1e-5, useLayout=None):
"""Compare Tensor list data"""
assert (len(t1) == len(t2))
for i in range(len(t2)):
if (useLayout is None):
assert(t1[i].layout == t2[i].layout)
dt1 = t1[i].dataAs(useLayout)
dt2 = t2[i].dataAs(useLayout)
if not np.allclose(dt1, dt2, atol=atol, rtol=rtol):
logger.error("Tensor %d mismatch!" % i)
return False
return True
| 22,685
|
def auth(body): # noqa: E501
"""Authenticate endpoint
Return a bearer token to authenticate and authorize subsequent calls for resources # noqa: E501
:param body: Request body to perform authentication
:type body: dict | bytes
:rtype: Auth
"""
db = get_db()
cust = db['Customer'].find_one({"email": body['username']})
try:
if cust is None:
user = db['User'].find_one({"email": body['username']})
if user is None:
return "Auth failed", 401
else:
if user['plain_password'] == body['password']:
return generate_response(generate_token(str(user['_id'])))
else:
if cust['plain_password'] == body['password']:
return generate_response(generate_token(str(cust['_id'])))
except Exception as e:
print (e)
return "Auth failed", 401
| 22,686
|
def make_feature(func, *argfuncs):
"""Return a customized feature function that adapts to different input representations.
Args:
func: feature function (callable)
argfuncs: argument adaptor functions (callable, take `ctx` as input)
"""
assert callable(func)
for argfunc in argfuncs:
assert callable(argfunc)
def _feature(ctx):
return func(*[argfunc(ctx) for argfunc in argfuncs])
return _feature
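# Hypothetical usage sketch (not part of the original snippet): the adaptor callables pull
# the arguments of func out of whatever per-example context object the pipeline passes in.
word_length = make_feature(len, lambda ctx: ctx["word"])
assert word_length({"word": "hello"}) == 5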
| 22,687
|
def from_numpy(np_array: np.ndarray):
"""Convert a numpy array to another type of dlpack compatible array.
Parameters
----------
np_array : np.ndarray
The source numpy array that will be converted.
Returns
-------
pycapsule : PyCapsule
A pycapsule containing a DLManagedTensor that can be converted
to other array formats without copying the underlying memory.
"""
holder = _Holder(np_array)
size = ctypes.c_size_t(ctypes.sizeof(DLManagedTensor))
dl_managed_tensor = DLManagedTensor.from_address(
ctypes.pythonapi.PyMem_RawMalloc(size)
)
dl_managed_tensor.dl_tensor.data = holder.data
dl_managed_tensor.dl_tensor.device = DLDevice(1, 0)
dl_managed_tensor.dl_tensor.ndim = np_array.ndim
dl_managed_tensor.dl_tensor.dtype = DLDataType.TYPE_MAP[str(np_array.dtype)]
dl_managed_tensor.dl_tensor.shape = holder.shape
dl_managed_tensor.dl_tensor.strides = holder.strides
dl_managed_tensor.dl_tensor.byte_offset = 0
dl_managed_tensor.manager_ctx = holder._as_manager_ctx()
dl_managed_tensor.deleter = _numpy_array_deleter
pycapsule = ctypes.pythonapi.PyCapsule_New(
ctypes.byref(dl_managed_tensor),
_c_str_dltensor,
_numpy_pycapsule_deleter,
)
return pycapsule
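# Hypothetical usage sketch (not part of the original snippet): the returned capsule can be
# handed to any DLPack-aware consumer. PyTorch is used here only as an example consumer; it
# is not required by the code above.
import numpy as np
import torch.utils.dlpack

capsule = from_numpy(np.arange(6, dtype=np.float32).reshape(2, 3))
tensor = torch.utils.dlpack.from_dlpack(capsule)   # zero-copy view over the numpy buffer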
| 22,688
|
def rename(bot, msg):
"""Rename a shorturl."""
old_slug = msg.match.group(1)
new_slug = msg.match.group(2)
with shorturl_db(user='ocfircbot', password=bot.mysql_password) as ctx:
rename_shorturl(ctx, old_slug, new_slug)
msg.respond(f'shorturl `{old_slug}` has been renamed to `{new_slug}`')
| 22,689
|
def lmsSubstringsAreEqual(string, typemap, offsetA, offsetB):
"""
Return True if LMS substrings at offsetA and offsetB are equal.
"""
# No other substring is equal to the empty suffix.
if offsetA == len(string) or offsetB == len(string):
return False
i = 0
while True:
aIsLMS = isLMSChar(i + offsetA, typemap)
bIsLMS = isLMSChar(i + offsetB, typemap)
# If we've found the start of the next LMS substrings
if (i > 0 and aIsLMS and bIsLMS):
# then we made it all the way through our original LMS
# substrings without finding a difference, so we can go
# home now.
return True
if aIsLMS != bIsLMS:
# We found the end of one LMS substring before we reached
# the end of the other.
return False
if string[i + offsetA] != string[i + offsetB]:
# We found a character difference, we're done.
return False
i += 1
| 22,690
|
def validate_config(*, config: Any) -> None:
"""
Validate a config.
Args:
config: the config.
Raises:
InvalidConfig: when the config isn't valid.
"""
default_schema = schema.Schema(
{
"static_url": str,
schema.Optional("favicon_ico"): schema.Or(None, str),
schema.Optional("favicon_png"): schema.Or(None, str),
schema.Optional("favicon_svg"): schema.Or(None, str),
schema.Optional("preview_png"): schema.Or(None, str),
schema.Optional("google_tag_manager"): schema.Or(None, str),
schema.Optional("language"): schema.Or(None, str),
schema.Optional("territory"): schema.Or(None, str),
schema.Optional("domain"): schema.Or(None, str),
schema.Optional("text_dir"): schema.Or(None, str),
schema.Optional("title"): schema.Or(None, str),
schema.Optional("description"): schema.Or(None, str),
schema.Optional("subject"): schema.Or(None, str),
schema.Optional("main_color"): schema.Or(None, str),
schema.Optional("background_color"): schema.Or(None, str),
schema.Optional("author_name"): schema.Or(None, str),
schema.Optional("author_email"): schema.Or(None, str),
schema.Optional("facebook_app_id"): schema.Or(None, str),
schema.Optional("twitter_username"): schema.Or(None, str),
schema.Optional("twitter_user_id"): schema.Or(None, str),
schema.Optional("itunes_app_id"): schema.Or(None, str),
schema.Optional("itunes_affiliate_data"): schema.Or(None, str),
}
)
try:
default_schema.validate(config)
except (
schema.SchemaWrongKeyError,
schema.SchemaMissingKeyError,
schema.SchemaError,
) as exception:
raise exceptions.InvalidConfig(exception)
if not re.match("^((https?://|/).*)?$", config["static_url"]):
raise exceptions.InvalidConfig("The key static_url must starts with a slash, http://, https:// or be an empty string.")
if config["static_url"].endswith("/"):
raise exceptions.InvalidConfig("The key static_url can't end with a slash.")
hex_color = re.compile("^#(?:[0-9A-Fa-f]{3}){1,2}$")
for color_key in ("main_color", "background_color"):
if config.get(color_key) and not hex_color.match(config[color_key]):
raise exceptions.InvalidConfig(f"The key {color_key} must be a hex color code. If you don't want any value on this key, set the value to null.")
| 22,691
|
def synthesize_photometry(lbda, flux, filter_lbda, filter_trans,
normed=True):
""" Get Photometry from the given spectral information through the given filter.
This function converts the flux into photons since the transmission provides the
    fraction of photons that go through.
Parameters
-----------
lbda, flux: [array]
        Wavelength and flux of the spectrum from which you want to synthesize photometry
filter_lbda, filter_trans: [array]
Wavelength and transmission of the filter.
normed: [bool] -optional-
        Shall the filter transmission be normalized?
Returns
-------
Float (photometric point)
"""
# ---------
# The Tool
from .tools import nantrapz
def integrate_photons(lbda, flux, step, flbda, fthroughput):
""" """
filter_interp = np.interp(lbda, flbda, fthroughput)
dphotons = (filter_interp * flux) * lbda * 5.006909561e7
return nantrapz(dphotons,lbda) if step is None else np.sum(dphotons*step)
# ---------
# The Code
normband = 1. if not normed else \
integrate_photons(lbda, np.ones(len(lbda)),None,filter_lbda,filter_trans)
return integrate_photons(lbda,flux,None,filter_lbda,filter_trans)/normband
| 22,692
|
def get_A_dash_floor_bath(house_insulation_type, floor_bath_insulation):
"""浴室の床の面積 (m2)
Args:
house_insulation_type(str): 床断熱住戸'または'基礎断熱住戸'
floor_bath_insulation(str): 床断熱住戸'または'基礎断熱住戸'または'浴室の床及び基礎が外気等に面していない'
Returns:
float: 浴室の床の面積 (m2)
"""
return get_table_3(15, house_insulation_type, floor_bath_insulation)
| 22,693
|
def cancel_task_async(hostname, task_id):
"""Cancels a swarming task."""
return _call_api_async(
None, hostname, 'task/%s/cancel' % task_id, method='POST')
| 22,694
|
def generate_two_files_both_stress_strain():
"""Generates two files that have both stress and strain in each file"""
fname = {'stress': 'resources/double_stress.json',
'strain': 'resources/double_strain.json'}
expected = [ # makes an array of two pif systems
pif.System(
properties=[
pif.Property(name='stress',
scalars=list(np.linspace(0, 100)),
conditions=pif.Value(
name='time',
scalars=list(np.linspace(0, 100)))),
pif.Property(name='strain',
scalars=list(np.linspace(0, 1)),
conditions=pif.Value(
name='time',
scalars=list(np.linspace(0, 100))))]),
pif.System(
properties=[
pif.Property(name='stress',
scalars=list(np.linspace(0, 100)),
conditions=pif.Value(
name='time',
scalars=list(np.linspace(0, 100)))),
pif.Property(name='strain',
scalars=list(np.linspace(0, 1)),
conditions=pif.Value(
name='time',
scalars=list(np.linspace(0, 100))))
])]
    # dump the pifs into two separate files
with open(fname['stress'], 'w') as stress_file:
pif.dump(expected[0], stress_file)
with open(fname['strain'], 'w') as strain_file:
pif.dump(expected[1], strain_file)
return fname
| 22,695
|
async def test_template_with_delay_on_based_on_input(hass):
"""Test binary sensor template with template delay on based on input number."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_on": '{{ ({ "seconds": states("input_number.delay")|int }) }}',
}
},
}
}
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
hass.states.async_set("input_number.delay", 3)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=3)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
# set input to 4 seconds
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("input_number.delay", 4)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=2)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=4)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
| 22,696
|
def run_cli(entry_point, *arguments, **options):
"""
Test a command line entry point.
:param entry_point: The function that implements the command line interface
(a callable).
:param arguments: Any positional arguments (strings) become the command
line arguments (:data:`sys.argv` items 1-N).
:param options: The following keyword arguments are supported:
**capture**
Whether to use :class:`CaptureOutput`. Defaults
to :data:`True` but can be disabled by passing
:data:`False` instead.
**input**
Refer to :class:`CaptureOutput`.
**merged**
Refer to :class:`CaptureOutput`.
**program_name**
Used to set :data:`sys.argv` item 0.
:returns: A tuple with two values:
1. The return code (an integer).
2. The captured output (a string).
"""
# Add the `program_name' option to the arguments.
arguments = list(arguments)
arguments.insert(0, options.pop('program_name', sys.executable))
# Log the command line arguments (and the fact that we're about to call the
# command line entry point function).
logger.debug("Calling command line entry point with arguments: %s", arguments)
# Prepare to capture the return code and output even if the command line
# interface raises an exception (whether the exception type is SystemExit
# or something else).
returncode = 0
stdout = None
stderr = None
try:
# Temporarily override sys.argv.
with PatchedAttribute(sys, 'argv', arguments):
# Manipulate the standard input/output/error streams?
options['enabled'] = options.pop('capture', True)
with CaptureOutput(**options) as capturer:
try:
# Call the command line interface.
entry_point()
finally:
# Get the output even if an exception is raised.
stdout = capturer.stdout.getvalue()
stderr = capturer.stderr.getvalue()
# Reconfigure logging to the terminal because it is very
# likely that the entry point function has changed the
# configured log level.
configure_logging()
except BaseException as e:
if isinstance(e, SystemExit):
logger.debug("Intercepting return code %s from SystemExit exception.", e.code)
returncode = e.code
else:
logger.warning("Defaulting return code to 1 due to raised exception.", exc_info=True)
returncode = 1
else:
logger.debug("Command line entry point returned successfully!")
# Always log the output captured on stdout/stderr, to make it easier to
# diagnose test failures (but avoid duplicate logging when merged=True).
is_merged = options.get('merged', False)
merged_streams = [('merged streams', stdout)]
separate_streams = [('stdout', stdout), ('stderr', stderr)]
streams = merged_streams if is_merged else separate_streams
for name, value in streams:
if value:
logger.debug("Output on %s:\n%s", name, value)
else:
logger.debug("No output on %s.", name)
return returncode, stdout
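# Hypothetical usage sketch (not part of the original snippet): exercising a trivial entry
# point and checking both the intercepted exit code and the captured output.
def hello_main():
    print("hello from the CLI")
    raise SystemExit(0)

returncode, output = run_cli(hello_main)
assert returncode == 0
assert "hello from the CLI" in output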
| 22,697
|
def recipe_edit(username, pk):
"""Page showing the possibility to edit the recipe."""
recipe_manager = RecipeManager(api_token=g.user_token)
response = recipe_manager.get_recipe_response(pk)
recipe = response.json()
# shows 404 if there is no recipe, response status code is 404 or user is not the author
if not recipe or response.status_code == 404 or username != g.username:
abort(404)
# checking form validation
form = RecipeAddForm(data=recipe)
if form.validate_on_submit():
try:
if form.image.data != DEFAULT_RECIPE_IMAGE_PATH: # if the user has uploaded a picture file
image = images.save(form.image.data)
image_path = f'app/media/recipe_images/{image}'
else:
image_path = None # set image_path to None so as not to alter the image
except UploadNotAllowed: # if the user uploaded a file that is not a picture
flash('Incorrect picture format', 'error')
else: # if there is no exception edit recipe data and image
recipe_data, recipe_files = recipe_manager.get_form_data(form, image_path)
recipe_manager.edit(recipe_data, recipe_files, pk, username)
return redirect('/recipes/')
return render_template('recipe_edit.html', form=form)
| 22,698
|
def test_document_request_get_issuer():
"""Test the `DocumentRequest.get_issuer()` method"""
document_request = factories.DocumentRequestFactory(
issuer="marion.issuers.DummyDocument",
context_query={"fullname": "Richie Cunningham"},
)
assert isinstance(document_request.get_issuer(), issuers.DummyDocument)
| 22,699
|