content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def _all_steps_multiples_of_min_step(rows: np.ndarray) -> bool:
"""
Are all steps integer multiples of the smallest step?
This is used in determining whether the setpoints correspond
to a regular grid
Args:
rows: the output of _rows_from_datapoints
Returns:
The answer to the question
"""
steps: List[np.ndarray] = []
for row in rows:
# TODO: What is an appropriate precision?
steps += list(np.unique(np.diff(row).round(decimals=15)))
steps = np.unique(steps)
remainders = np.mod(steps[1:]/steps[0], 1)
# TODO: What are reasonable tolerances for allclose?
asmoms = bool(np.allclose(remainders, np.zeros_like(remainders)))
return asmoms | 5,328,300 |
def black_config(
    config: c2cciutils.configuration.ChecksBlackConfigurationConfig,
    full_config: c2cciutils.configuration.Configuration,
    args: Namespace,
) -> bool:
    """
    Check the black configuration.

    config is like:
      properties: # dictionary of properties to check

    Arguments:
        config: The check section config
        full_config: All the CI config
        args: The parsed command arguments
    """
    del full_config, args

    # The check only applies when the repository contains at least one
    # Python file; otherwise it is disabled and trivially passes.
    tracked = subprocess.check_output(["git", "ls-files"]).decode().strip().split("\n")
    has_python = any(
        os.path.isfile(name)
        and magic.from_file(name, mime=True) == "text/x-python"  # type: ignore
        for name in tracked
    )
    if not has_python:
        return True

    if not os.path.exists("pyproject.toml"):
        c2cciutils.error(
            "black_config",
            "The file 'pyproject.toml' with a section tool.black is required",
            "pyproject.toml",
        )
        return False

    config_parser = configparser.ConfigParser()
    config_parser.read("pyproject.toml")
    if "tool.black" not in config_parser.sections():
        c2cciutils.error(
            "black_config",
            "The 'tool.black' section is required in the 'pyproject.toml' file",
            "pyproject.toml",
        )
        return False

    if isinstance(config, dict):
        # Verify each configured property against the parsed pyproject.toml
        for key, value in config.get("properties", {}).items():
            if config_parser.get("tool.black", key) != str(value):
                c2cciutils.error(
                    "black_config",
                    f"The property '{key}' should have the value, '{value}', "
                    f"but is '{config_parser.get('tool.black', key)}'",
                    "pyproject.toml",
                )
                return False
    return True
def consumer(id, num_thread=1):
    """
    Main loop to consume messages from the Rucio Cache producer.

    Resolves the configured broker aliases via DNS, opens one STOMP
    connection per resolved broker, and keeps them connected/subscribed
    until GRACEFUL_STOP is set.

    Args:
        id: identifier of this consumer instance.
        num_thread: number of worker threads handed to each Consumer.
    """
    logging.info('Rucio Cache consumer starting')

    try:
        brokers_alias = [b.strip() for b in config_get('messaging-cache', 'brokers').split(',')]
    except Exception as error:
        # Narrowed from a bare except; chain the cause instead of hiding it.
        raise Exception('Could not load rucio cache brokers from configuration') from error

    # Lazy %-style logging args instead of eager string interpolation
    logging.info('resolving rucio cache broker dns alias: %s', brokers_alias)
    brokers_resolved = []
    for broker in brokers_alias:
        brokers_resolved.append([str(tmp_broker) for tmp_broker in dns.resolver.query(broker, 'A')])
    # Flatten the per-alias address lists into one list of brokers
    brokers_resolved = [item for sublist in brokers_resolved for item in sublist]
    logging.debug('Rucio cache brokers resolved to %s', brokers_resolved)

    conns = {}
    for broker in brokers_resolved:
        conn = stomp.Connection(host_and_ports=[(broker, config_get_int('messaging-cache', 'port'))],
                                use_ssl=True,
                                ssl_key_file=config_get('messaging-cache', 'ssl_key_file'),
                                ssl_cert_file=config_get('messaging-cache', 'ssl_cert_file'),
                                ssl_version=ssl.PROTOCOL_TLSv1)
        conns[conn] = Consumer(conn.transport._Transport__host_and_ports[0], account=config_get('messaging-cache', 'account'), id=id, num_thread=num_thread)

    logging.info('consumer started')
    while not GRACEFUL_STOP.is_set():
        # Reconnect and resubscribe any connection that dropped
        for conn in conns:
            if not conn.is_connected():
                logging.info('connecting to %s', conn.transport._Transport__host_and_ports[0][0])
                record_counter('daemons.messaging.cache.reconnect.%s' % conn.transport._Transport__host_and_ports[0][0].split('.')[0])
                conn.set_listener('rucio-cache-messaging', conns[conn])
                conn.start()
                conn.connect()
                conn.subscribe(destination=config_get('messaging-cache', 'destination'),
                               id='rucio-cache-messaging',
                               ack='auto')
        time.sleep(1)

    logging.info('graceful stop requested')
    for conn in conns:
        try:
            conn.disconnect()
        except Exception:
            # Best effort on shutdown: a broker that is already gone is fine.
            pass
    logging.info('graceful stop done')
def get_full_test_names(testargs, machine, compiler):
###############################################################################
    """
    Return full test names in the form:
    TESTCASE.GRID.COMPSET.MACHINE_COMPILER.TESTMODS
    Testmods are optional

    Testargs can be categories or test names and support the NOT symbol '^'

    >>> get_full_test_names(["cime_tiny"], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu']

    >>> get_full_test_names(["cime_tiny", "PEA_P1_M.f45_g37_rx1.A"], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu']

    >>> get_full_test_names(['ERS.f19_g16_rx1.A', 'NCK.f19_g16_rx1.A', 'PEA_P1_M.f45_g37_rx1.A'], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu']

    >>> get_full_test_names(["cime_tiny", "^NCK.f19_g16_rx1.A"], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu']

    >>> get_full_test_names(["cime_test_multi_inherit"], "melvin", "gnu")
    ['TESTBUILDFAILEXC_P1.f19_g16_rx1.A.melvin_gnu', 'TESTBUILDFAIL_P1.f19_g16_rx1.A.melvin_gnu', 'TESTMEMLEAKFAIL_P1.f09_g16.X.melvin_gnu', 'TESTMEMLEAKPASS_P1.f09_g16.X.melvin_gnu', 'TESTRUNDIFF_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNFAILEXC_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNFAIL_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P1.f45_g37_rx1.A.melvin_gnu', 'TESTRUNPASS_P1.ne30_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P2.ne30_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P4.f45_g37_rx1.A.melvin_gnu', 'TESTRUNSTARCFAIL_P1.f19_g16_rx1.A.melvin_gnu', 'TESTTESTDIFF_P1.f19_g16_rx1.A.melvin_gnu']
    """
    expect(machine is not None, "Must define a machine")
    expect(compiler is not None, "Must define a compiler")
    e3sm_test_suites = get_test_suites()

    tests_to_run = set()
    negations = set()

    for raw_arg in testargs:
        arg = raw_arg.strip()  # remove any whitespace in name
        if arg.startswith("^"):
            negations.add(arg[1:])
        elif arg in e3sm_test_suites:
            # A known suite expands to its member tests
            tests_to_run.update(get_test_suite(arg, machine, compiler))
        else:
            try:
                tests_to_run.add(CIME.utils.get_full_test_name(arg, machine=machine, compiler=compiler))
            except Exception:
                # A bare word (no '.') was neither a suite nor a test name
                expect("." in arg, "Unrecognized test suite '{}'".format(arg))
                raise

    # Apply the '^' negations after all positive args have been expanded
    for negation in negations:
        if negation in e3sm_test_suites:
            tests_to_run.difference_update(get_test_suite(negation, machine, compiler))
        else:
            full_name = CIME.utils.get_full_test_name(negation, machine=machine, compiler=compiler)
            tests_to_run.discard(full_name)

    return sorted(tests_to_run)
def prod_time_series(qa_prod, qatype, metric, xlim=None, outfile=None, close=True, pp=None,
                     bright_dark=0):
    """ Generate a time series plot for a production

    Args:
        qa_prod: QA production object; must provide get_qa_table()
            # assumed from usage below — TODO confirm exact type
        qatype: QA type, forwarded to qa_prod.get_qa_table()
        metric: metric (table column) to plot on the y-axis
        xlim: NOTE(review): accepted but never used in this routine
        outfile: str, optional; path to save the figure to
        close: bool; close the figure after saving/showing
        pp: PdfPages-like object, optional; used when outfile is None
        bright_dark: int, optional; (flag: 0=all; 1=bright; 2=dark)

    Returns:
    """
    from astropy.time import Time
    log = get_logger()
    # Setup
    fig = plt.figure(figsize=(8, 5.0))
    gs = gridspec.GridSpec(3,1)
    # Loop on channel
    clrs = get_channel_clrs()
    # Grab QA
    all_times = []
    all_ax = []
    # One stacked panel per spectrograph channel (b, r, z)
    for cc, channel in enumerate(['b','r','z']):
        ax = plt.subplot(gs[cc])
        qa_tbl = qa_prod.get_qa_table(qatype, metric, channels=channel)
        '''
        # Check for nans
        isnan = np.isnan(qa_arr)
        if np.sum(isnan) > 0:
            log.error("NAN in qatype={:s}, metric={:s} for channel={:s}".format(
                qatype, metric, channel))
            qa_arr[isnan] = -999.
        '''
        # Convert Date to MJD
        atime = Time(qa_tbl['DATE-OBS'], format='isot', scale='utc')
        atime.format = 'mjd'
        mjd = atime.value
        # Bright dark -- cut on exposure time; 1200 s threshold is a kludge
        if bright_dark == 0: # All
            pass
        elif bright_dark == 1: # Bright
            log.info("Using a bright/dark kludge for now")
            bright = qa_tbl['EXPTIME'] < 1200.
            qa_tbl = qa_tbl[bright]
            mjd = mjd[bright]
        elif bright_dark == 2: # Dark
            log.info("Using a bright/dark kludge for now")
            dark = qa_tbl['EXPTIME'] > 1200.
            qa_tbl = qa_tbl[dark]
            mjd = mjd[dark]
        # Scatter me
        ax.scatter(mjd, qa_tbl[metric], color=clrs[channel], s=4.)
        # Axes
        ax.set_ylabel('Metric')
        if cc < 2:
            # Only the bottom panel carries x tick labels
            ax.get_xaxis().set_ticks([])
        if cc ==0:
            ax.set_title('{:s} :: {:s}'.format(qatype,metric))
        all_times.append(mjd)
        all_ax.append(ax)
        # Label
        #ax.text(0.05, 0.85, channel, color='black', transform=ax.transAxes, ha='left')
    ax.set_xlabel('MJD')
    # Give the three panels a common x-range spanning all channels
    all_times = np.concatenate(all_times)
    xmin, xmax = np.min(all_times), np.max(all_times)
    for cc in range(3):
        all_ax[cc].set_xlim(xmin,xmax)
    # Finish
    plt.tight_layout(pad=0.1,h_pad=0.0,w_pad=0.0)
    if outfile is not None:
        plt.savefig(outfile)
        print("Wrote QA file: {:s}".format(outfile))
        if close:
            plt.close()
    elif pp is not None:
        pp.savefig()
        if close:
            plt.close()
            pp.close()
    else: # Show
        plt.show()
def record_or_not(record_mode, line, start_block, end_block):
    """Toggle the recording state based on block delimiters found in a line.

    Args:
        record_mode: current recording state.
        line: text line to inspect.
        start_block: substring that switches recording on.
        end_block: substring that switches recording off.

    Returns:
        The updated recording state.
    """
    if not record_mode:
        if start_block in line:
            record_mode = True
    # Bug fix: the end marker must be checked while recording. Previously the
    # end_block test was nested under `if not record_mode`, so once recording
    # started it could never be switched off.
    elif end_block in line:
        record_mode = False
    return record_mode
def protocol_increasing_persistence_change_pas():
    """
    Change the leak conductance (input resistance) of the persistent synapse model.
    """
    run_protocol(
        compare_pas,
        root="persistent_pas",
        timestamp=False,
        filenames=["distal", "distal_KCC2"],
        param_list=None,
        config_functions={config_persistent_synapses: change_persist_numbers},
    )
def compute_angle_stats(vec_mat, unit='deg'):
    """Compute statistics of the angles between successive vectors.

    Args:
        vec_mat: 2D array whose rows are the successive vectors used in the
            reconstruction.
        unit: 'deg' to return results in degrees, anything else for radians.

    Returns:
        (mean, std) of the angles between consecutive vectors.
    """
    angles = []
    # Bug fix: iterate over the number of vectors (rows, shape[0]); the
    # original looped over shape[1] (the vector dimension) while indexing
    # rows via vec_mat[i], which only coincides for square matrices.
    for i in range(vec_mat.shape[0] - 1):
        offset = 0
        dot_prod = np.dot(vec_mat[i] / np.linalg.norm(vec_mat[i]),
                          vec_mat[i + 1] / np.linalg.norm(vec_mat[i + 1]))
        # Guard against rounding pushing the normalized dot product just
        # outside arccos' domain [-1, 1].
        dot_prod = min(1.0, max(-1.0, dot_prod))
        if dot_prod < 0:
            # NOTE(review): the original adds pi for negative dot products,
            # yielding angles above 180 deg; preserved as-is — confirm intent.
            offset = np.pi
        angles.append(np.arccos(dot_prod) + offset)
    angles = np.asarray(angles)
    if unit == 'deg':
        angles *= 180 / np.pi
    mean = np.mean(angles)
    std = np.std(angles)
    return (mean, std)
def id_test_data(value):
    """Build a readable test id from a test-data record's action and return code."""
    action = value.action_name
    code = value.return_code
    return f"action={action} return={code}"
def AppcommandsUsage(shorthelp=0, writeto_stdout=0, detailed_error=None,
                     exitcode=None, show_cmd=None, show_global_flags=False):
    """Output usage or help information.

    Extracts the __doc__ string from the __main__ module and writes it to
    stderr. If that string contains a '%s' then that is replaced by the command
    pathname. Otherwise a default usage string is being generated.

    The output varies depending on the following:
    - FLAGS.help
    - FLAGS.helpshort
    - show_cmd
    - show_global_flags

    Args:
        shorthelp: print only command and main module flags, rather than all.
        writeto_stdout: write help message to stdout, rather than to stderr.
        detailed_error: additional details about why usage info was presented.
        exitcode: if set, exit with this status code after writing help.
        show_cmd: show help for this command only (name of command).
        show_global_flags: show help for global flags.
    """
    if writeto_stdout:
        stdfile = sys.stdout
    else:
        stdfile = sys.stderr
    # Help-text block indent: two columns wider than the longest command name
    prefix = ''.rjust(GetMaxCommandLength() + 2)
    # Deal with header, containing general tool documentation
    doc = sys.modules['__main__'].__doc__
    if doc:
        help_msg = flags.DocToHelp(doc.replace('%s', sys.argv[0]))
        stdfile.write(flags.TextWrap(help_msg, flags.GetHelpWidth()))
        stdfile.write('\n\n\n')
    if not doc or doc.find('%s') == -1:
        # No module docstring (or no '%s' placeholder): emit a default synopsis
        synopsis = 'USAGE: ' + GetSynopsis()
        stdfile.write(flags.TextWrap(synopsis, flags.GetHelpWidth(), ' ',
                                     ''))
        stdfile.write('\n\n\n')
    # Special case just 'help' registered, that means run as 'tool --help'.
    if len(GetCommandList()) == 1:
        cmd_names = []
    else:
        # Show list of commands
        if show_cmd is None or show_cmd == 'help':
            cmd_names = sorted(GetCommandList().keys())
            stdfile.write('Any of the following commands:\n')
            doc = ', '.join(cmd_names)
            stdfile.write(flags.TextWrap(doc, flags.GetHelpWidth(), ' '))
            stdfile.write('\n\n\n')
        # Prepare list of commands to show help for
        if show_cmd is not None:
            cmd_names = [show_cmd]  # show only one command
        elif FLAGS.help or FLAGS.helpshort or shorthelp:
            cmd_names = []
        else:
            cmd_names = sorted(GetCommandList().keys())  # show all commands
    # Show the command help (none, one specific, or all)
    for name in cmd_names:
        command = GetCommandByName(name)
        try:
            cmd_help = command.CommandGetHelp(GetCommandArgv(), cmd_names=cmd_names)
        except Exception as error:  # pylint: disable=broad-except
            # A broken command's help must not abort usage output for the rest
            cmd_help = "Internal error for command '%s': %s." % (name, six.text_type(error))
        cmd_help = cmd_help.strip()
        all_names = ', '.join(
            [command.CommandGetName()] + (command.CommandGetAliases() or []))
        if len(all_names) + 1 >= len(prefix) or not cmd_help:
            # If command/alias list would reach over help block-indent
            # start the help block on a new line.
            stdfile.write(flags.TextWrap(all_names, flags.GetHelpWidth()))
            stdfile.write('\n')
            prefix1 = prefix
        else:
            prefix1 = all_names.ljust(GetMaxCommandLength() + 2)
        if cmd_help:
            stdfile.write(flags.TextWrap(cmd_help, flags.GetHelpWidth(), prefix,
                                         prefix1))
            stdfile.write('\n\n')
        else:
            stdfile.write('\n')
        # When showing help for exactly one command we show its flags
        if len(cmd_names) == 1:
            # Need to register flags for command prior to be able to use them.
            # We do not register them globally so that they do not reappear.
            # pylint: disable=protected-access
            cmd_flags = command._command_flags
            if cmd_flags.RegisteredFlags():
                stdfile.write('%sFlags for %s:\n' % (prefix, name))
                stdfile.write(cmd_flags.GetHelp(prefix+' '))
                stdfile.write('\n\n')
    stdfile.write('\n')
    # Now show global flags as asked for
    if show_global_flags:
        stdfile.write('Global flags:\n')
        if shorthelp:
            stdfile.write(FLAGS.MainModuleHelp())
        else:
            stdfile.write(FLAGS.GetHelp())
        stdfile.write('\n')
    else:
        stdfile.write("Run '%s --help' to get help for global flags."
                      % GetAppBasename())
    stdfile.write('\n%s\n' % _UsageFooter(detailed_error, cmd_names))
    if exitcode is not None:
        sys.exit(exitcode)
def kneeJointCenter(frame, hip_JC, delta, vsk=None):
    """Calculate the knee joint center and axis.

    Takes in a dictionary of marker names to x, y, z positions, the hip axis
    and pelvis axis. Calculates the knee joint axis and returns the knee origin
    and axis.

    Markers used: RTHI, LTHI, RKNE, LKNE, hip_JC
    Subject Measurement values used: RightKneeWidth, LeftKneeWidth

    Knee joint center: Computed using Knee Axis Calculation [1]_.

    Parameters
    ----------
    frame : dict
        dictionaries of marker lists.
    hip_JC : array
        An array of hip_JC containing the x,y,z axes marker positions of the
        hip joint center.
    delta : float, optional
        The length from marker to joint center, retrieved from subject
        measurement file.
    vsk : dict, optional
        A dictionary containing subject measurements.

    Returns
    -------
    R, L, axis : array
        Returns an array that contains the knee axis center in a 1x3 array of
        xyz values, which is then followed by a 2x3x3
        array composed of the knee axis center x, y, and z axis components. The
        xyz axis components are 2x3 arrays consisting of the
        axis center in the first dimension and the direction of the axis in the
        second dimension.

    References
    ----------
    .. [1] Baker, R. (2013). Measuring walking : a handbook of clinical gait
       analysis. Mac Keith Press.

    Notes
    -----
    delta is changed suitably to knee.

    Examples
    --------
    >>> import numpy as np
    >>> from .pyCGM import kneeJointCenter
    >>> vsk = { 'RightKneeWidth' : 105.0, 'LeftKneeWidth' : 105.0 }
    >>> frame = { 'RTHI': np.array([426.50, 262.65, 673.66]),
    ...           'LTHI': np.array([51.93, 320.01, 723.03]),
    ...           'RKNE': np.array([416.98, 266.22, 524.04]),
    ...           'LKNE': np.array([84.62, 286.69, 529.39])}
    >>> hip_JC = [[182.57, 339.43, 935.52],
    ...           [309.38, 322.80, 937.98]]
    >>> delta = 0
    >>> [arr.round(2) for arr in kneeJointCenter(frame,hip_JC,delta,vsk)] #doctest: +NORMALIZE_WHITESPACE
    [array([413.2 , 266.22, 464.66]), array([143.55, 279.91, 524.77]), array([[[414.2 , 266.22, 464.6 ],
    [413.14, 266.22, 463.66],
    [413.2 , 267.22, 464.66]],
    [[143.65, 280.89, 524.62],
    [142.56, 280.02, 524.85],
    [143.65, 280.05, 525.76]]])]
    """
    # Get Global Values
    # Half the knee width plus a fixed 7 mm marker offset gives the distance
    # from the knee marker to the joint center.
    mm = 7.0
    R_kneeWidth = vsk['RightKneeWidth']
    L_kneeWidth = vsk['LeftKneeWidth']
    R_delta = (R_kneeWidth/2.0)+mm
    L_delta = (L_kneeWidth/2.0)+mm

    #REQUIRED MARKERS:
    # RTHI
    # LTHI
    # RKNE
    # LKNE
    # hip_JC
    RTHI = frame['RTHI']
    LTHI = frame['LTHI']
    RKNE = frame['RKNE']
    LKNE = frame['LKNE']

    # NOTE(review): right hip center is index 1, left is index 0 — confirm
    # against the producer of hip_JC.
    R_hip_JC = hip_JC[1]
    L_hip_JC = hip_JC[0]

    # Determine the position of kneeJointCenter using findJointC function
    R = findJointC(RTHI,R_hip_JC,RKNE,R_delta)
    L = findJointC(LTHI,L_hip_JC,LKNE,L_delta)

    # Knee Axis Calculation(ref. Clinical Gait Analysis hand book, Baker2013)
    #Right axis calculation

    thi_kne_R = RTHI-RKNE

    # Z axis is Thigh bone calculated by the hipJC and kneeJC
    # the axis is then normalized
    axis_z = R_hip_JC-R

    # X axis is perpendicular to the points plane which is determined by KJC, HJC, KNE markers.
    # and calculated by each point's vector cross vector.
    # the axis is then normalized.
    # axis_x = cross(axis_z,thi_kne_R)
    axis_x = cross(axis_z,RKNE-R_hip_JC)

    # Y axis is determined by cross product of axis_z and axis_x.
    # the axis is then normalized.
    axis_y = cross(axis_z,axis_x)

    Raxis = np.asarray([axis_x,axis_y,axis_z])

    #Left axis calculation

    thi_kne_L = LTHI-LKNE

    # Z axis is Thigh bone calculated by the hipJC and kneeJC
    # the axis is then normalized
    axis_z = L_hip_JC-L

    # X axis is perpendicular to the points plane which is determined by KJC, HJC, KNE markers.
    # and calculated by each point's vector cross vector.
    # the axis is then normalized.
    # axis_x = cross(thi_kne_L,axis_z)
    #using hipjc instead of thigh marker
    axis_x = cross(LKNE-L_hip_JC,axis_z)

    # Y axis is determined by cross product of axis_z and axis_x.
    # the axis is then normalized.
    axis_y = cross(axis_z,axis_x)

    Laxis = np.asarray([axis_x,axis_y,axis_z])

    # Clear the name of axis and then nomalize it.
    R_knee_x_axis = Raxis[0]
    R_knee_x_axis = R_knee_x_axis/norm3d(R_knee_x_axis)
    R_knee_y_axis = Raxis[1]
    R_knee_y_axis = R_knee_y_axis/norm3d(R_knee_y_axis)
    R_knee_z_axis = Raxis[2]
    R_knee_z_axis = R_knee_z_axis/norm3d(R_knee_z_axis)
    L_knee_x_axis = Laxis[0]
    L_knee_x_axis = L_knee_x_axis/norm3d(L_knee_x_axis)
    L_knee_y_axis = Laxis[1]
    L_knee_y_axis = L_knee_y_axis/norm3d(L_knee_y_axis)
    L_knee_z_axis = Laxis[2]
    L_knee_z_axis = L_knee_z_axis/norm3d(L_knee_z_axis)

    #Put both axis in array
    # Add the origin back to the vector
    y_axis = R_knee_y_axis+R
    z_axis = R_knee_z_axis+R
    x_axis = R_knee_x_axis+R
    Raxis = np.asarray([x_axis,y_axis,z_axis])

    # Add the origin back to the vector
    y_axis = L_knee_y_axis+L
    z_axis = L_knee_z_axis+L
    x_axis = L_knee_x_axis+L
    Laxis = np.asarray([x_axis,y_axis,z_axis])

    axis = np.asarray([Raxis,Laxis])

    return [R,L,axis]
def uniform(low: float = 0.0,
            high: float = 1.0,
            size: tp.Optional[SIZE_TYPE] = None):
    """
    Draw samples from a uniform distribution over [low, high).

    Raises:
        ValueError: if high is less than low.
    """
    if high < low:
        raise ValueError("high must not be less than low")
    # Draw on the unit interval, then rescale onto [low, high)
    unit_sample = _draw_and_reshape(size, rand)
    return unit_sample * (high - low) + low
def toInt():
    """Build a transform that casts the current value to int.

    The value is treated as a decimal string (a comma is accepted as the
    decimal separator), parsed as a float and floored.

    Returns:
        A transform function that returns the int value, or None when the
        input is None or cannot be parsed.
    """
    def transform_function(current_value: object, record: dict, complete_transform_schema: dict,
                           custom_variables: dict):
        if current_value is None:
            return None
        try:
            # Accept "3,5" as well as "3.5" by normalising the decimal separator
            clean_current_value = current_value.replace(",", ".")
            return int(math.floor(float(clean_current_value)))
        except (AttributeError, TypeError, ValueError):
            # Narrowed from a bare except: non-string inputs (no .replace) and
            # unparsable text both map to None.
            return None
    return transform_function
def public_upload(request):
    """Public form to upload missing images

    :param request: current user request
    :type request: django.http.request
    :return: rendered response
    :rtype: HttpResponse
    """
    upload_success = False
    if request.method == "POST":
        # NOTE(review): Document.objects.get raises DoesNotExist for a
        # missing/invalid id rather than returning a falsy value, so the
        # `if document` guard below never sees None — confirm intent.
        document = Document.objects.get(id=request.POST.get("inputDocument", None))
        if document:
            uploaded_image = request.FILES.get("inputFile", None)
            if uploaded_image:
                # Store the upload unconfirmed; it must be reviewed later.
                image = DocumentImage(
                    document=document,
                    image=uploaded_image,
                    name=document.word,
                    confirmed=False,
                )
                image.save()
                upload_success = True
    # Documents that still lack any image (choices offered in the form)
    missing_images = Document.objects.values_list(
        "id", "word", "article", "training_sets"
    ).filter(document_image__isnull=True)
    # Training sets that contain at least one document without an image
    training_sets = (
        TrainingSet.objects.values_list("id", "title")
        .filter(documents__document_image__isnull=True)
        .distinct()
    )
    context = {
        "documents": json.dumps(list(missing_images)),
        "training_sets": json.dumps(list(training_sets)),
        "upload_success": upload_success,
    }
    return render(request, "public_upload.html", context)
def setup_image_service():
    """Provisions image services in openstack nodes"""
    # Nothing to do when no openstack nodes are defined
    if not env.roledefs['openstack']:
        return
    execute("setup_image_service_node", env.host_string)
def load_classes(fstem):
    """Load all classes from a generated python file.

    Args:
        fstem: file stem (no extension) of a file under trestle/oscal/tmp.

    Returns:
        A list of ClassText objects, with all OSCAL versions constrained to
        the current one.
    """
    all_classes = []
    header = []
    forward_refs = []
    class_text = None
    done_header = False
    # Hoisted out of the loop: the pattern is invariant per file.
    any_list_pattern = re.compile(r'.*Optional\[Union\[([^,]+),.*List\[Any\]')
    fname = pathlib.Path('trestle/oscal/tmp') / (fstem + '.py')
    with open(fname, 'r', encoding='utf8') as infile:
        for r in infile.readlines():
            # collect forward references
            if r.find('.update_forward_refs()') >= 0:
                forward_refs.append(r)
            elif r.find(class_header) == 0:  # start of new class
                done_header = True
                if class_text is not None:  # we are done with current class so add it
                    all_classes.append(class_text)
                class_text = ClassText(r, fstem)
            else:
                if not done_header:  # still in header
                    header.append(r.rstrip())
                else:
                    # this may not be needed
                    refs = any_list_pattern.findall(r)
                    if len(refs) == 1:
                        logger.info(f'Replaced Any with {refs[0]} in {fstem}')
                        r_orig = r
                        r = r.replace('List[Any]', f'List[{refs[0]}]')
                        logger.info(f'{r_orig} -> {r}')
                    class_text.add_line(r.rstrip())
    # Bug fix: only append the final class when one was actually found;
    # previously a file with no classes appended None to the list.
    if class_text is not None:
        all_classes.append(class_text)
    # force all oscal versions to the current one
    all_classes = constrain_oscal_version(all_classes)
    return all_classes
def findAllSubstrings(string, substring):
    """ Returns a list of all substring starting positions in string or an empty
    list if substring is not present in string.

    :param string: a template string
    :param substring: a string, which is looked for in the ``string`` parameter.

    :returns: a list of substring starting positions in the template string
    """
    #TODO: solve with regex? what about '.':
    #return [m.start() for m in re.finditer('(?='+substring+')', string)]
    positions = []
    offset = string.find(substring)
    while offset != -1:
        positions.append(offset)
        # advance by one (not len(substring)) so overlapping matches are found
        offset = string.find(substring, offset + 1)
    return positions
def create_repo(path: str):
    """Creates a new repository at path.

    Creates the standard directory layout (branches, objects, refs) and the
    initial description, HEAD and config files.

    Args:
        path: filesystem path at which to create the repository.
    """
    repo = Repository(path, True)

    # NOTE(review): assert is stripped under `python -O`; kept as-is to
    # preserve the AssertionError contract for existing callers.
    assert(repo_dir(repo, "branches", mkdir=True))
    assert(repo_dir(repo, "objects", mkdir=True))
    assert(repo_dir(repo, "refs/heads", mkdir=True))
    assert(repo_dir(repo, "refs/tags", mkdir=True))

    # The three initial files share the same resolve-then-write pattern.
    _write_repo_text(
        repo, "description",
        "Unnamed repository; edit this file 'description' to name the repository.\n")
    _write_repo_text(repo, "HEAD", "ref: refs/heads/master\n")

    fpath = repo_file(repo, "config")
    if fpath:
        with open(fpath, "w") as f:
            config = create_default_config()
            config.write(f)


def _write_repo_text(repo, name: str, text: str):
    """Write text to the repo file `name` if its path can be resolved."""
    fpath = repo_file(repo, name)
    if fpath:
        with open(fpath, "w") as f:
            f.write(text)
def build_optimizer(name, lr=0.001, **kwargs):
    """Get an optimizer for TensorFlow high-level API Estimator.

    Args:
        name (str): Optimizer name. Note, to use 'Momentum', should specify
        lr (float): Learning rate.
        kwargs (dictionary): Optimizer arguments.

    Returns:
        tf.train.Optimizer

    Raises:
        ValueError: if `name` is not a supported optimizer name.
    """
    if name == 'Momentum':
        # Default to momentum=0.9 unless the caller supplied a value.
        kwargs.setdefault('momentum', 0.9)
        return tf.train.MomentumOptimizer(learning_rate=lr, **kwargs)

    constructors = {
        'Adadelta': tf.train.AdadeltaOptimizer,
        'Adagrad': tf.train.AdagradOptimizer,
        'Adam': tf.train.AdamOptimizer,
        'Ftrl': tf.train.FtrlOptimizer,
        'RMSProp': tf.train.RMSPropOptimizer,
        'SGD': tf.train.GradientDescentOptimizer,
    }
    if name not in constructors:
        raise ValueError(
            """Optimizer name should be either 'Adadelta', 'Adagrad', 'Adam',
            'Ftrl', 'Momentum', 'RMSProp', or 'SGD'"""
        )
    return constructors[name](learning_rate=lr, **kwargs)
def generate_pruning_config(model_name,
                            sparsity,
                            begin_step=0,
                            end_step=-1,
                            schedule='ConstantSparsity',
                            granularity='BlockSparsity',
                            respect_submatrix=False,
                            two_over_four_chin=False,
                            ch_share=True,
                            path=None):
    """Generate a model pruning config out of sparsity configuration.

    Arguments:
        model_name: A `str`. 'mnist', 'resnet56' (CIFAR-10), 'resnet50' (ImageNet),
            or 'mobilenetV1'.
        sparsity: A `dict`. Keys are `str` representing layer names (or possibly a
            regexp pattern), and values are sparsity (must be convertible to float).
        begin_step: Step at which to begin pruning. `0` by default.
        end_step: Step at which to end pruning. `-1` by default. `-1` implies
            continuing to prune till the end of training (available only for
            'ConstantSparsity' schedule).
        schedule: 'ConstantSparsity' or 'PolynomialDecay'.
        granularity: 'ArayaMag', 'BlockSparsity', 'ChannelPruning', 'KernelLevel',
            or 'QuasiCyclic'.
        respect_submatrix: A `bool`. Whether or not to mask weight tensors
            submatrix-wise.
        two_over_four_chin: A `bool`. Whether or not to realize two-out-of-four
            sparsity pattern along input channels. Defaults to `False`, in which case
            the sparsity pattern is achieved along the output channels.
        ch_share: A `bool`. Whether or not to share channel-pruning masks across
            layers; only consulted when granularity is 'ChannelPruning' and the
            model is a ResNet.
        path: `None` or a `str`. If `str`, saves the model pruning config as YAML
            file.

    Returns:
        A ModelPruningConfig instance.

    Raises:
        ValueError: if `schedule` or `granularity` is not one of the supported
            names.
    """
    def get_pruning_schedule_config(_sparsity):
        # Build the schedule section for a single layer's target sparsity.
        _sparsity = float(_sparsity)
        config = dict(begin_step=begin_step, end_step=end_step, frequency=100)
        if schedule == 'ConstantSparsity':
            config['target_sparsity'] = _sparsity
        elif schedule == 'PolynomialDecay':
            config['initial_sparsity'] = 0.
            config['final_sparsity'] = _sparsity
            config['power'] = 3
        else:
            raise ValueError
        return pruning_base_configs.PruningScheduleConfig(
            class_name=schedule,
            config=config
        )

    def get_pruning_granularity_config(_sparsity):
        # Build the granularity section; each granularity takes different knobs.
        _sparsity = float(_sparsity)
        config = dict()
        if granularity in ('ArayaMag', 'QuasiCyclic'):
            # gamma is the inverse of the kept fraction
            config['gamma'] = int(1/(1.0 - _sparsity))
            if respect_submatrix:
                config['respect_submatrix'] = True
        elif granularity == 'BlockSparsity':
            config['block_size'] = [1, 1]
            config['block_pooling_type'] = 'AVG'
        elif granularity == 'ChannelPruning':
            config['ch_axis'] = -1
        elif granularity == 'KernelLevel':
            config['ker_axis'] = [0, 1]
        elif granularity == 'TwoOutOfFour':
            # -2 selects input channels, -1 output channels
            block_axis = -2 if two_over_four_chin else -1
            config['block_axis'] = block_axis
            if respect_submatrix:
                config['respect_submatrix'] = True
        else:
            raise ValueError
        return pruning_base_configs.PruningGranularityConfig(
            class_name=granularity,
            config=config,
        )

    def get_pruning_config(_sparsity):
        # Combine schedule and granularity into one per-weight pruning config.
        return pruning_base_configs.PruningConfig(
            pruning_schedule=get_pruning_schedule_config(_sparsity),
            pruning_granularity=get_pruning_granularity_config(_sparsity),
        )

    model_pruning_config = pruning_base_configs.ModelPruningConfig(
        model_name=model_name,
        pruning=[]
    )
    # One LayerPruningConfig per configured layer; only 'kernel' weights
    # are pruned here.
    for layer_name, _sparsity in sparsity.items():
        layer_pruning_config = pruning_base_configs.LayerPruningConfig(
            layer_name=layer_name,
            pruning = [
                pruning_base_configs.WeightPruningConfig(
                    weight_name='kernel',
                    pruning=get_pruning_config(_sparsity),
                )
            ]
        )
        model_pruning_config.pruning.append(layer_pruning_config)
    if granularity == 'ChannelPruning' and ch_share:
        if model_name.startswith('resnet'):
            model_pruning_config.share_mask = _get_resnet_share_mask(model_name)

    if path:
        def save_params_dict_to_yaml(params, file_path):
            """Saves the input ParamsDict to a YAML file.

            Taken from params_dict.save_params_dict_to_yaml.
            """
            with tf.io.gfile.GFile(file_path, 'w') as f:
                #def _my_list_rep(dumper, data):
                #  # u'tag:yaml.org,2002:seq' is the YAML internal tag for sequence.
                #  return dumper.represent_sequence(
                #      u'tag:yaml.org,2002:seq', data, flow_style=True)
                #
                #yaml.add_representer(list, _my_list_rep)
                yaml.dump(params.as_dict(), f, default_flow_style=False)
        save_params_dict_to_yaml(model_pruning_config, path)

    return model_pruning_config
def collect_FR_dev(stim_array, stim_dt, sim_dt, spikemon, n, return_spikes=False):
    """
    Collect firing rates for every stimulation window of a spike monitor.

    Args:
        stim_array: array of stimulation times/strengths, e.g. [0,0,0,0,1,0,0,0,1,0,0]
        stim_dt: time interval of stimulation
        sim_dt: time interval of simulation
        spikemon: spike monitor (passed through to get_FR_dev)
        n: number of neurons
        return_spikes: unused; kept for interface compatibility  # TODO confirm

    Returns:
        FR_array: firing rate per stimulation window
        dev_array: standard deviation per stimulation window
        spikelist: (n, len(stim_start_times)) matrix of spike counts
    """
    # Stimulation onsets: index positions of nonzero entries, scaled by stim_dt
    stim_start_times = np.where(stim_array != 0)[0] * stim_dt
    n_windows = len(stim_start_times)
    # Preallocate firing rate, deviation and per-neuron spike-count arrays
    FR_array = np.zeros(n_windows)
    dev_array = np.zeros(n_windows)
    spikelist = np.zeros((n, n_windows))
    for i, start in enumerate(stim_start_times):
        FR_array[i], dev_array[i], spikelist[:, i] = get_FR_dev(
            start, start + stim_dt, sim_dt, spikemon, n)
    return FR_array, dev_array, spikelist
def assertDict(s):
    """Assert that the input is (or JSON-decodes to) a dictionary.

    Args:
        s: a dict, or a JSON string encoding a dict.

    Returns:
        The input dict (decoded from JSON when a string was given).

    Raises:
        AssertionError: if the string is not valid JSON, or the decoded /
            given value is not a dictionary.
    """
    if isinstance(s, str):
        try:
            s = json.loads(s)
        except ValueError:
            # Narrowed from a bare except: json.loads raises ValueError
            # (JSONDecodeError) on malformed input.
            raise AssertionError('String "{}" cannot be json-decoded.'.format(s))
    if not isinstance(s, dict):
        raise AssertionError('Variable "{}" is not a dictionary.'.format(s))
    return s
def _consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate and subdivide some geometry.

    Consolidate a geometry into a convex hull, then subdivide it into smaller
    sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry in geometry's units:
        any polygon bigger will get divided up for multiple queries to API

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # Quadrat edge length for subdivision: square root of the max area
    quadrat_width = math.sqrt(max_query_area_size)

    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise TypeError("Geometry must be a shapely Polygon or MultiPolygon")

    # MultiPolygons, and single Polygons over the size limit, are first
    # consolidated into their convex hull.
    oversized = geometry.area > max_query_area_size
    if isinstance(geometry, MultiPolygon) or (isinstance(geometry, Polygon) and oversized):
        geometry = geometry.convex_hull

    # Subdivide when the (possibly hulled) geometry still exceeds the limit
    if geometry.area > max_query_area_size:
        geometry = _quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    # Always return a MultiPolygon for a uniform downstream interface
    if isinstance(geometry, Polygon):
        return MultiPolygon([geometry])
    return geometry
def deleteAllPatientes():
    """Delete all patient records.

    Raises:
        HTTPException: raised if there is any error in the underlying CRUD
            operation.

    Returns:
        null: success response if all the records are successfully deleted
    """
    logging.debug("Router: /patient/all")
    # Fixed: removed pointless f-string prefix (no placeholders in the message)
    logging.debug("Router deleteAllPatientes")
    try:
        return PatientController.delete_all_patients()
    except PatientException as response_error:
        raise HTTPException(response_error.status_code, response_error.error_detail)
def get_function_args(node: ast.FunctionDef) -> Tuple[List[Any], List[Any]]:
    """
    This functon will process function definition and will extract all
    arguments used by a given function and return all optional and non-optional
    args used by the function.

    Args:
        node: Function node containing function that needs to be analyzed

    Returns:
        (non_optional_args, optional_args): named function args
    """
    assert isinstance(node, ast.FunctionDef), \
        "Incorrect node type. Expected ast.FunctionDef, got {}".format(type(node))

    # Generalization/bug fix: include positional-only args (PEP 570, `def f(a, /, b)`).
    # node.args.defaults covers posonlyargs + args combined, so they must be
    # counted together when splitting required from defaulted arguments.
    positional = node.args.posonlyargs + node.args.args
    num_required = len(positional) - len(node.args.defaults)

    non_optional_args = [arg.arg for arg in positional[:num_required]]
    optional_args = [arg.arg for arg in positional[num_required:]]

    # Keyword-only args are treated as optional, matching the original behavior
    # (regardless of whether each actually carries a default).
    for arg in node.args.kwonlyargs:
        optional_args.append(arg.arg)

    return non_optional_args, optional_args
def test_view_change_after_some_txns(txnPoolNodesLooper, txnPoolNodeSet,
                                     some_txns_done, testNodeClass, viewNo,  # noqa
                                     sdk_pool_handle, sdk_wallet_client,
                                     node_config_helper_class, tconf, tdir,
                                     allPluginsPath, tmpdir_factory):
    """
    Check that view change is done after processing some of txns
    """
    # Force a view change and wait until new primaries are elected and all
    # nodes converge on the same ledger state.
    ensure_view_change(txnPoolNodesLooper, txnPoolNodeSet)
    ensureElectionsDone(looper=txnPoolNodesLooper, nodes=txnPoolNodeSet)
    ensure_all_nodes_have_same_data(txnPoolNodesLooper, nodes=txnPoolNodeSet)
    # Send some traffic in the new view and re-check ledger consistency.
    sdk_send_random_and_check(txnPoolNodesLooper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 10)
    ensure_all_nodes_have_same_data(txnPoolNodesLooper, txnPoolNodeSet)
    # Stop all nodes before the replay phase below.
    for node in txnPoolNodeSet:
        txnPoolNodesLooper.removeProdable(node)
        node.stop()
    config = getConfigOnce()
    # Reload modules so the replay runs against freshly-initialized module
    # state rather than state left over from the live run.
    reload_modules_for_replay(tconf)
    replayable_node_class, basedirpath = get_replayable_node_class(
        tmpdir_factory, tdir, testNodeClass, config)
    print('-------------Replaying now---------------------')
    # Recreate each node from its recorded events and check that the replay
    # reproduces the original run.
    for node in txnPoolNodeSet:
        create_replayable_node_and_check(txnPoolNodesLooper, txnPoolNodeSet,
                                         node, replayable_node_class,
                                         node_config_helper_class, tconf,
                                         basedirpath, allPluginsPath)
def svn_client_copy3(*args):
    """
    svn_client_copy3(svn_commit_info_t commit_info_p, char src_path, svn_opt_revision_t src_revision,
        char dst_path,
        svn_client_ctx_t ctx, apr_pool_t pool) -> svn_error_t
    """
    # The Python 2-only builtin apply() was removed in Python 3;
    # direct argument unpacking is the exact equivalent.
    return _client.svn_client_copy3(*args)
def delete(server = None, keys = None):
    """
    Marks an entity or entities as deleted on the server. Until an entity
    is permanently deleted (an administrative operation, not available
    through the RESTful API), it can still be accessed, but will not turn
    up in search results.
    :param server: a :class:`~pyCoalesce.coalesce_request.CoalesceServer`
        object or the URL of a Coalesce server
    :param keys: a UUID key of the entity to be deleted, or an iterable of
        such keys. Each key can be an instance of the :class:`uuid.UUID`
        class, or any string or integer that could serve as input to the
        :class:`UUID <uuid.UUID>` class constructor.
    :returns: ``True`` if the returned status code is 204 (indicating a
        successful deletion), ``False`` (with a warning) in the unlikely
        event that the server returns another status code in the 200's.
        (Any value outside the 200's will cause an exception.)
    :raises ValueError: if no keys are supplied.
    """
    if isinstance(server, str):
        server_obj = CoalesceServer(server)
    else:
        server_obj = server
    # Figure out whether we have one key or an iterable of them, check the
    # validity of each, and transform them into a JSON array.
    if not keys:
        # Previously a falsy "keys" fell through and crashed later with a
        # NameError on "keys_str"; fail fast with a clear message instead.
        raise ValueError('The "keys" argument must be a key or an iterable ' +
                         'of keys.')
    # Test for a single key--a list of keys or a JSON array as a string
    # will cause "_test_key" to throw an error.
    try:
        key_str = _test_key(keys)
    except TypeError: # "keys" is probably a list of keys.
        keys_list = [_test_key(key) for key in keys]
        keys_str = json.dumps(keys_list)
    except ValueError: # "keys" is probably a JSON array of keys.
        json.loads(keys) # Make sure that "keys" is valid JSON.
        keys_str = keys
    else:
        keys_str = '["' + key_str + '"]'
    operation = "delete"
    try:
        API_URL = _construct_URL(server_obj = server_obj,
                                 operation = operation)
    except AttributeError as err:
        raise AttributeError(str(err) + '\n.This error can occur if the ' +
                             'argument "server" is not either a URL or a ' +
                             'CoalesceServer object.')
    method = OPERATIONS[operation]["method"]
    headers = copy(server_obj.base_headers)
    headers["Content-type"] = "application/json"
    # Submit the request.
    response = get_response(URL = API_URL, method = method, data = keys_str,
                            headers = headers, delay = 1, max_attempts = 4)
    # Check for the appropriate status code.
    status = response.status_code
    if status == 204:
        success = True
    else:
        # "status" is an int: it must be converted before concatenation (the
        # original code raised a TypeError here instead of warning).
        warn("The API server returned an unexpected status code, " +
             str(status) +
             ". However, the entity might have been deleted on the server, " +
             "or might be deleted after a delay.", UnexpectedResponseWarning)
        success = False
    return success
def ParseMemCsv(f):
    """Compute summary stats for memory.

    vm5_peak_kib -> max(vm_peak_kib)  # over 5 second intervals.  Since it uses
    the kernel, it's accurate except for takes that spike in their last 4
    seconds.
    vm5_mean_kib -> mean(vm_size_kib)  # over 5 second intervals

    Args:
        f: iterable of CSV lines with columns timestamp, pid, rss, peak, size
           (first row is a header).
    Returns:
        dict mapping pid (str) -> (vm5_peak_kib, vm5_mean_kib).
    """
    peak_by_pid = collections.defaultdict(list)
    size_by_pid = collections.defaultdict(list)
    # Parse columns we care about, by PID
    c = csv.reader(f)
    for i, row in enumerate(c):
        if i == 0:
            continue  # skip header
        # looks like timestamp, pid, then (rss, peak, size)
        _, pid, _, peak, size = row
        if peak != '':
            peak_by_pid[pid].append(int(peak))
        if size != '':
            size_by_pid[pid].append(int(size))
    mem_by_pid = {}
    # Now compute summaries
    for pid, peaks in peak_by_pid.items():
        vm5_peak_kib = max(peaks)
        sizes = size_by_pid[pid]
        # A PID can have peak samples but no size samples (empty "size"
        # cells); guard against the ZeroDivisionError that used to raise.
        vm5_mean_kib = sum(sizes) / len(sizes) if sizes else 0
        mem_by_pid[pid] = (vm5_peak_kib, vm5_mean_kib)
    return mem_by_pid
def display(training_data, vis_data, interp):
    """Display training samples, true PSF, model PSF, and residual over field-of-view.
    """
    import matplotlib.pyplot as plt
    stars = params_to_stars(vis_data, noise=0.0)
    stars = interp.interpolateList(stars)
    fig, axarr = plt.subplots(5, 4, figsize=(7, 10))
    # One row per fitted parameter; four panels per row.
    for irow, var in enumerate(['u0', 'v0', 'hlr', 'g1', 'g2']):
        # Visualize GP interpolation performance on a grid of output locations.
        truth = np.array(vis_data[var]).ravel()
        interp_vals = np.array([s.fit.params[irow] for s in stars])
        vmin = np.min(truth)
        vmax = np.max(truth)
        if vmin == vmax:
            # Avoid a degenerate color scale when the field is constant.
            vmin -= 0.01
            vmax += 0.01
        panels = [
            ("sampling", training_data['u'], training_data['v'],
             training_data[var], vmin, vmax),
            ("truth", vis_data['u'], vis_data['v'], truth, vmin, vmax),
            ("interp", vis_data['u'], vis_data['v'], interp_vals, vmin, vmax),
            ("resid", vis_data['u'], vis_data['v'], interp_vals - truth,
             vmin / 10, vmax / 10),
        ]
        for icol, (title, u, v, color, lo, hi) in enumerate(panels):
            ax = axarr[irow, icol]
            ax.set_title(title)
            ax.set_xlim((-0.2, 1.2))
            ax.set_ylim((-0.2, 1.2))
            ax.scatter(u, v, c=color, vmin=lo, vmax=hi)
    for ax in axarr.ravel():
        ax.xaxis.set_ticks([])
        ax.yaxis.set_ticks([])
    plt.show()
def index_containing_substring(list_str, substring):
    """Return the index of the first element of *list_str* that contains
    *substring*.

    Parameters
    ----------
    list_str: list of strings
    substring: substring

    Returns
    -------
    index: containing the substring or -1
    """
    return next(
        (idx for idx, text in enumerate(list_str) if substring in text),
        -1,
    )
def _localized_country_list_inner(locale):
    """
    Inner function supporting :func:`localized_country_list`.
    """
    # Build (name, alpha_2) pairs, translating names for non-English locales.
    if locale == 'en':
        pairs = [(c.name, c.alpha_2) for c in pycountry.countries]
    else:
        translation = gettext.translation('iso3166-1', pycountry.LOCALES_DIR,
                                          languages=[locale])
        if six.PY2:
            pairs = [(translation.gettext(c.name).decode('utf-8'), c.alpha_2)
                     for c in pycountry.countries]
        else:
            pairs = [(translation.gettext(c.name), c.alpha_2)
                     for c in pycountry.countries]
    # Sort by localized name, then return as (code, name) tuples.
    pairs.sort()
    return [(code, name) for name, code in pairs]
def ingest_questions(questions: dict, assignment: Assignment):
    """
    Ingest posted question sequences into the database for *assignment*.

    Expected input shape:
    questions: [
      {
        sequence: int
        questions: [
          {
            q: str // what is 2*2
            a: str // 4
          },
        ]
      },
      ...
    ]

    :param questions: list of question-sequence dicts as described above
    :param assignment: Assignment the questions belong to
    :return: (accepted, ignored, rejected) lists of per-question outcome
        dicts, or None when *questions* is None
    """
    # Shape template consumed by _verify_data_shape for validation.
    question_shape = {"questions": {"q": str, "a": str}, "sequence": int}
    if questions is None:
        # NOTE(review): returns None here but a 3-tuple below; callers must
        # handle both shapes — consider returning ([], [], []) instead.
        return
    # Iterate over questions
    rejected, ignored, accepted = [], [], []
    for question_sequence in questions:
        shape_good, err_path = _verify_data_shape(question_sequence, question_shape)
        if not shape_good:
            # Reject the question if the shape is bad and continue
            rejected.append(
                {
                    "question": question_sequence,
                    "reason": "could not verify data shape " + err_path,
                }
            )
            continue
        # NOTE(review): "pool" is read here but is NOT part of question_shape
        # above, so a sequence that passes the shape check can still raise
        # KeyError — confirm the intended schema.
        pool = question_sequence["pool"]
        for question in question_sequence["questions"]:
            # Check to see if question already exists for the current
            # assignment
            exists = AssignmentQuestion.query.filter(
                AssignmentQuestion.assignment_id == assignment.id,
                AssignmentQuestion.question == question["q"],
            ).first()
            if exists is not None:
                # If the question exists, ignore it and continue
                ignored.append({"question": question, "reason": "already exists"})
                continue
            # Create the new question from posted data
            assignment_question = AssignmentQuestion(
                assignment_id=assignment.id,
                question=question["q"],
                solution=question["a"],
                pool=pool,
            )
            db.session.add(assignment_question)
            accepted.append({"question": question})
    # Commit additions
    db.session.commit()
    return accepted, ignored, rejected
def render_book_template(book_id):
    """
    Render the detail page for a single book.

    Find a specific book in the database.
    Locate the associated reviews (sorted by score and date).
    Create the purchase url.
    Check whether the user has saved the book to their wishlist.
    """
    # Find the book document in the database
    this_book = mongo.db.books.find_one({"_id": ObjectId(book_id)})
    # Find the reviews that relate to that book
    this_book_reviews = list(
        mongo.db.reviews.find({"book_id": ObjectId(book_id)})
    )
    # Sort by review score and then by date added
    sorted_book_reviews = sorted(
        this_book_reviews,
        key=lambda b: (-b["review_score"], -b["review_date"]),
    )
    # Create the book purchase url
    # by adding the book title and author to the url
    this_book_title = this_book["title"].replace(" ", "+")
    this_book_author = this_book["authors"][0].replace(" ", "+")
    book_purchase_url = (
        "https://www.amazon.com/s?tag=falsetag&k=" +
        this_book_title + "+" + this_book_author
    )
    # Create a list of users who have reviewed this book already
    reviewers = []
    for book_review in this_book_reviews:
        # Convert floats to datetime format in each book review
        # (this mutates the same dicts referenced by sorted_book_reviews,
        # so the sorted list is formatted too)
        book_review["review_date"] = datetime.datetime.fromtimestamp(
            book_review["review_date"]
        ).strftime("%a, %b %d, %Y")
        # Add reviewers to the reviewers list
        reviewers.append(book_review["created_by"])
    bookmark = False
    purchased = False
    # If the session cookie exists then the user is logged in
    if session:
        # Grab the session user's wishlist from the database
        wishlist = mongo.db.users.find_one({"username": session["user"]})[
            "wishlist"
        ]
        # Check to see whether the current user
        # has already saved this book to their wishlist;
        # if so, flag it so the template shows the bookmark as active
        if this_book["_id"] in wishlist:
            bookmark = True
        # Check and see whether the current user has reviewed this book
        # If they have presumably they don't want to purchase the book
        if session["user"] in reviewers:
            purchased = True
    return render_template(
        "view_book.html",
        this_book=this_book,
        this_book_reviews=sorted_book_reviews,
        book_purchase_url=book_purchase_url,
        reviewers=reviewers,
        bookmark=bookmark,
        purchased=purchased,
    )
async def replace_dispatcher(client: ClientAsync, replacement: Dispatcher):
    """
    Replace a dispatcher and assert the server acknowledged the replacement.

    Raises:
        AssertionError: if the server does not answer with HTTP 200; the
            response body is included in the message.
    """
    response = await client.replace_dispatcher(replacement)
    # The original message was missing its closing parenthesis.
    assert (
        response.status_code == 200
    ), f"failed to replace the dispatcher ({response.json()})"
def iatan2(y, x):
    """Axis-aligned atan2 in degrees; one coordinate must be zero."""
    if x:
        return 0 if x > 0 else 180
    return 90 if y > 0 else -90
def fuse_bn_sequential(model):
    """
    This function takes a sequential block and fuses each BatchNorm2d into the
    Conv2d that immediately precedes it. The conv's weight/bias are updated in
    place with the folded affine transform and the BN layer is removed.

    :param model: nn.Sequential. Source resnet model (non-Sequential modules
                  are returned unchanged)
    :return: nn.Sequential. Converted block (or a single module if only one
             layer remains)
    """
    if not isinstance(model, torch.nn.Sequential):
        return model
    stack = []
    for m in model.children():
        # Fuse only a BN that directly follows a conv. Otherwise keep the
        # layer: the original code dropped such BNs silently and crashed
        # with an IndexError when the model started with a BN.
        if (isinstance(m, torch.nn.BatchNorm2d) and stack
                and isinstance(stack[-1], torch.nn.Conv2d)):
            bn_st_dict = m.state_dict()
            conv_st_dict = stack[-1].state_dict()
            # BatchNorm params
            eps = m.eps
            mu = bn_st_dict['running_mean']
            var = bn_st_dict['running_var']
            gamma = bn_st_dict['weight']
            if 'bias' in bn_st_dict:
                beta = bn_st_dict['bias']
            else:
                beta = torch.zeros(gamma.size(0)).float().to(gamma.device)
            # Conv params
            W = conv_st_dict['weight']
            if 'bias' in conv_st_dict:
                bias = conv_st_dict['bias']
            else:
                bias = torch.zeros(W.size(0)).float().to(gamma.device)
            # Folded transform: y = A * conv(x) + b, with A = gamma/sqrt(var+eps)
            denom = torch.sqrt(var + eps)
            b = beta - gamma.mul(mu).div(denom)
            A = gamma.div(denom)
            bias *= A
            A = A.expand_as(W.transpose(0, -1)).transpose(0, -1)
            W.mul_(A)
            bias.add_(b)
            stack[-1].weight.data.copy_(W)
            if stack[-1].bias is None:
                stack[-1].bias = torch.nn.Parameter(bias)
            else:
                stack[-1].bias.data.copy_(bias)
        else:
            stack.append(m)
    if len(stack) > 1:
        return torch.nn.Sequential(*stack)
    else:
        return stack[0]
def get_baseconf_settings( baseconf_settings_filename = None ):
    """
    Returns the basic configuration settings as a parameter structure.

    :param baseconf_settings_filename: loads the settings from the specified filename, otherwise from the default filename or in the absence of such a file creates default settings from scratch.
    :return: parameter structure
    """
    # Parameters for the general I/O and example cases.
    baseconf_params = pars.ParameterDict()
    baseconf_params[('baseconf',{},'determines if settings should be loaded from file and visualization options')]
    # No file given: populate built-in defaults and return early.
    if baseconf_settings_filename is None:
        print( 'Using default baseconf settings from config_parser.py')
        baseconf_params['baseconf'][('load_default_settings_from_default_setting_files',False,'if set to True default configuration files (in settings directory) are first loaded')]
        baseconf_params['baseconf'][('load_settings_from_file',True,'if set to True configuration settings are loaded from file')]
        baseconf_params['baseconf'][('save_settings_to_file',True,'if set to True configuration settings are saved to file')]
        if not baseconf_params['baseconf']['load_default_settings_from_default_setting_files']:
            print('HINT: Only compute_settings.json and baseconf_settings.json will be read from file by default.')
            print('HINT: Set baseconf.load_default_settings_from_default_setting_files to True if you want to use the other setting files in directory settings.')
            print('HINT: Otherwise the defaults will be as defined in config_parser.py.')
        return baseconf_params
    # Otherwise load the settings from the given JSON file.
    print( 'Loading baseconf configuration from: ' + baseconf_settings_filename )
    baseconf_params.load_JSON( baseconf_settings_filename )
    return baseconf_params
def get_variants(df, space_order, point_type, axis, stencils, weights):
    """
    Get the all the stencil variants associated with the points, evaluate them,
    and fill the respective positions in the weight function.

    Parameters
    ----------
    df : pandas DataFrame
        The dataframe of boundary-adjacent points
    space_order : int
        The order of the function for which stencils are to be generated
    point_type : string
        The category of the points. Can be 'first', 'last', 'double',
        'paired_left', or 'paired_right'.
    axis : str
        The axis along which the stencils are orientated. Can be 'x', 'y', or
        'z'.
    stencils : ndarray
        The functions for stencils to be evaluated
    weights : devito Function
        The Function to fill with stencil coefficients

    Notes
    -----
    An unrecognized point_type is silently ignored (no branch matches).
    """
    if point_type == 'first':
        # Number of points whose stencils need modification for this category.
        n_pts = np.minimum(int(space_order/2), 1-df.dist.to_numpy())
        # Modifier for points which lie within half a grid spacing of the boundary
        modifier_right = np.where(df.eta_r.to_numpy() - 0.5 < _feps, 0, 1)
        # Starting point for the right stencil (moving from left to right)
        start_right = space_order-2*(n_pts-1)-modifier_right
        i_min = np.amin(n_pts)
        i_max = np.amax(n_pts)
        # Process together all points sharing the same modified-point count i.
        for i in np.linspace(i_min, i_max, 1+i_max-i_min, dtype=int):
            mask = n_pts == i
            mask_size = np.count_nonzero(mask)
            left_variants = np.zeros((mask_size, i), dtype=int)
            # This is capped at space_order to prevent invalid variant numbers
            right_variants = np.minimum(np.tile(2*np.arange(i), (mask_size, 1))
                                        + start_right[mask, np.newaxis],
                                        space_order)
            # Iterate over left and right variants
            eval_stencils = evaluate_stencils(df[mask], 'first', i,
                                              left_variants, right_variants,
                                              space_order, stencils)
            # Insert the stencils into the weight function
            fill_weights(df[mask], eval_stencils, 'first',
                         weights, axis, n_pts=i)
    elif point_type == 'last':
        n_pts = np.minimum(int(space_order/2), 1+df.dist.to_numpy())
        # Modifier for points which lie within half a grid spacing of the boundary
        modifier_left = np.where(df.eta_l.to_numpy() - -0.5 > _feps, 0, 1)
        start_left = space_order-modifier_left
        i_min = np.amin(n_pts)
        i_max = np.amax(n_pts)
        for i in np.linspace(i_min, i_max, 1+i_max-i_min, dtype=int):
            mask = n_pts == i
            mask_size = np.count_nonzero(mask)
            # This is capped at space_order to prevent invalid variant numbers
            left_variants = np.minimum(np.tile(-2*np.arange(i), (mask_size, 1))
                                       + start_left[mask, np.newaxis],
                                       space_order)
            right_variants = np.zeros((mask_size, i), dtype=int)
            # Iterate over left and right variants
            eval_stencils = evaluate_stencils(df[mask], 'last', i,
                                              left_variants, right_variants,
                                              space_order, stencils)
            # Insert the stencils into the weight function
            fill_weights(df[mask], eval_stencils, 'last',
                         weights, axis, n_pts=i)
    elif point_type == 'double':
        # 'double' points are bounded on both sides; only one point is modified.
        n_pts = 1
        # Modifier for points which lie within half a grid spacing of the boundary
        modifier_left = np.where(df.eta_l.to_numpy() - -0.5 > _feps, 0, 1)
        modifier_right = np.where(df.eta_r.to_numpy() - 0.5 < _feps, 0, 1)
        # Mask for where both etas are zero (points on boundary)
        zero_mask = np.logical_and(np.abs(df.eta_l.to_numpy()) < _feps,
                                   np.abs(df.eta_r.to_numpy()) < _feps)
        # Stencil wants to be zero for points exactly on boundary, so set invalid variant numbers
        # NOTE(review): np.NaN was removed in NumPy 2.0 (use np.nan) — confirm
        # the pinned NumPy version before upgrading.
        modifier_zero = np.where(zero_mask, np.NaN, 0)
        # This will cause stencil to default to zero
        start_left = space_order-modifier_left+modifier_zero
        start_right = space_order-modifier_right+modifier_zero
        # This is capped at space_order to prevent invalid variant numbers
        left_variants = np.minimum(start_left[:, np.newaxis], space_order)
        right_variants = np.minimum(start_right[:, np.newaxis], space_order)
        # Iterate over left and right variants
        eval_stencils = evaluate_stencils(df, 'double', 1,
                                          left_variants, right_variants,
                                          space_order, stencils)
        # Insert the stencils into the weight function
        fill_weights(df, eval_stencils, 'double', weights, axis)
    elif point_type == 'paired_left':
        n_pts = np.minimum(int(space_order/2), df.dist.to_numpy())
        # Modifier for points which lie within half a grid spacing of the boundary
        modifier_left = np.where(df.eta_l.to_numpy() - -0.5 > _feps, 0, 1)
        modifier_right = np.where(df.eta_r.to_numpy() - 0.5 < _feps, 0, 1)
        start_left = space_order-modifier_left
        start_right = space_order-2*df.dist.to_numpy()-modifier_right
        i_min = np.amin(n_pts)
        i_max = np.amax(n_pts)
        for i in np.linspace(i_min, i_max, 1+i_max-i_min, dtype=int):
            mask = n_pts == i
            mask_size = np.count_nonzero(mask)
            # This is capped at space_order to prevent invalid variant numbers
            left_variants = np.minimum(np.tile(-2*np.arange(i), (mask_size, 1))
                                       + start_left[mask, np.newaxis],
                                       space_order)
            right_variants = np.minimum(np.maximum(np.tile(2*np.arange(i), (mask_size, 1))
                                                   + start_right[mask, np.newaxis], 0),
                                        space_order)
            # Iterate over left and right variants
            eval_stencils = evaluate_stencils(df[mask], 'paired_left', i,
                                              left_variants, right_variants,
                                              space_order, stencils)
            # Insert the stencils into the weight function
            fill_weights(df[mask], eval_stencils, 'paired_left',
                         weights, axis, n_pts=i)
    elif point_type == 'paired_right':
        n_pts = np.minimum(int(space_order/2),
                           1-df.dist.to_numpy()-np.minimum(int(space_order/2),
                                                           -df.dist.to_numpy()))
        # Modifier for points which lie within half a grid spacing of the boundary
        modifier_left = np.where(df.eta_l.to_numpy() - -0.5 > _feps, 0, 1)
        modifier_right = np.where(df.eta_r.to_numpy() - 0.5 < _feps, 0, 1)
        start_left = space_order+2*df.dist.to_numpy()-modifier_left
        start_right = space_order-2*(n_pts-1)-modifier_right
        i_min = np.amin(n_pts)
        i_max = np.amax(n_pts)
        for i in np.linspace(i_min, i_max, 1+i_max-i_min, dtype=int):
            mask = n_pts == i
            mask_size = np.count_nonzero(mask)
            # This is capped at space_order to prevent invalid variant numbers
            left_variants = np.minimum(np.maximum(np.tile(-2*np.arange(i), (mask_size, 1))
                                                  + start_left[mask, np.newaxis], 0),
                                       space_order)
            right_variants = np.minimum(np.tile(2*np.arange(i), (mask_size, 1))
                                        + start_right[mask, np.newaxis],
                                        space_order)
            # Iterate over left and right variants
            eval_stencils = evaluate_stencils(df[mask], 'paired_right', i,
                                              left_variants, right_variants,
                                              space_order, stencils)
            # Insert the stencils into the weight function
            fill_weights(df[mask], eval_stencils, 'paired_right',
                         weights, axis, n_pts=i)
def create_app(test_config=None):
    """Create and configure an instance of the Flask application."""
    app = Flask(__name__, instance_relative_config=True)
    app.logger.debug('app.instance_path = %s', app.instance_path)
    # NOTE(review): a SECRET_KEY hard-coded in source is unsafe for
    # production — it should come from the environment or the instance
    # config (which may override it below). Left as-is because changing it
    # would invalidate existing signed sessions.
    app.config.from_mapping(
        SECRET_KEY="$%px0vz%84j2y9ztqg^8k8_!8*-372g85z73(art-z#+5l5h1w'",)
    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile("config.py", silent=True)
    else:
        # load the test config if passed in
        app.config.update(test_config)
    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        # Folder already exists (or cannot be created); the app runs either way.
        pass
    @app.route("/hello")
    def hello():
        return "Hello, World!"
    # register the database commands
    from blog import blog, user, api,auth
    app.register_blueprint(blog.bp)
    app.register_blueprint(user.bp)
    app.register_blueprint(api.bp)
    app.register_blueprint(auth.bp)
    # Make "/" resolve to the blueprint's "index" endpoint.
    app.add_url_rule("/", endpoint="index")
    return app
def subtract_background(image, background_image):
    """Subtracts background image from a specified image.

    Both inputs are converted to a signed integer dtype first so the
    subtraction can go negative (e.g. for uint8 inputs).

    Parameters
    ----------
    image : np.ndarray
        Image to correct.
    background_image : np.ndarray
        Background frame, same shape as *image*.

    Returns
    -------
    bs_image : np.ndarray of signed integer dtype | shape = [image.shape]
        Background-subtracted image.
    """
    # np.int was a deprecated alias for the builtin int and was removed in
    # NumPy 1.24; astype(int) has identical semantics.
    image = image.copy().astype(int)
    background = background_image.copy().astype(int)
    bs_image = image - background
    return bs_image.astype(int)
def preprocess_testing():
    """Preprocess examples per FLAGS and write them to a gzip TFRecord file."""
    FLAGS = tf.app.flags.FLAGS
    examples = preprocess({
        'source_path': FLAGS.source_csv_path,
        'image_size': FLAGS.image_size,
        'perturb': FLAGS.perturb,
    })
    # NOTE: write gzip
    options = tf.python_io.TFRecordOptions(
        tf.python_io.TFRecordCompressionType.GZIP)
    with tf.python_io.TFRecordWriter(FLAGS.result_tfr_path, options=options) as writer:
        for example in examples:
            writer.write(example.SerializeToString())
    print('done: {}'.format(FLAGS.result_tfr_path))
def batch_genomes(genomes, num_batches, order):
    """
    Populates 2D numpy array with len(rows)==num_batches in {order} major order.

    'col' order is for when you know you are using X number of nodes and want
    to evenly distribute genomes across each node.
    'row' order is for when you want to fill each node, i.e. you give each
    node 16 cores and would rather have 3 at 32 and 1 at 16 than 4 at 28.

    Unfilled trailing cells are left as None.

    Raises:
        ValueError: if order is not 'col' or 'row'.
    """
    total_genomes = len(genomes)
    # num_batches designates number of rows in col major order
    # but number of cols in row major order
    genomes_per_batch = math.ceil(total_genomes/num_batches)
    batches = np.empty([num_batches, genomes_per_batch], dtype=object)
    if order == 'col':
        for i, genome in enumerate(genomes):
            batches[i%num_batches][i//num_batches] = genome
    elif order == 'row':
        for i, genome in enumerate(genomes):
            batches[i//genomes_per_batch][i%genomes_per_batch] = genome
    else:
        # ValueError is more precise than the bare Exception raised before,
        # and remains catchable by existing "except Exception" callers.
        raise ValueError("Order must be specified as 'col' or 'row'")
    return batches
def single_gate_params(gate, params=None):
    """Map a single-qubit gate name to its U-gate angles.

    Args:
        gate(str): the single qubit gate name
        params(list): the operation parameters op['params']
    Returns:
        a tuple of U gate parameters (theta, phi, lam), or None for an
        unrecognized gate name
    """
    # Guard-clause chain instead of if/elif ladder; same dispatch order.
    if gate in ('U', 'u3'):
        return (params[0], params[1], params[2])
    if gate == 'u2':
        return (np.pi/2, params[0], params[1])
    if gate == 'u1':
        return (0., 0., params[0])
    if gate == 'id':
        return (0., 0., 0.)
def main():
    """
    Query the earthquakes JSON dataset via Spark SQL and print every
    (magnitude, place) row whose magnitude exceeds 1.0, plus the wall time.

    NOTE(review): relies on module-level globals `root`, `path`, and
    `sqlContext` being initialized elsewhere in this file — confirm.

    :return: Place and magnitude, where magnitude is greater than 1.0.
    """
    start = time.time()
    data = os.path.join(root, path)
    # Load the JSON file(s) into a DataFrame and expose them to SQL.
    df = sqlContext.read.json(data)
    df.createOrReplaceTempView('earthquakes')
    earthquakes_df = sqlContext.sql("SELECT properties.mag, properties.place "
                                    "FROM earthquakes "
                                    "WHERE properties.mag > 1.0")
    # show() prints the first rows to stdout (the "return value" above).
    earthquakes_df.show()
    end = time.time()
    print('Time spent', end - start, 'seconds')
def pickleAllFeatureFilesFromDir(in_path, out_path, is_malware=False):
    """
    Creates pickle files with ML features for all the mmt-probe .csv reports in the given folder
    :param in_path: folder with .csv reports
    :param out_path: folder where pickle files are to be written
    :param is_malware: label (normal 0 or malicious 1) to be added into dataframe in the pickle
    :return: None
    """
    for report_name in os.listdir(in_path):
        print("Processing {}".format(report_name))
        # os.path.join is robust to in_path/out_path lacking a trailing
        # separator, which the previous plain string concatenation was not.
        trafficToFeatures(os.path.join(in_path, report_name),
                          os.path.join(out_path, report_name + '.pkl'),
                          is_malware)
def check_copr_build(build_id: int) -> bool:
    """
    Check the copr_build with given id and refresh the status if needed.

    Used in the babysit task.

    :param build_id: id of the copr_build (CoprBuildModel.build.id)
    :return: True if in case of successful run, False when we need to retry
    """
    logger.debug(f"Getting copr build ID {build_id} from DB.")
    builds = CoprBuildModel.get_all_by_build_id(build_id)
    if not builds:
        # Nothing to update; True stops the babysit retries.
        logger.warning(f"Copr build {build_id} not in DB.")
        return True
    copr_client = CoprClient.create_from_config_file()
    build_copr = copr_client.build_proxy.get(build_id)
    if not build_copr.ended_on:
        # Still running on Copr's side; False asks the babysit task to retry.
        logger.info("The copr build is still in progress.")
        return False
    logger.info(f"The status is {build_copr.state!r}.")
    for build in builds:
        if build.status != "pending":
            logger.info(
                f"DB state says {build.status!r}, "
                "things were taken care of already, skipping."
            )
            continue
        chroot_build = copr_client.build_chroot_proxy.get(build_id, build.target)
        # Re-create the finished-build event so the regular end-of-build
        # handler can process it as if it arrived via fedmsg.
        event = CoprBuildEvent(
            topic=FedmsgTopic.copr_build_finished.value,
            build_id=build_id,
            build=build,
            chroot=build.target,
            status=(
                COPR_API_SUCC_STATE
                if chroot_build.state == COPR_SUCC_STATE
                else COPR_API_FAIL_STATE
            ),
            owner=build.owner,
            project_name=build.project_name,
            pkg=build_copr.source_package.get(
                "name", ""
            ),  # this seems to be the SRPM name
            timestamp=chroot_build.ended_on,
        )
        job_configs = get_config_for_handler_kls(
            handler_kls=CoprBuildEndHandler,
            event=event,
            package_config=event.get_package_config(),
        )
        # Run the end handler once per matching job config.
        for job_config in job_configs:
            CoprBuildEndHandler(
                package_config=event.package_config,
                job_config=job_config,
                data=EventData.from_event_dict(event.get_dict()),
                copr_event=event,
            ).run()
    return True
def net_to_graph(net):
    """
    Convert Net object from parse_net_file to graph represented
    (as per dijkstra.py) as dict of dicts where G[v][w] for any v,w
    is cost of edge from v to w. Here v and w are just integers (node numbers).

    Duplicate links (same init/term node pair) are removed from net.links as
    a side effect; only the first occurrence of each pair is kept and
    net.num_links is updated accordingly.

    Parameters:
       net (in/OUT) - Net object as returned by parse_net_file()
                      duplicate entries in the links list are removed
    Return value:
       graph (dict of dicts) as described above
    """
    sys.stderr.write('[debug]: net_to_graph edges = ' + str(len(net.links))+'\n')
    # Python 3 replacements for the original xrange/has_key idioms.
    netgraph = {i: {} for i in range(1, net.num_nodes + 1)}
    for link in net.links:
        if (link.init_node in netgraph and
                link.term_node in netgraph[link.init_node]):
            # Duplicate edge (observed e.g. in the Berlin data): warn and
            # keep only the first link's cost.
            sys.stderr.write('WARNING: duplicate link %d -> %d\n' %
                             (link.init_node, link.term_node))
            sys.stderr.write('   using first link only\n')
        else:
            netgraph[link.init_node][link.term_node] = link.cost
    # Rebuild net.links keeping only the first occurrence of each
    # (init_node, term_node) pair. The original Link objects are reused, so
    # no field-by-field copying is needed.
    kept_pairs = set()
    deduped_links = []
    for link in net.links:
        pair = (link.init_node, link.term_node)
        if pair not in kept_pairs:
            kept_pairs.add(pair)
            deduped_links.append(link)
    if net.num_links != len(deduped_links):
        sys.stderr.write('WARNING: had %d links, now %d after deleting duplicates\n' % (len(net.links), len(deduped_links)))
    net.links = deduped_links
    net.num_links = len(deduped_links)
    return netgraph
def runTests(data, targets, pipeline, parameters):
    """Perform a 10-fold grid search with the specified pipeline and
    parameters on the training data with targets as labels.

    Performance is scored by precision; the best estimator's parameters are
    printed and the fitted grid search object is returned for further
    analysis.
    """
    grid_search = GridSearchCV(pipeline, parameters, verbose=1, cv=10, scoring='precision')
    print("Performing grid search...")
    print("pipeline:", [name for name, _ in pipeline.steps])
    print("parameters:")
    print(parameters)
    started = time()
    grid_search.fit(data, targets)
    print("done in %0.3fs" % (time() - started))
    print()
    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    fitted_params = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, fitted_params[param_name]))
    return grid_search
def isMultipleTagsInput(item):
    """
    Returns True if the argument datatype is not a column or a table, and if it allows lists and if it has no permitted value.
    This function is used to check whether the argument values have to be delimited by the null character (returns True) or not.
    :param item: Table argument.
    """
    scalar_types = {'STRING', 'DOUBLE', 'INTEGER', 'DRIVER', 'SQLEXPR', 'LONG'}
    if item.get('datatype', 'STRING') not in scalar_types:
        return False
    return item.get('allowsLists', False) and not item.get('permittedValues', [])
def _setup_default_prefixer():
    """`reverse` depends on a prefixer being set for an app and/or locale in the url,
    and for non-requests (i.e. cron) this isn't set up."""
    # Build a minimal synthetic request to seed the prefixer with.
    fake_request = HttpRequest()
    fake_request.META['SCRIPT_NAME'] = ''
    prefixer = amo.urlresolvers.Prefixer(fake_request)
    prefixer.app = settings.DEFAULT_APP
    prefixer.locale = settings.LANGUAGE_CODE
    amo.reverse.set_url_prefix(prefixer)
def _send():
    """Polls the mg9_send_q queue, sending requests to margo. If the margo
    process is not running _send() starts it and sets the PROC_ATTR_NAME attr.

    Intended to run forever on a dedicated worker thread; the outer loop only
    exits via the final ``break`` after an unexpected top-level exception.
    """
    # TODO: REFACTOR.
    while True:
        try:
            try:
                # Blocks until another thread queues a (method, arg, callback).
                method, arg, cb = gs.mg9_send_q.get()
                # CEV: proc should be the margo process.
                proc = gs.attr(PROC_ATTR_NAME)
                # CEV: Looks like this starts/restarts the margo process.
                if not proc or proc.poll() is not None:
                    # print("###: PROC DIED") # WARN
                    killSrv()
                    if _inst_state() != "busy":
                        maybe_install()
                    # TODO: Improve the handling of install state.
                    while _inst_state() == "busy":
                        time.sleep(0.100)
                    # Margo path and command line options.
                    mg_bin = _margo_bin()
                    cmd = [
                        mg_bin,
                        "-oom",
                        gs.setting("margo_oom", 0),
                        "-poll",
                        30,
                        "-tag",
                        TAG,
                    ]
                    c = sh.Command(cmd)
                    c.stderr = gs.LOGFILE
                    # WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN
                    # WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN
                    # WARN (CEV): seeting GOGC
                    # c.env = {"GOGC": 10, "XDG_CONFIG_HOME": gs.home_path()}
                    c.env = {"XDG_CONFIG_HOME": gs.home_path()}
                    pr = c.proc()
                    if pr.ok:
                        proc = pr.p
                        err = ""
                    else:
                        proc = None
                        err = "Exception: %s" % pr.exc
                    # Startup failed: report to the caller and wait for the
                    # next queued request.
                    if err or not proc or proc.poll() is not None:
                        killSrv()
                        _call(cb, {}, "Abort. Cannot start MarGo: %s" % err)
                        continue
                    # Set the process name
                    gs.set_attr(PROC_ATTR_NAME, proc)
                    # Launch stdout feed.
                    gsq.launch(DOMAIN, lambda: _read_stdout(proc))
                    # WARN WARN WARN WARN
                    # gsq.launch(DOMAIN, lambda: _read_stderr(proc))
                # Register the request so the stdout reader can match the
                # response back to its callback by token.
                req = Request(callback=cb, method=method)
                gs.set_attr(REQUEST_PREFIX + req.token, req)
                # header, err = gs.json_encode(req.header())
                # if err:
                #     _cb_err(cb, "Failed to construct ipc header: %s" % err)
                #     continue
                #
                # body, err = gs.json_encode(arg)
                # if err:
                #     _cb_err(cb, "Failed to construct ipc body: %s" % err)
                #     continue
                #
                # ev.debug(DOMAIN, "margo request: %s " % header)
                #
                # try:
                #     # TODO (CEV): make this one object and encode it to bytes here
                #     proc.stdin.write(("%s %s\n" % (header, body)).encode('utf-8'))
                # except Exception as ex:
                #     _cb_err(cb, "Cannot talk to MarGo: %s" % ex)
                #     killSrv()
                #     gs.println(gs.traceback())
                # WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN
                # WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN
                #
                # Warning Use communicate() rather than .stdin.write, .stdout.read
                # or .stderr.read to avoid deadlocks due to any of the other OS
                # pipe buffers filling up and blocking the child process.
                #
                # WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN
                # WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN WARN
                ev.debug(DOMAIN, "margo request: {}".format(req.header()))
                data, err = gs.json_encode({
                    "method": req.method,
                    "token": req.token,
                    "body": arg,
                })
                if err:
                    _cb_err(cb, "Failed to construct request body (%s): %s" %
                            (req.method, err))
                    continue
                try:
                    # x = (data + "\n").encode("utf-8")
                    # print("#### DEBUG: sending request: {}".format(data))
                    # proc.stdin.write(x)
                    proc.stdin.write((data + "\n").encode("utf-8"))
                except Exception as e:
                    print("## Exception 1: {}".format(e))
                    _cb_err(cb, "Cannot talk to MarGo: %s" % e)
                    killSrv()
                    gs.println(gs.traceback())
            except Exception as e:
                # Per-request failure: kill the server but keep polling.
                print("## Exception 2: {}".format(e))
                killSrv()
                gs.println(gs.traceback())
        except Exception as e:
            # Unexpected failure in the recovery path itself: give up.
            print("## Exception 3: {}".format(e))
            gs.println(gs.traceback())
            break
def imagenet_vgg_compression(compression_config, var_config, overall_model, muVes, strategy, optimizer, verbose=True):
    """
    Build a compressed copy of a VGG-style ImageNet Keras model.

    Walks the compute layers ('conv*', 'fc*', 'output') of ``overall_model``
    and, driven by ``compression_config``, optionally (a) replaces a layer's
    input with a PCA-projected version and/or (b) kills (prunes) some of the
    layer's outputs, fixing up the next layer's weights to compensate.

    :param compression_config: dict mapping each compute-layer name to a
        2-tuple ``(input_pca_spec, output_kill_spec)``; ``None`` disables the
        corresponding transformation for that layer.
    :param var_config: dict of options; only
        ``'add_dropout_to_compressed_model'`` is read here.
    :param overall_model: the trained, uncompressed ``tf.keras`` model.
    :param muVes: list of precomputed PCA tuples ``(mu, V, e)`` for every
        compute layer except the first conv (hence the index-1 offsets below).
    :param strategy: ``tf.distribute`` strategy; the new model is created
        inside its scope.
    :param optimizer: optimizer used to compile the compressed model.
    :param verbose: forwarded to the output-killing helper for logging.
    :return: a compiled ``tf.keras.Sequential`` compressed model.
    """
    cc = compression_config
    # Get information about the layers in the original network
    all_layers = []
    n_full = {}     # compute-layer name -> number of output units/channels
    last_conv = ""  # name of the final conv layer (conv->dense transition point)
    for l in overall_model.layers:  # note: the input layer is not included in overall_model.layers
        if 'conv' in l.name or 'fc' in l.name or 'output' in l.name:
            n_full[l.name] = l.output_shape[-1]
            if 'conv' in l.name:
                last_conv = l.name
        all_layers.append(l.name)
    # Verify necessary config constraints
    cc_keys = list(cc.keys())
    cc_values = list(cc.values())
    assert cc_keys == list(n_full.keys()), 'compression config must have keys for all compute layers (conv, fc, output)'
    assert cc_values[0][0] is None, 'currently we are not considering compression of the network input'
    for index in range(len(cc_keys)):
        if cc_keys[index] in ['output']:
            continue
        if cc_values[index][1] is not None:
            # Killing outputs changes this layer's output basis, so the next
            # layer must project its input through PCA to stay consistent.
            assert cc_values[index+1][0] is not None, 'to kill outputs the next layer must perform PCA compression'
    assert cc_values[-1][1] is None, 'can not kill columns in the output layer'
    assert len(cc_keys)-1 == len(muVes), 'PCA for each compute layer (except conv1) should already be computed'
    # Go through the layers and create the (possibly) compressed layers
    # TODO: model creation inside distribute strategy
    with strategy.scope():
        compressed_model = tf.keras.Sequential()
        compressed_model.add(tf.keras.layers.InputLayer(input_shape=overall_model.layers[0].input_shape[1:],
                                                        name='input'))
        # mu_c/V_c carry PCA state from a layer that killed outputs to the
        # next layer; W_n carries the next layer's (already fixed-up) weights.
        mu_c, V_c, W_n = None, None, None
        for index in range(len(cc_keys)):
            prev_layer = cc_keys[index-1] if index-1 >= 0 else None
            curr_layer = cc_keys[index]
            next_layer = cc_keys[index+1] if index+1 <= len(cc_keys)-1 else None
            n_c = n_full[curr_layer]
            W_c = overall_model.get_layer(curr_layer).weights[0] if W_n is None else W_n
            b_c = overall_model.get_layer(curr_layer).weights[1]
            W_n = overall_model.get_layer(next_layer).weights[0] if next_layer is not None else None
            mu_p, V_p = mu_c, V_c  # previous layer could have modified mu, V if it killed columns
            conv_c = True if 'conv' in curr_layer else False
            # generic iteration
            if cc[curr_layer][0] is not None and cc[prev_layer][1] is None:
                mu_p, V_p, e_p = muVes[index-1]  # want output of previous layer (input to this layer), recall offset
            if cc[curr_layer][1] is not None:
                mu_c, V_c, e_c = muVes[index]  # want mu, V output of this layer (input to next layer), recall offset
                num, ut = decode_cc(cc[curr_layer][1])
                n_c, W_c, b_c, mu_c, V_c, W_n = tf_kill_outputs(W_c, b_c, mu_c, V_c, W_n, num, num_as_threshold=ut,
                                                               conv=conv_c, conv_to_dense=curr_layer == last_conv,
                                                               verbose=verbose, prefix='  {}'.format(curr_layer))
            else:
                mu_c, V_c = None, None
            # add layer
            activation = None if curr_layer == 'output' else 'relu'
            if conv_c and cc[curr_layer][0] is not None:
                W_c_p, b_c_p = tf_transform_conv_weights(mu_p, V_p, W_c, b_c)
                compressed_model.add(Conv2DPCALayer(int(n_c), 3, mu_p, V_p, kernel_initializer=cift(W_c_p),
                                                    bias_initializer=cift(b_c_p), activation='relu', name=curr_layer))
            elif conv_c:
                compressed_model.add(tf.keras.layers.Conv2D(int(n_c), 3, padding='same', kernel_initializer=cift(W_c),
                                                            bias_initializer=cift(b_c), activation='relu',
                                                            name=curr_layer))
            elif cc[curr_layer][0] is not None:
                W_c_p, b_c_p = tf_transform_dense_weights(mu_p, V_p, W_c, b_c)
                compressed_model.add(DensePCALayer(int(n_c), mu_p, V_p, kernel_initializer=cift(W_c_p),
                                                   bias_initializer=cift(tf.squeeze(b_c_p)), activation=activation,
                                                   name=curr_layer))
            else:
                compressed_model.add(tf.keras.layers.Dense(int(n_c), kernel_initializer=cift(W_c),
                                                           bias_initializer=cift(b_c), activation=activation,
                                                           name=curr_layer))
            # add extra layers if necessary (pooling/flatten/dropout/softmax
            # layers sitting between this compute layer and the next one)
            c_i = all_layers.index(curr_layer)
            n_i = all_layers.index(next_layer) if next_layer is not None else len(all_layers)
            while c_i < n_i - 1:
                c_i += 1
                if 'mp' in all_layers[c_i]:
                    compressed_model.add(tf.keras.layers.MaxPooling2D((2, 2), name=all_layers[c_i]))
                elif 'flatten' in all_layers[c_i]:
                    compressed_model.add(tf.keras.layers.Flatten(name=all_layers[c_i]))
                # TODO: do you want to add dropout back in, does dropout scaling affect compression??
                elif 'dropout' in all_layers[c_i] and var_config['add_dropout_to_compressed_model'] is True:
                    compressed_model.add(tf.keras.layers.Dropout(0.5))
                elif 'softmax' in all_layers[c_i]:
                    compressed_model.add(tf.keras.layers.Softmax(name=all_layers[c_i]))
    # Optimizer
    # TODO: fix this
    compressed_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                             optimizer=optimizer, metrics=['accuracy'])
    # compressed_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
    #                          optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
    return compressed_model
def remove_url(url: str = Form(...)):
    """
    Remove url from the url json file

    :param url: api url in the format: http://ip:port/
    :return: ApiResponse
    """
    try:
        payload = helpers.parse_json(url_config_path)
    except Exception as e:
        # Stringify the exception so the error field stays JSON-serializable.
        return ApiResponse(success=False, error=str(e))
    if url in payload['urls']:
        payload['urls'].remove(url)
        helpers.write_json(payload, url_config_path)
        # Bug fix: the original passed a set literal {"url removed successfully"}
        # as `data`, which is not JSON-serializable; use a message dict instead.
        return ApiResponse(data={"message": "url removed successfully"})
    else:
        return ApiResponse(success=False, error="url is not present in config file")
def test_check_size() -> None:
    """
    Exercise `check_size_matches` on matching and mismatching array pairs.
    """
    arr_3d = np.zeros((2, 3, 4))
    arr_4d = np.zeros((5, 2, 3, 4))
    # These combinations are expected to validate without raising.
    check_size_matches(arr_3d, arr_3d)
    check_size_matches(arr_3d, arr_4d, matching_dimensions=[-3, -2, -1])
    check_size_matches(arr_3d, arr_4d, dim1=3, dim2=4, matching_dimensions=[-3, -2, -1])
    check_size_matches(arr_3d, arr_3d, matching_dimensions=[0, 1])

    def throws(func: Callable[..., None]) -> None:
        with pytest.raises(ValueError) as e:
            func()
        print("Exception message: {}".format(e.value))

    # Can't compare arrays of different dimension
    throws(lambda: check_size_matches(arr_3d, arr_4d))  # type: ignore
    # arr_4d has wrong dimension
    throws(lambda: check_size_matches(arr_3d, arr_4d, dim1=3, dim2=3))  # type: ignore
    # arr_3d has wrong dimension
    throws(lambda: check_size_matches(arr_3d, arr_4d, dim1=4, dim2=4))  # type: ignore
    # arr_3d has wrong dimension [0]
    throws(lambda: check_size_matches(arr_3d, arr_4d, dim1=4, dim2=4))
def load_CSVdata(messages_filepath, categories_filepath):
    """
    Load the messages and categories CSV files and join them on 'id'.

    Inputs:
        messages_filepath: path to the CSV file containing messages
        categories_filepath: path to the CSV file containing categories
    Output:
        single dataframe with messages and categories merged on 'id'
    """
    messages_df = pd.read_csv(messages_filepath)
    categories_df = pd.read_csv(categories_filepath)
    # Join on the shared 'id' column (pd.merge default how='inner').
    return pd.merge(messages_df, categories_df, on='id')
def run(report_name):
    """Run a report and output to a text file."""
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    filename = '{} {}.txt'.format(today, report_name)
    # 'all' selects every configured report; otherwise match by name.
    selected = [settings for (rep, settings) in reports.items() if report_name in ('all', rep)]
    if not selected:
        click.echo('No report found named "{}"'.format(report_name))
        return
    with open(filename, 'w') as report_file:
        gcs = guild_collection_summary()
        for index, report in enumerate(selected):
            if index:
                # Blank lines between consecutive reports in the same file.
                report_file.write('\n\n\n')
            report_file.write(run_report(gcs, report))
    click.echo('Report was output to: {}'.format(filename))
def test_normalize_resource():
    """
    Verify that get_resource applies normalize_resource to resources
    handed out by the pool.
    """
    class Normalize(MockPool):
        def normalize_resource(self, resource):
            setattr(resource, 'one', 1)

    pool = Normalize(mockresource.factory, capacity=1)
    first = pool.get_resource()
    first_id = id(first._resource)
    first.one = 2
    assert first.one == 2
    first.close()
    # The same underlying resource must be re-normalized on re-checkout.
    second = pool.get_resource()
    second_id = id(second._resource)
    assert (second.one == 1 and first_id == second_id)
def cli(ctx, proxy, login):
    """
    NokDoc CLI Tool is exposing a set of commands to interact with
    Nokia documentation portal.
    It offers CLI experience for tasks like
    - getting links to the docs aggregated into HTML file
    - downloading docs collections automatically
    It works for authorized users and guests.
    """
    ctx.obj = {'LOGGED_IN': False}
    # defining a proxy
    if proxy:
        # NOTE(review): mutates the module-level `proxies` dict — confirm this
        # is meant to persist for the whole process.
        proxies['https'] = proxy
    # creating requests session object
    s = requests.session()
    s.proxies.update(proxies)
    s.verify = certifi.where()
    # building a context object to pass the session object around;
    # log in and get cookies to access "protected" docs
    if login:
        click.echo('\n  ####### LOGIN #######')
        pwd = click.prompt(
            '  Please enter your password for a "{}" user'.format(login), hide_input=True)
        s = user_auth(s, login, pwd)
        ctx.obj['LOGGED_IN'] = True
        ctx.obj['USERNAME'] = login
    # Session is stored for subcommands whether or not a login occurred.
    ctx.obj['SESSION'] = s
def test_post_415(
    api_client: TestClient,
    access_token,
) -> None:
    """Test a post request with invalid media type.

    Uploads this source file mislabeled as image/png and expects a 415.
    """
    # Read the payload up-front so the file handle is closed deterministically;
    # the original leaked the handle from an inline open(__file__).read().
    payload = Path(__file__).read_text()
    response = api_client.post(
        URL,
        files={
            "file": (Path(__file__).name, payload, "image/png"),
        },
        headers=access_token,
    )
    assert response.status_code == 415
    assert response.json()["detail"] == "only .png images are supported"
def accept_invite(payload, user):
    """
    Accepts an invite

    args: payload, user
    ret: response (Message with a status code and message body)
    """
    try:
        invite = Invites.get(payload['invite'])[0]
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; any lookup failure is treated as
        # "invite not found", matching the original best-effort behavior.
        return Message(
            Codes.NOT_FOUND,
            { 'message': 'There isn\'t any active invite with the given id.' }
        )
    if user['id'] != invite[1]:
        return Message(
            Codes.FORBIDDEN,
            { 'message': 'This invitation was sent to another user.' }
        )
    # Add the user to the group, then retire the invite.
    UsersGroups.insert(user['id'], invite[2])
    Invites.close(invite[0])
    return Message(
        Codes.SUCCESS,
        { 'message': 'You have successfully joined this group.' }
    )
def clean_status_output(
    input: str,
) -> Tuple[bool, Dict[str, str], List[Dict[str, str]]]:
    """Parse `tailscale status` text into (up, host_entry, peer_entries).

    The first status line describes the local host; later lines are peers.
    Example input::

        # Health check:
        #     - dns: rename /etc/resolv.conf /etc/resolv.pre-tailscale-backup.conf: device or resource busy
        100.64.0.1      test_domain_1        omnet        linux   -
        100.64.0.2     test_network_1       omnet        linux   active; relay "syd", tx 1188 rx 1040
    """
    peers: List[Dict[str, str]] = []
    host: Dict[str, str] = {}
    if "Tailscale is stopped." in input or "unexpected state: NoState" in input:
        return False, host, peers
    up = False
    for line in str(input).split("\n"):
        matched = re.match(r"^\d.+", line)
        if matched is None:
            # Only lines starting with a digit (an IP) are status rows.
            continue
        try:
            fields = re.split(r"(\s+)", matched.string)
            entry = {
                "ip": fields[0],
                "hostname": fields[2],
                "network": fields[4],
                "os": fields[6],
            }
            tail = matched.string.split(entry["os"])
            info = tail[1].strip() if len(tail) > 1 else ""
            entry["connection_info"] = info if len(tail) > 1 else "n/a"
            entry["connection_status"] = "n/a"
            for status in ("active", "idle"):
                if status in info:
                    entry["connection_status"] = status
            entry["connection_type"] = "n/a"
            for conn_type in ("relay", "direct"):
                if conn_type in info:
                    entry["connection_type"] = conn_type
            if host:
                peers.append(entry)
            else:
                # First parsed row is the local host.
                host = entry
                up = True
        except Exception as e:
            print("Error parsing tailscale status output", e)
    return up, host, peers
def test_tensor_method_mul():
    """test_tensor_method_mul

    Compile-checks Tensor multiplication (`x * (-y)`) followed by a
    transpose, exercising broadcasting between (5, 3) and (8, 5, 3).
    """
    class Net(Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.sub = P.Sub()  # NOTE(review): unused in construct — confirm intentional
        def construct(self, x, y):
            # Elementwise multiply by the negated operand, then transpose.
            out = x * (-y)
            return out.transpose()
    net = Net()
    x = ms.Tensor(np.ones([5, 3], np.float32))
    y = ms.Tensor(np.ones([8, 5, 3], np.float32))
    # Graph-compile only; no numeric assertion is made.
    _executor.compile(net, x, y)
def verify_target_catalog(df, metadf):
    """
    Check that each entry in the (pre magnitude cut) target catalog has
    a source_id that matches the original catalog. (i.e., ensure that no
    int/int64/str lossy conversion bugs have happened).

    :param df: concatenated target catalog with 'reference_id' and 'source_id'.
    :param metadf: per-catalog metadata with 'reference_id', 'Nstars', 'csv_path'.
    :raises AssertionError: if any catalog's source_ids mismatch the original.
    """
    print(79*'-')
    print('Beginning verification...')
    print(79*'-')
    for ix, r in metadf.sort_values('Nstars').iterrows():
        print(f'{r.reference_id} (Nstars={r.Nstars})...')
        # regex=False: reference_id is a literal string, not a regex pattern.
        # Without it, ids containing regex metacharacters (e.g. '+', '(')
        # would be misinterpreted or raise.
        sel = df.reference_id.str.contains(r.reference_id, regex=False)
        df_source_ids = np.array(df.loc[sel, 'source_id']).astype(np.int64)
        csvpath = os.path.join(clusterdatadir, r.csv_path)
        df_true = pd.read_csv(csvpath)
        if 'source_id' not in df_true.columns:
            # Some original catalogs name the column 'source'.
            df_true = df_true.rename(columns={"source":"source_id"})
        true_source_ids = (
            np.unique(np.array(df_true.source_id).astype(np.int64))
        )
        np.testing.assert_array_equal(
            np.sort(df_source_ids), np.sort(true_source_ids)
        )
    print('Verified that the pre-mag cut target catalog has source_ids that '
          'correctly match the original. ')
    print(79*'-')
def _get_top_artists(session: Session, limit=100):
    """Gets the top artists by follows of all of Audius"""
    # Artists = current users with at least one track, ranked by follower
    # count (user_id as a deterministic tiebreaker).
    query = (
        session.query(User)
        .select_from(AggregateUser)
        .join(User, User.user_id == AggregateUser.user_id)
        .filter(AggregateUser.track_count > 0, User.is_current)
        .order_by(desc(AggregateUser.follower_count), User.user_id)
        .limit(limit)
    )
    return helpers.query_result_to_list(query.all())
def run_test_spin_to_track_beacon(robot):
    """
    Tests the spin_until_beacon_seen and spin_to_track_beacon methods of the class.
      :type robot: rosebot.RoseBot
    """
    print('--------------------------------------------------')
    print('Testing the spin_to_track_beacon method of the BeaconSeeker')
    print('--------------------------------------------------')
    while True:
        print()
        # A 0 for either prompt exits the test loop.
        max_speed = int(input("Enter an integer for the max wheel speed (1 to 100): "))
        if max_speed == 0:
            break
        duration_s = int(input("How long would you like to track the beacon (seconds)? "))
        if duration_s == 0:
            break
        input("Press the ENTER key when ready for the robot to start moving.")
        # Track the beacon for the requested duration, then beep.
        # Heading is -25..25 degrees (0 = straight ahead, negative = beacon
        # to the left, positive = to the right); distance is 0..100, where
        # 100 is about 70 cm.
        robot.beacon_seeker.spin_to_track_beacon(max_speed, duration_s)
        robot.sound.beep()
def test_interpolation_option_contract():
    """ Tests for InterpolationOption pseudo-type """
    obj = putil.ptypes.interpolation_option
    check_contract(obj, 'interpolation_option', 5)
    exmsg = (
        "[START CONTRACT MSG: interpolation_option]Argument "
        "`*[argument_name]*` is not one of ['STRAIGHT', 'STEP', 'CUBIC', "
        "'LINREG'] (case insensitive)[STOP CONTRACT MSG]"
    )
    # An invalid value must raise with the exact contract message.
    AE(obj, ValueError, exmsg, obj='x')
    # None and any casing of the four options are accepted.
    obj(None)
    for option in ('STRAIGHT', 'STEP', 'CUBIC', 'LINREG'):
        obj(option)
        obj(option.lower())
def get_imagenet_lmdb(train_transform, val_transform, test_transform, CONFIG):
    """
    Load lmdb imagenet dataset
    https://github.com/Fangyh09/Image2LMDB
    """
    # Each split lives at <dataset_dir>/<split>_lmdb/<split>.lmdb.
    datasets = []
    for split, transform in (("train", train_transform),
                             ("val", val_transform),
                             ("test", test_transform)):
        lmdb_path = os.path.join(CONFIG.dataset_dir, f"{split}_lmdb", f"{split}.lmdb")
        datasets.append(ImageFolderLMDB(lmdb_path, transform, None))
    train_data, val_data, test_data = datasets
    return train_data, val_data, test_data
def _get_other_locations():
    """Returns all locations except convention venues.

    The queryset is memoized in ``location_cache['all']`` so it is only
    built once per process.
    """
    if 'all' not in location_cache:  # idiomatic membership test (no .keys())
        conv_venue = LocationType.objects.get(name='Convention venue')
        location_cache['all'] = Location.objects.exclude(loc_type=conv_venue)
    return location_cache['all']
def _check_cuda_version():
    """
    Make sure that CUDA versions match between the pytorch install and torchvision install

    Returns the torchvision-compiled CUDA version as an int (e.g. 10020),
    or -1 when the compiled ops are unavailable.
    Raises RuntimeError on a major/minor mismatch with torch.version.cuda.
    """
    if not _HAS_OPS:
        # Compiled extension missing entirely; nothing to compare.
        return -1
    import torch
    _version = torch.ops.torchvision._cuda_version()
    if _version != -1 and torch.version.cuda is not None:
        tv_version = str(_version)
        # torchvision encodes CUDA as an int: e.g. 9020 -> 9.2, 10020 -> 10.2.
        # Major is the first 1-2 digits; minor is the digit after the padded 0.
        if int(tv_version) < 10000:
            tv_major = int(tv_version[0])
            tv_minor = int(tv_version[2])
        else:
            tv_major = int(tv_version[0:2])
            tv_minor = int(tv_version[3])
        # torch reports CUDA as a dotted string, e.g. "10.2".
        t_version = torch.version.cuda
        t_version = t_version.split('.')
        t_major = int(t_version[0])
        t_minor = int(t_version[1])
        if t_major != tv_major or t_minor != tv_minor:
            raise RuntimeError("Detected that PyTorch and torchvision were compiled with different CUDA versions. "
                               "PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. "
                               "Please reinstall the torchvision that matches your PyTorch install."
                               .format(t_major, t_minor, tv_major, tv_minor))
    return _version
def test_dcgain_consistency():
    """Test to make sure that DC gain is consistently evaluated

    Checks that transfer-function and state-space representations agree on
    frequency response and dcgain() at/near poles and zeros at the origin
    and on the imaginary axis.
    """
    # Set up transfer function with pole at the origin
    sys_tf = ctrl.tf([1], [1, 0])
    assert 0 in sys_tf.pole()
    # Set up state space system with pole at the origin
    sys_ss = ctrl.tf2ss(sys_tf)
    assert 0 in sys_ss.pole()
    # Finite (real) numerator over 0 denominator => inf + nanj
    np.testing.assert_equal(
        sys_tf(0, warn_infinite=False), complex(np.inf, np.nan))
    np.testing.assert_equal(
        sys_ss(0, warn_infinite=False), complex(np.inf, np.nan))
    np.testing.assert_equal(
        sys_tf(0j, warn_infinite=False), complex(np.inf, np.nan))
    np.testing.assert_equal(
        sys_ss(0j, warn_infinite=False), complex(np.inf, np.nan))
    np.testing.assert_equal(
        sys_tf.dcgain(), np.inf)
    np.testing.assert_equal(
        sys_ss.dcgain(), np.inf)
    # Set up transfer function with pole, zero at the origin
    sys_tf = ctrl.tf([1, 0], [1, 0])
    assert 0 in sys_tf.pole()
    assert 0 in sys_tf.zero()
    # Pole and zero at the origin should give nan + nanj for the response
    np.testing.assert_equal(
        sys_tf(0, warn_infinite=False), complex(np.nan, np.nan))
    np.testing.assert_equal(
        sys_tf(0j, warn_infinite=False), complex(np.nan, np.nan))
    np.testing.assert_equal(
        sys_tf.dcgain(), np.nan)
    # Set up state space version
    sys_ss = ctrl.tf2ss(ctrl.tf([1, 0], [1, 1])) * \
        ctrl.tf2ss(ctrl.tf([1], [1, 0]))
    # Different systems give different representations => test accordingly
    if 0 in sys_ss.pole() and 0 in sys_ss.zero():
        # Pole and zero at the origin => should get (nan + nanj)
        np.testing.assert_equal(
            sys_ss(0, warn_infinite=False), complex(np.nan, np.nan))
        np.testing.assert_equal(
            sys_ss(0j, warn_infinite=False), complex(np.nan, np.nan))
        np.testing.assert_equal(
            sys_ss.dcgain(), np.nan)
    elif 0 in sys_ss.pole():
        # Pole at the origin, but zero elsewhere => should get (inf + nanj)
        np.testing.assert_equal(
            sys_ss(0, warn_infinite=False), complex(np.inf, np.nan))
        np.testing.assert_equal(
            sys_ss(0j, warn_infinite=False), complex(np.inf, np.nan))
        np.testing.assert_equal(
            sys_ss.dcgain(), np.inf)
    else:
        # Near pole/zero cancellation => nothing sensible to check
        pass
    # Pole with non-zero, complex numerator => inf + infj
    s = ctrl.tf('s')
    sys_tf = (s + 1) / (s**2 + 1)
    assert 1j in sys_tf.pole()
    # Set up state space system with pole on imaginary axis
    sys_ss = ctrl.tf2ss(sys_tf)
    # NOTE(review): this re-checks sys_tf, not sys_ss — likely a typo for
    # `1j in sys_ss.pole()`; left unchanged here.
    assert 1j in sys_tf.pole()
    # Make sure we get correct response if evaluated at the pole
    np.testing.assert_equal(
        sys_tf(1j, warn_infinite=False), complex(np.inf, np.inf))
    # For state space, numerical errors come into play
    resp_ss = sys_ss(1j, warn_infinite=False)
    if np.isfinite(resp_ss):
        assert abs(resp_ss) > 1e15
    else:
        if resp_ss != complex(np.inf, np.inf):
            pytest.xfail("statesp evaluation at poles not fully implemented")
        else:
            np.testing.assert_equal(resp_ss, complex(np.inf, np.inf))
    # DC gain is finite
    np.testing.assert_almost_equal(sys_tf.dcgain(), 1.)
    np.testing.assert_almost_equal(sys_ss.dcgain(), 1.)
    # Make sure that we get the *signed* DC gain
    sys_tf = -1 / (s + 1)
    np.testing.assert_almost_equal(sys_tf.dcgain(), -1)
    sys_ss = ctrl.tf2ss(sys_tf)
    np.testing.assert_almost_equal(sys_ss.dcgain(), -1)
def test_exists(value: t.Any, exp: bool) -> None:
    """It exists if it's not None."""
    result = exists(value)
    assert result is exp
def largest(layer, field):
    """largest(layer,field)
    Returns the largest area significant class in the study area.
    """
    values = []
    cursor = arcpy.SearchCursor(layer)
    for record in cursor:
        values.append(record.getValue(field))
    del cursor
    # Largest value == last element of an ascending sort.
    return sorted(values)[-1]
async def double_up(ctx):
    """
    Start a "Double-Up Chance!" gambling game.

    The player wagers gold, then repeatedly picks a hole via message
    reactions. Each correct pick doubles the pot; round 5 is a three-hole
    final with a 1-in-3 win chance. A wrong pick or a timeout ends the game.
    """
    depth = 1 # current round number
    HOLE = "\N{HOLE}\N{VARIATION SELECTOR-16}"
    LEFT_ARROW = "\N{LEFTWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}"
    RIGHT_ARROW = "\N{BLACK RIGHTWARDS ARROW}\N{VARIATION SELECTOR-16}"
    TOP_ARROW = "\N{UPWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}"
    emojis = [LEFT_ARROW,RIGHT_ARROW] # reaction emojis for a regular round's hole choice
    final_emojis = [LEFT_ARROW,TOP_ARROW,RIGHT_ARROW] # reaction emojis for the final round's hole choice
    def gold_check(msg):
        # Validates the wager input: same author, same channel, decimal text.
        return msg.author == ctx.author and msg.channel == ctx.channel and msg.content.isdecimal()
    embed = discord.Embed(title="ダブルアップ",description=f"{ctx.author.mention} 掛け金を入力してください。",color=0x0000ff)
    await ctx.send(embed=embed)
    try:
        gold_msg = await bot.wait_for("message",check=gold_check,timeout=30.0)
    except asyncio.TimeoutError:
        embed = discord.Embed(title="エラー",
                              description=f"{ctx.author.mention} 掛け金の正常な入力が確認されませんでした。コマンドの処理を終了します。",
                              color=0xff0000)
        await ctx.send(embed=embed)
        return
    gold = int(gold_msg.content)
    embed = discord.Embed(title=f"どちらの穴に入るか選ぼう!(このテキストのリアクションをタッチして選択)({depth}回目)",
                          description=f"{HOLE}\t{HOLE}\n{LEFT_ARROW}\t{RIGHT_ARROW}",color=0x00ff00)
    embed.set_footer(text=f"掛け金:{gold * 2} G")
    game_msg = await ctx.send(embed=embed) # game message: edited in place below to render the game
    while depth < 5:
        await game_msg.edit(embed=embed)
        for emoji in emojis:
            await game_msg.add_reaction(emoji) # offer the hole-choice reactions
        def hole_check(reaction,user):
            # Validates a regular-round hole-choice reaction.
            react_msg = reaction.message
            are_same_msgs = react_msg.id == game_msg.id and react_msg.channel == game_msg.channel # same game message?
            return are_same_msgs and user == ctx.author and str(reaction.emoji) in emojis
        try:
            hole_react,user = await bot.wait_for("reaction_add",check=hole_check,timeout=30.0)
        except asyncio.TimeoutError:
            embed = discord.Embed(title="エラー",
                                  description=f"{ctx.author.mention} 穴の選択が正常に行われませんでした。コマンドの処理を終了します。",
                                  color=0xff0000)
            await ctx.send(embed=embed)
            return
        if random.randrange(2) == 0:
            # 1-in-2 chance of losing a regular round.
            embed = discord.Embed(title=f"はずれー!!",
                                  description=f"{ctx.author.mention} 懲りずに、また挑戦してみてね!",color=0x00ff00)
            await ctx.send(embed=embed)
            return
        depth += 1
        gold *= 2
        await hole_react.remove(user)
        # Next round's board; rendered by the edit at the top of the loop.
        embed = discord.Embed(title=f"当たり!次の穴を選んでね!({depth}回目)",
                              description=f"{ctx.author.mention}\n{HOLE}\t{HOLE}\n{LEFT_ARROW}\t{RIGHT_ARROW}",
                              color=random.randrange(0xffffff))
        embed.set_footer(text=f"次の掛け金:{gold * 2} G")
    embed = discord.Embed(title=f"当たり!次の穴が最後!({depth}回目)",
                          description=f"{HOLE}\t{HOLE}\t{HOLE}\n{LEFT_ARROW}\t{TOP_ARROW}\t{RIGHT_ARROW}",
                          color=random.randrange(0xffffff))
    embed.set_footer(text=f"掛け金:{gold * 2} G")
    await game_msg.edit(embed=embed)
    await game_msg.clear_reactions() # the final round adds a middle hole, so clear all reactions first
    for emoji in final_emojis:
        await game_msg.add_reaction(emoji)
    def hole_check_final(reaction,user):
        # Validates the final-round hole-choice reaction.
        react_msg = reaction.message
        are_same_msgs = react_msg.id == game_msg.id and react_msg.channel == game_msg.channel
        return are_same_msgs and user == ctx.author and str(reaction.emoji) in final_emojis
    try:
        await bot.wait_for("reaction_add",check=hole_check_final,timeout=30.0)
    except asyncio.TimeoutError:
        embed = discord.Embed(title="エラー",
                              description=f"{ctx.author.mention} 穴の選択が正常に行われませんでした。コマンドの処理を終了します。",
                              color=0xff0000)
        await ctx.send(embed=embed)
        return
    if random.randrange(3) != 2:
        # 2-in-3 chance of losing the final round.
        embed = discord.Embed(title="はずれ。",
                              description=f"{ctx.author.mention} 君たちは一体今までにいくら貢いだんだろうね",color=0x00ff00)
        await ctx.send(embed=embed)
        return
    gold *= 2
    embed = discord.Embed(title="おめでとう!",
                          description=f"{ctx.author.mention} **{gold}** G入手したよ!\n達成できたのは今回で…何回目だったっけ",color=0x0000ff)
    await ctx.send(embed=embed)
def read_version(file_contents):
    """Read the project version from pyproject.toml contents."""
    pyproject = tomlkit.loads(file_contents)
    # Poetry stores the version under [tool.poetry].
    return pyproject["tool"]["poetry"]["version"]
def schedule_time(check_start_time, check_end_time, time_duaration=7) -> dict:
    """ Returns dictionary of earliest available time within the next week

    :param check_start_time: desired slot start (datetime.time)
    :param check_end_time: desired slot end (datetime.time)
    :param time_duaration: number of days ahead to search (starting tomorrow)
    :return: {"start": datetime, "end": datetime} for the first free day,
        or None when every day in the window conflicts.
    """
    all_busy_events = get_busy_events()
    for d in range(1, time_duaration):
        # Increment by one day throughout the week
        check_day = datetime.today().date() + timedelta(d)
        if not all_busy_events:
            # No busy events at all: tomorrow (first candidate) is free.
            return {"start": datetime.combine(check_day, check_start_time),
                    "end": datetime.combine(check_day, check_end_time)}
        day_events = [event for event in all_busy_events if event[0].date() == check_day]
        # Bug fix: the original overwrote is_time_overlapping per event, so
        # only the day's LAST event was considered; aggregate over all events.
        slot_contains_event = any(
            is_time_between(check_start_time, check_end_time, start.time())
            and is_time_between(check_start_time, check_end_time, end.time())
            for start, end in day_events
        )
        slot_endpoints_free = all(
            not is_time_between(start.time(), end.time(), check_start_time)
            and not is_time_between(start.time(), end.time(), check_end_time)
            for start, end in day_events
        )
        if slot_endpoints_free and not slot_contains_event:
            return {"start": datetime.combine(check_day, check_start_time),
                    "end": datetime.combine(check_day, check_end_time)}
    # No free slot found within the search window.
    return None
def _to_array(value):
"""When `value` is a plain Python sequence, return it as a NumPy array."""
if not hasattr(value, 'shape') and hasattr(value, '__len__'):
return array(value)
else:
return value | 5,328,376 |
def dict_pix_to_deg(input_dict, changeN):
    """Convert pix to deg for a given dictionary format,
    changeN is 1 or 2, to let the function works for the first
    or both elements of the tuple"""
    # Values are lists of displays; each display is a list of positions.
    converted = {}
    for key, displays in input_dict.items():
        converted[key] = [
            [__pix_to_deg_tuple(position, changeN) for position in display]
            for display in displays
        ]
    return converted
def remove_container_name_from_blob_path(blob_path, container_name):
    """
    Get the bit of the filepath after the container name.

    :param blob_path: path which may include the container name as a segment
    :param container_name: segment to strip, along with everything before it
    :return: the '/'-joined path after the container-name segment, '' when
        nothing follows it, or the original path when the container name
        does not appear in it at all
    """
    # container name will often be part of filepath - we want
    # the blob name to be the bit after that
    if container_name not in blob_path:  # idiomatic `not in` (was `not x in y`)
        return blob_path
    blob_name_parts = []
    container_name_found = False
    for path_part in split_filepath(blob_path):
        if container_name_found:
            blob_name_parts.append(path_part)
        if path_part == container_name:
            container_name_found = True
    # "/".join([]) == "", so the explicit empty-list branch is unnecessary.
    return "/".join(blob_name_parts)
def _complex_ar_from_dict(dic: Dict[str, List]) -> np.ndarray:
"""Construct complex array from dictionary of real and imaginary parts"""
out = np.array(dic["real"], dtype=complex)
out.imag = np.array(dic["imag"], dtype=float)
return out | 5,328,379 |
def endpoint(url_pattern, method="GET"):
    """
    Decorator factory turning a method into an HTTP endpoint call.

    The decorated function's arguments are translated into request
    parameters; the wrapper performs the HTTP request via ``self`` and
    returns the decoded response body.

    :param url_pattern: URL (or pattern) passed to ``self._get``/``self._post``
    :param method: HTTP verb; only "GET" and "POST" are handled
    :return: the decorator
    """
    def wrapped_func(f):
        @wraps(f)
        def inner_func(self, *args, **kwargs):
            """
            Execute the HTTP call for the decorated endpoint.

            :param self: client providing _get/_post and special_attributes_map
            :param args: positional args of the decorated function
            :param kwargs: keyword args of the decorated function
            :return: parsed JSON, response text, or raw bytes (None when
                the method is unsupported or the response is falsy)
            """
            # Map the decorated function's signature onto request params.
            func_params = translate_params(f, *args, **kwargs)
            params = translate_special_params(func_params, self.special_attributes_map)
            response = None
            if method == "GET":
                response = self._get(url_pattern, params=params)
            elif method == "POST":
                response = self._post(url_pattern, params=params)
            # NOTE(review): a falsy response (e.g. a requests.Response with a
            # 4xx/5xx status, or an unhandled verb) silently yields None —
            # confirm callers expect that.
            if response:
                try:
                    if response.headers["Content-Type"] == "application/json":
                        return response.json()
                    else:
                        return response.text
                except Exception as e:
                    # Header missing or body not decodable: fall back to bytes.
                    return response.content
        return inner_func
    return wrapped_func
def test_benchmarks_bad_user(client, mocker):
    """Tests the benchmarks route with an invalid user."""
    # Make the user lookup return no match so the route rejects the request.
    user_model = mocker.patch('app.routes.AppUser')
    user_model.query.filter_by.return_value.first.return_value = None
    resp = client.get('/benchmarks/foo')
    assert resp.status_code == 403
def elastic_transform(
    image,
    alpha,
    sigma,
    alpha_affine,
    interpolation=cv2.INTER_LINEAR,
    border_mode=cv2.BORDER_REFLECT_101,
    random_state=None,
    approximate=False,
):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.

    Args:
        image: input array with H x W leading dimensions (channels, if any,
            pass through cv2 unchanged).
        alpha: scale of the elastic displacement field.
        sigma: Gaussian smoothing std-dev for the displacement field.
        alpha_affine: max magnitude of the random affine corner jitter.
        interpolation: cv2 interpolation flag used by both warps.
        border_mode: cv2 border mode used by both warps.
        random_state: np.random.RandomState. NOTE(review): defaults to a
            FIXED seed (1234), so default calls always produce the same
            deformation — confirm intended.
        approximate: smooth with a fixed 17x17 GaussianBlur instead of an
            exact gaussian_filter (~2x faster on 512+ images).
    """
    if random_state is None:
        random_state = np.random.RandomState(1234)
    height, width = image.shape[:2]
    # Random affine: jitter three anchor points around the image center and
    # warp the image by the affine transform they define.
    center_square = np.float32((height, width)) // 2
    square_size = min((height, width)) // 3
    alpha = float(alpha)
    sigma = float(sigma)
    alpha_affine = float(alpha_affine)
    pts1 = np.float32(
        [
            center_square + square_size,
            [center_square[0] + square_size, center_square[1] - square_size],
            center_square - square_size,
        ]
    )
    pts2 = pts1 + random_state.uniform(
        -alpha_affine, alpha_affine, size=pts1.shape
    ).astype(np.float32)
    matrix = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(
        image, matrix, (width, height), flags=interpolation, borderMode=border_mode
    )
    if approximate:
        # Approximate computation smooth displacement map with a large enough kernel.
        # On large images (512+) this is approximately 2X times faster
        dx = random_state.rand(height, width).astype(np.float32) * 2 - 1
        cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx)
        dx *= alpha
        dy = random_state.rand(height, width).astype(np.float32) * 2 - 1
        cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy)
        dy *= alpha
    else:
        # Exact variant: smooth uniform noise in [-1, 1] with a gaussian filter.
        dx = np.float32(
            gaussian_filter((random_state.rand(height, width) * 2 - 1), sigma) * alpha
        )
        dy = np.float32(
            gaussian_filter((random_state.rand(height, width) * 2 - 1), sigma) * alpha
        )
    # Remap each output pixel from its displaced source coordinate.
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    mapx = np.float32(x + dx)
    mapy = np.float32(y + dy)
    return cv2.remap(image, mapx, mapy, interpolation, borderMode=border_mode)
def crystal_atnum(list_AtomicName, unique_AtomicName, unique_Zatom,list_fraction, f0coeffs):
    """
    To get the atom and fractional factor in diffierent sites
    list_AtomicName: list of all atoms in the crystal
    unique_AtomicName: list of unique atomicname in the list
    unique_Zatom: list of unique atomic number
    list_fraction: list of unique fractial factor
    f0coeffs: dict of f0 coefficient lists for names carrying valence info
    return: num_e, fract, n_atom, n_ATUM — one entry per (atom, fraction)
        pair: number of electrons (F(0,0,0)), fractional factor, site count,
        and atomic number
    """
    import re
    from orangecontrib.xoppy.util.xoppy_xraylib_util import f0_xop
    num_e = []
    fract = []
    n_atom = []
    n_ATUM = []
    for k,x in enumerate(unique_AtomicName):
        # Leading-letters prefix equals the whole name => plain element symbol.
        tmp1 = re.search('(^[a-zA-Z]*)',x)
        if tmp1.group(0) == x:   #AtomicName only, without valence info (i.e., B, Y, O)
            f0 = f0_xop(unique_Zatom[k])
        else:
            #f0 = f0_xop(0,AtomicName=x)
            f0 = f0coeffs[x]
        # F(0,0,0) = central coefficient plus the first half of the a-coeffs
        # (f0 at zero momentum transfer, i.e. the electron count).
        icentral = int(len(f0)/2)
        F000 = f0[icentral]
        for i in range(icentral):
            F000 += f0[i]
        # Fractions of every site occupied by this atom, then one output
        # entry per distinct fraction with its multiplicity.
        a=[list_fraction[i] for i,v in enumerate(list_AtomicName) if v==x]
        fac = list(set(a))
        for y in fac:
            n = a.count(y)
            num_e.append(F000)
            fract.append(y)
            n_atom.append(n)
            n_ATUM.append(unique_Zatom[k])
    return num_e.copy(), fract.copy(), n_atom.copy(),n_ATUM.copy()
def graph_fft_signals(x: np.ndarray, y1: np.ndarray, y2: np.ndarray,
                      y3: np.ndarray, frequencies: np.ndarray,
                      fft: np.ndarray) -> None:
    """Displays 4 graphs: two containing signals, one containing the sum of
    the previous signals, and another containing the fft of the sum-signal.
    """
    fig = plt.figure(figsize=(7, 7))
    # The three time-domain plots share both axes; the FFT plot does not.
    ax_sig1 = fig.add_subplot(4, 1, 1)
    ax_sig2 = fig.add_subplot(4, 1, 2, sharex=ax_sig1, sharey=ax_sig1)
    ax_sum = fig.add_subplot(4, 1, 3, sharex=ax_sig1, sharey=ax_sig1)
    ax_fft = fig.add_subplot(4, 1, 4)
    fig.tight_layout(pad=4)
    time_plots = (
        (ax_sig1, y1, "Signal 1"),
        (ax_sig2, y2, "Signal 2"),
        (ax_sum, y3, "Signal 3 = Signal 1 + Signal 2"),
    )
    for axis, signal, title in time_plots:
        axis.plot(x, signal, color=orange, linewidth=0.4)
        axis.set_title(title)
        _style_axis2d(axis, "Time (Seconds)", "Amplitude")
    ax_fft.plot(frequencies, fft, color=orange, linewidth=0.4)
    ax_fft.set_title("FFT of Signal 3")
    _style_axis2d(ax_fft, "Frequency (Hz)", "Magnitude")
    plt.show()
def integrate(name, var):
    """Average a nodal field from an Exodus II file into a radial profile.

    Reads point data for ``var`` from the file ``name``, keeps points
    lying within ``thresh`` of a fixed target height along z, and bins
    their values by cylindrical radius r = sqrt(x^2 + y^2).

    Args:
        name: path to the Exodus II results file.
        var: name of the point (nodal) result array to average.

    Returns:
        numpy array of length ``nr`` with the per-bin mean field value.
    """
    d = vtk.vtkExodusIIReader()
    d.SetFileName(name)
    d.UpdateInformation()
    d.SetPointResultArrayStatus(var, 1)
    d.Update()
    blocks = d.GetOutput().GetNumberOfBlocks()
    data = d.GetOutput()
    # slice location/tolerance along z, and the radial binning setup
    height = 0.804380714893
    thresh = 0.004
    rmin = 0.0
    rmax = 1.0
    nr = 10
    dr = (rmax - rmin) / nr
    rint = np.zeros(nr)
    # NOTE: counts start at 1 (not 0) so empty bins do not divide by zero;
    # this slightly biases bins that do receive samples.
    rn = np.ones(nr)
    for j in range(blocks):  # fixed: xrange is Python 2 only
        blk = data.GetBlock(0).GetBlock(j)
        pts = blk.GetNumberOfPoints()
        pt_data = blk.GetPointData().GetArray(var)
        for i in range(pts):
            # gather x,y,z location (stored z,y,x in this file)
            z, y, x = blk.GetPoint(i)
            # gather point scalar value
            u = pt_data.GetValue(i)
            # now, find all values near the target height
            # (convert to cylindrical)
            if abs(z - height) < thresh:
                r = np.sqrt(x**2 + y**2)
                # fixed: cast the bin index to int (NumPy rejects float
                # indices) and clamp so r == rmax stays in range
                fr = min(int(np.floor(r / dr)), nr - 1)
                rint[fr] += u
                rn[fr] += 1
    return rint / rn
def rename_files_in_dir(path, config, only_print=False):
    """
    Recursively clean up directory and file names under ``path``.

    Builds a cleaned name for every directory and file by applying the
    configured literal replacements and regex deletions, then stripping
    leading/trailing "@#. -" characters, and renames the entry on disk
    (unless ``only_print`` is set).

    please run in python3 if your os is windows, cause os.walk has a encoding bug
    :param only_print: when True, only report planned renames; do not touch disk
    :type path: str
    :type config: dict
    :param path: root directory to walk
    :param config: supports 'refuse_suffix' (file extensions to skip),
        'refuse_dir' (directory names to skip — also pruned from the walk),
        're_sub' (regex patterns to delete) and 'replaces' (literal
        substrings to delete)
    :return: None
    """
    refuse_suffix = config.get('refuse_suffix', [])
    refuse_dir = config.get('refuse_dir', [])
    re_sub = config.get('re_sub', [])
    replaces = config.get('replaces', [])
    for root, dirs, files in os.walk(path):
        new_dirs = list()  # type: list[str]
        for name in dirs:
            if name in refuse_dir:
                continue
            new_name = delete_replace_str(name, replaces)
            new_name = delete_re_str(new_name, re_sub)
            new_name = new_name.strip("@#. -")
            print("rename dir '{}' to '{}' in '{}'".format(name, new_name, root))
            if name != new_name and new_name and not only_print:
                os.rename(os.path.join(root, name),
                          os.path.join(root, new_name))
                # fixed: record the new on-disk name so the (topdown) walk
                # descends into the renamed directory, not the old one
                new_dirs.append(new_name)
            else:
                new_dirs.append(name)
        # mutate dirs in place so os.walk honours the pruning/renaming
        dirs[:] = new_dirs
        for file in files:  # type: str
            suffix_i = file.rfind('.')
            # fixed: a file without an extension previously lost its last
            # character to a bogus one-character "suffix" (rfind == -1)
            if suffix_i == -1:
                name, suffix = file, ''
            else:
                name, suffix = file[:suffix_i], file[suffix_i + 1:]
            if suffix in refuse_suffix:
                continue
            new_name = delete_replace_str(name, replaces)
            new_name = delete_re_str(new_name, re_sub)
            new_name = new_name.strip("#@. -")
            new_file = "{}.{}".format(new_name, suffix) if suffix else new_name
            print("rename file '{}' to '{}' in '{}'".format(file, new_file, root))
            if name != new_name and new_name:
                if not only_print:
                    os.rename(os.path.join(root, file),
                              os.path.join(root, new_file))
            # todo move change name after print and wait for input
def mediate(timer: TimerBase, decimals: int | None) -> int:
    """Resolve the effective decimal precision for a timer run.

    When the start function did not specify ``decimals``, fall back to the
    value configured when the Timer() was initiated; otherwise validate and
    normalise the explicit value.
    """
    if decimals is None:
        return timer.decimals
    return validate_and_normalise(decimals)
def calculate_n_inputs(inputs, config_dict):
    """Compute the total input dimensionality for a model.

    Sums the contribution of each named input: 'action' adds the prior's
    n_variables, 'state' adds the configured state size, 'reward' adds 1,
    and 'params'/'grads' add n_variables (doubled unless the approximate
    posterior uses a constant scale).
    """
    def _width(name):
        # per-input contribution; unrecognised names contribute nothing
        if name == 'action':
            return config_dict['prior_args']['n_variables']
        if name == 'state':
            return config_dict['misc_args']['state_size']
        if name == 'reward':
            return 1
        if name in ('params', 'grads'):
            n_vars = config_dict['prior_args']['n_variables']
            if config_dict['approx_post_args']['constant_scale']:
                return n_vars
            return 2 * n_vars
        return 0

    return sum(_width(name) for name in inputs)
async def test_connection_pool_with_no_keepalive_connections_allowed():
    """
    A pool configured with 'max_keepalive_connections=0' could never return
    IDLE connections to the pool, so the constructor must reject the value.
    """
    no_keepalive = 0.0
    with pytest.raises(ValueError):
        AsyncConnectionPoolMixin(max_keepalive_connections=no_keepalive)
def clustering(
    adata: ad.AnnData,
    resolutions: Sequence[float],
    clustering_method: str = "leiden",
    cell_type_col: str = "cell_types",
    batch_col: str = "batch_indices"
) -> Tuple[str, float, float]:
    """Clusters the data and calculate agreement with cell type and batch
    variable.
    This method cluster the neighborhood graph (requires having run sc.pp.
    neighbors first) with "clustering_method" algorithm multiple times with the
    given resolutions, and return the best result in terms of ARI with cell
    type.
    Other metrics such as NMI with cell type, ARI with batch are logged but not
    returned. (TODO: also return these metrics)
    Args:
        adata: the dataset to be clustered. adata.obsp should contain the keys
            'connectivities' and 'distances'.
        resolutions: a list of leiden/louvain resolution parameters. Will
            cluster with each resolution in the list and return the best result
            (in terms of ARI with cell type).
        clustering_method: Either "leiden" or "louvain".
        cell_type_col: a key in adata.obs to the cell type column.
        batch_col: a key in adata.obs to the batch column.
    Returns:
        best_cluster_key: a key in adata.obs to the best (in terms of ARI with
            cell type) cluster assignment column.
        best_ari: the best ARI with cell type.
        best_nmi: the best NMI with cell type.
    """
    # fixed: the assert message was a placeholder-free f-string
    assert len(resolutions) > 0, 'Must specify at least one resolution.'
    # fixed: the previous local annotations used the undefined name `function`
    if clustering_method == 'leiden':
        clustering_func = sc.tl.leiden
    elif clustering_method == 'louvain':
        clustering_func = sc.tl.louvain
    else:
        raise ValueError("Please specify louvain or leiden for the clustering method argument.")
    _logger.info(f'Performing {clustering_method} clustering')
    assert cell_type_col in adata.obs, f"{cell_type_col} not in adata.obs"
    best_res, best_ari, best_nmi = None, -inf, -inf
    for res in resolutions:
        col = f'{clustering_method}_{res}'
        clustering_func(adata, resolution=res, key_added=col)
        ari = adjusted_rand_score(adata.obs[cell_type_col], adata.obs[col])
        nmi = normalized_mutual_info_score(adata.obs[cell_type_col], adata.obs[col])
        n_unique = adata.obs[col].nunique()
        if ari > best_ari:
            best_res = res
            best_ari = ari
        # NOTE: best NMI is tracked independently of best ARI, so the returned
        # NMI may come from a different resolution than the returned key.
        if nmi > best_nmi:
            best_nmi = nmi
        # only log ARI with batch when there are at least two batches
        if batch_col in adata.obs and adata.obs[batch_col].nunique() > 1:
            ari_batch = adjusted_rand_score(adata.obs[batch_col], adata.obs[col])
            _logger.info(f'Resolution: {res:5.3g}\tARI: {ari:7.4f}\tNMI: {nmi:7.4f}\tbARI: {ari_batch:7.4f}\t# labels: {n_unique}')
        else:
            _logger.info(f'Resolution: {res:5.3g}\tARI: {ari:7.4f}\tNMI: {nmi:7.4f}\t# labels: {n_unique}')
    return f'{clustering_method}_{best_res}', best_ari, best_nmi
async def login_swagger(form_data: OAuth2PasswordRequestForm, db: AsyncIOMotorClient) -> LoginUserReplyModel:
    """
    Login route, returns Bearer Token.
    SWAGGER FRIENDLY.
    Due to the swagger Api not letting me add otp as a required parameter
    the otp needs to be added to the the end of the password
    ex. 'passwordotpotp' .. no space just right after and otp is always 6 digits
    TODO find way to modify swagger to let me add otp separately, no login2 needed
    """
    # Split the combined credential: the trailing 6 characters are the OTP,
    # everything before them is the real password.
    # NOTE(review): assumes len(form_data.password) > 6 — a shorter value
    # yields an empty password and a wrong OTP slice; confirm upstream checks.
    password = form_data.password[:-6]  # exclude the last 6 digits
    otp = form_data.password[-6:]  # include only the last 6 digits
    user: UserModelDB = await get_user_by_email(form_data.username, db)  # username is email
    is_user_auth = authenticate_user(user, password=password, otp=otp)
    if not is_user_auth:
        # Deliberately vague detail so callers cannot tell which factor failed.
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect Authentication Data",
            headers={"WWW-Authenticate": "Bearer"},
        )
    # create access token
    access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    access_token = create_access_token(user_id=str(user.id), expires_delta=access_token_expires)
    # update db with last_login time and set the user to is_active=True
    # NOTE(review): datetime.now() is naive local time — presumably UTC is
    # intended; confirm. isActive is stored as the string "true", not a bool.
    await db["users"].update_one({"email": form_data.username}, {"$set": {
        "lastLogin": datetime.now().strftime("%m/%d/%y %H:%M:%S"),
        "isActive": "true"
    }})
    reply = LoginUserReplyModel(
        user=ShowUserModel.parse_obj(user),
        access_token=access_token,
        token_type="Bearer"
    )
    return reply
def coords(lat: float, lon: float, alt: float = None) -> str:
    """Turn latitude, longitude (and optional altitude) into a printable string.

    Args:
        lat: latitude in degrees; positive is North, negative South.
        lon: longitude in degrees; positive is East, negative West.
        alt: optional altitude in metres, appended when given.

    Returns:
        A string such as "12.3456N 7.8901W 25m".
    """
    txt = "%2.4f%s" % (abs(lat), "N" if lat>0 else "S")
    txt += " %2.4f%s" % (abs(lon), "E" if lon>0 else "W")
    # fixed: compare against None so a valid altitude of 0 m is still printed
    if alt is not None:
        txt += " %2.0fm" % alt
    return txt
def chromosome_to_smiles():
    """Wrapper function for simplicity; returns the SMILES builder."""

    def sc2smi(chromosome):
        """Generate a SMILES string from a list of SMILES characters. To be customized."""
        silyl = "([Si]([C])([C])([C]))"
        core = chromosome[0]
        # each phosphine ligand is assembled from three substituent fragments
        ligands = []
        for first, second, third in ((chromosome[1], chromosome[2], chromosome[3]),
                                     (chromosome[4], chromosome[5], chromosome[6])):
            ligands.append("(P(" + first + ")(" + second + ")(" + third + "))")
        return core + ligands[0] + ligands[1] + silyl

    return sc2smi
def softmax_edges(graph, feat):
    """Apply batch-wise graph-level softmax over all the values of edge field
    :attr:`feat` in :attr:`graph`.
    Parameters
    ----------
    graph : DGLGraph
        The graph.
    feat : str
        The feature field.
    Returns
    -------
    tensor
        The tensor obtained.
    Examples
    --------
    >>> import dgl
    >>> import torch as th
    Create two :class:`~dgl.DGLGraph` objects and initialize their
    edge features.
    >>> g1 = dgl.DGLGraph()                           # Graph 1
    >>> g1.add_nodes(2)
    >>> g1.add_edges([0, 1], [1, 0])
    >>> g1.edata['h'] = th.tensor([[1., 0.], [2., 0.]])
    >>> g2 = dgl.DGLGraph()                           # Graph 2
    >>> g2.add_nodes(3)
    >>> g2.add_edges([0, 1, 2], [1, 2, 0])
    >>> g2.edata['h'] = th.tensor([[1., 0.], [2., 0.], [3., 0.]])
    Softmax over edge attribute :attr:`h` in a batched graph.
    >>> bg = dgl.batch([g1, g2], edge_attrs='h')
    >>> dgl.softmax_edges(bg, 'h')
    tensor([[0.2689, 0.5000],  # [0.2689, 0.7311] = softmax([1., 2.])
            [0.7311, 0.5000],  # [0.5000, 0.5000] = softmax([0., 0.])
            [0.0900, 0.3333],  # [0.0900, 0.2447, 0.6652] = softmax([1., 2., 3.])
            [0.2447, 0.3333],  # [0.3333, 0.3333, 0.3333] = softmax([0., 0., 0.])
            [0.6652, 0.3333]])
    Softmax over edge attribute :attr:`h` in a single graph.
    >>> dgl.softmax_edges(g1, 'h')
    tensor([[0.2689, 0.5000],  # [0.2689, 0.7311] = softmax([1., 2.])
            [0.7311, 0.5000]]),  # [0.5000, 0.5000] = softmax([0., 0.])
    Notes
    -----
    If the input graph has batch size greater than one, the softmax is applied
    at each example in the batch.
    """
    # Delegates to the shared reducer; 'edges' selects the edge frame.
    return _softmax_on(graph, 'edges', feat)
def command_result_processor_parameter_required(command_line_parameter):
    """
    Command result message processor if a parameter stays unsatisfied.
    Parameters
    ----------
    command_line_parameter : ``CommandLineParameter``
        Respective command parameter.
    Returns
    -------
    message : `str`
    """
    return 'Parameter: ' + repr(command_line_parameter.name) + ' is required.\n'
def r_importer(modules, install_only=None, log=False):
    """
    Import and install R packages. If the desired packages are not installed it will
    automatically install them. Note that this function will act as a one time
    delay in running time, if modules need to be installed. Import R packages
    manually as e.g. <stargazer = rpy2.robjects.packages.importr('stargazer')>.
    So, the same name used for installing, should be used to import the functions.
    Important to note, this function imports the following modules from rpy2:
    "rpy2.robjects.packages" and "rpy2.robjects.vectors".
    Args:
        modules: list of the desired packages. The packages to be included should
            be as a string. E.g. modules = ['stargazer', 'tidyverse'].
        install_only: default=None. list or string of packages to be installed
            only (not imported). Note, combinations are possible.
        log: default=False. Prints a log message if true, of the packages that are
            (succesfully) installed.
    Returns:
        None
    """
    # fixed: avoid a mutable default argument; None is the sentinel
    if install_only is None:
        install_only = []
    if not isinstance(modules, list):
        modules = [modules]
    if not isinstance(install_only, list):
        install_only = [install_only]
    # import R's utility package:
    utils = rpackages.importr('utils')
    # R package names:
    packnames = tuple(modules)
    # Selectively install what needs to be installed. Use CRAN cloud server:
    names_to_install = [x for x in packnames if not rpackages.isinstalled(x)]
    if len(names_to_install) > 0:
        print('Installing:', names_to_install)
        with walpy.suppress():
            utils.install_packages(StrVector(names_to_install),
                                   repos='https://cloud.r-project.org/')
        print('Successfully installed:', names_to_install)
    # Make modules non-overlapping with install_only:
    modules = set(modules) - set(install_only)
    # Import modules to be automatically imported
    for module in modules:
        rpackages.importr(module)
    # Print log message if requested:
    if log:  # fixed: truthiness test instead of `== True`
        print('Successfully imported:', list(modules))
def randdirichlet(a):
    """Draw Dirichlet-distributed samples (Python port of randdirichlet.m).

    Uses the gamma construction: draw gamma variates with shape parameters
    ``a`` and normalise along the first axis.

    :param a: vector of weights (shape parameters to the gamma distribution)
    """
    try:
        samples = rand.randomgamma(a)
    except ValueError:
        # zero shape parameters make the sampler fail; nudge them and retry
        a[a == 0] += 1e-16
        samples = rand.randomgamma(a)
    samples /= samples.sum(axis=0)
    return samples
def ensure_lockfile(keep_outdated=False, pypi_mirror=None):
    """Ensures that the lockfile is up-to-date."""
    if not keep_outdated:
        keep_outdated = project.settings.get("keep_outdated")
    # Write out the lockfile if it doesn't exist, but not if the Pipfile is being ignored
    if project.lockfile_exists:
        old_hash = project.get_lockfile_hash()
        new_hash = project.calculate_pipfile_hash()
        if new_hash == old_hash:
            # Lockfile already matches the Pipfile; nothing to do.
            return
        click.echo(
            crayons.red(
                u"Pipfile.lock ({0}) out of date, updating to ({1})…".format(
                    old_hash[-6:], new_hash[-6:]
                ),
                bold=True,
            ),
            err=True,
        )
    do_lock(keep_outdated=keep_outdated, pypi_mirror=pypi_mirror)
def home(request):
    """Render the generator application's landing page."""
    return render(request, "generator/home.html")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.