| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def get_response_rows(response, template):
"""
    Take in a list of responses and convert them to SSE.Rows, based on the column types specified in template.
    The template should be a list of the form: ["str", "num", "dual", ...]
    For string values use: "str"
    For numeric values use: "num"
    For dual values use: "dual"
"""
response_rows = []
# For each row in the response list
for row in response:
i = 0
this_row = []
if len(template) > 1:
# For each column in the row
for col in row:
# Convert values to type SSE.Dual according to the template list
if template[i] == "str":
if col is None:
col = "\x00"
elif type(col) is not str:
col = "{0:.5f}".format(col)
this_row.append(SSE.Dual(strData=col))
elif template[i] == "num":
this_row.append(SSE.Dual(numData=col))
elif template[i] == "dual":
this_row.append(SSE.Dual(strData=col, numData=col))
i = i + 1
else:
# Convert values to type SSE.Dual according to the template list
if template[0] == "str":
if row is None:
row = "\x00"
elif type(row) is not str:
row = "{0:.5f}".format(row)
this_row.append(SSE.Dual(strData=row))
elif template[0] == "num":
this_row.append(SSE.Dual(numData=row))
elif template[0] == "dual":
this_row.append(SSE.Dual(strData=row, numData=row))
        # Group columns into an iterable and add to the response_rows
response_rows.append(iter(this_row))
# Values are then structured as SSE.Rows
response_rows = [SSE.Row(duals=duals) for duals in response_rows]
return response_rows
| 18,000
|
def stations_within_radius(stations, centre, r):
"""function that returns a list of all stations (type MonitoringStation)
within radius r of a geographic coordinate x."""
close_stations = []
for station in stations:
if haversine(station.coord, centre) < float(r):
close_stations.append(station)
return close_stations
| 18,001
|
def get_pack_display_name(pack_id: str) -> str:
"""
Gets the display name of the pack from the pack ID.
:param pack_id: ID of the pack.
:return: Name found in the pack metadata, otherwise an empty string.
"""
metadata_path = os.path.join(PACKS_FULL_PATH, pack_id, PACK_METADATA_FILE)
if pack_id and os.path.isfile(metadata_path):
with open(metadata_path, 'r') as json_file:
pack_metadata = json.load(json_file)
            return pack_metadata.get('name', '')
return ''
| 18,002
|
async def test_delete_requires_admin(opp, opp_ws_client, opp_read_only_access_token):
"""Test delete requires admin."""
client = await opp_ws_client(opp, opp_read_only_access_token)
await client.send_json(
{"id": 5, "type": auth_ha.WS_TYPE_DELETE, "username": "test-user"}
)
result = await client.receive_json()
assert not result["success"], result
assert result["error"]["code"] == "unauthorized"
| 18,003
|
def test_command_base(run_cli_process_launch_command, fixture_code, generate_calc_job_node):
"""Test invoking the calculation launch command with only required inputs."""
code = fixture_code('quantumespresso.pw2wannier90').store()
calculation = generate_calc_job_node('quantumespresso.pw', test_name='default').store()
nnkp_file = SinglefileData(io.BytesIO(b'content')).store()
options = ['-X', code.full_label, '-P', calculation.outputs.remote_folder.pk, '-S', nnkp_file.pk]
run_cli_process_launch_command(launch_calculation, options=options)
| 18,004
|
def tflite_conversion(model, tflite_path, conversion_type="fp32"):
"""Performs tflite conversion (fp32, int8)."""
# Prepare model for inference
model = prepare_model_for_inference(model)
create_directories([os.path.dirname(tflite_path)])
converter = tf.lite.TFLiteConverter.from_keras_model(model)
def representative_dataset_gen(input_dim):
calib_data = []
for data in tqdm(training_data.take(1000), desc="model calibration"):
input_data = data[0]
for i in range(input_data.shape[1] // input_dim):
input_chunks = [
input_data[:, i * input_dim: (i + 1) * input_dim, :, ]
]
for chunk in input_chunks:
calib_data.append([chunk])
        def gen():
            # yield one calibration sample at a time for the TFLite converter
            for data in tqdm(calib_data, desc="model calibration"):
                yield data
        return gen
if conversion_type == "int8":
log("Quantizing Model")
(training_data, training_num_steps) = get_data("train", overlap=True)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
converter.representative_dataset = representative_dataset_gen(model.input_shape[1])
tflite_model = converter.convert()
    with open(tflite_path, "wb") as tflite_file:
        tflite_file.write(tflite_model)
| 18,005
|
def jacobian(model, x, output_class):
"""
    Compute the output_class'th row of a Jacobian matrix. In other words,
    compute the gradient with respect to the output_class score.
    :param model: forward pass function.
    :param x: input tensor.
    :param output_class: the output class for which we want to compute the gradients.
    :return: output_class'th row of the Jacobian matrix wrt x.
"""
xvar = replicate_input_withgrad(x)
scores = model(xvar)
# compute gradients for the class output_class wrt the input x
# using backpropagation
torch.sum(scores[:, output_class]).backward()
return xvar.grad.detach().clone()
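# Usage sketch (not part of the original snippet): assumes torch is available and that
# everything lives in one module; `replicate_input_withgrad` below is a hypothetical
# stand-in for the helper jacobian() expects (clone the input with gradients enabled).
import torch

def replicate_input_withgrad(t):
    return t.clone().detach().requires_grad_(True)

model = torch.nn.Linear(3, 2)
x = torch.randn(1, 3)
row0 = jacobian(model, x, output_class=0)
# for a linear model, the class-0 row of the Jacobian is just weight row 0
print(torch.allclose(row0, model.weight[0].expand_as(x)))  # True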
| 18,006
|
def get_ctf(bot, trigger):
"""See the CTF Login Information
Params:
uname
email
passwd
teamid
"""
section = 'ctf'
# Get section and option from first argument.
arg1 = trigger.group(3).split('.')
if len(arg1) == 1:
section_name, option = "ctf", arg1[0]
else:
bot.reply("Usage: .get option")
return
section = getattr(bot.config, section_name)
static_sec = isinstance(section, StaticSection)
if static_sec and not hasattr(section, option):
bot.say('[{}] section has no option {}.'.format(section_name, option))
return
# Display current value if no value is given.
value = trigger.group(4)
if not value:
        if not static_sec and not bot.config.parser.has_option(section, option):
            bot.reply("%s does not exist." % option)
return
value = getattr(section, option)
bot.reply(value.strip("'"))
return
| 18,007
|
def render_face_orthographic(mesh, background=None):
"""
mesh location should be normalized
:param mesh:
:param background:
:return:
"""
mesh.visual.face_colors = np.array([0.05, 0.1, 0.2, 1])
mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
# mesh = pyrender.Mesh.from_trimesh(mesh)
scene.add(mesh, pose=np.eye(4))
camera_pose = np.eye(4)
# camera_pose[0, 3] = 1
# camera_pose[1, 3] = 1
# camera_pose[2, 3] = -10
# camera_pose[0, 0] = 1
# camera_pose[1, 1] = -1
# camera_pose[2, 2] = -1
#
# camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
camera_pose[0, 3] = 1
camera_pose[1, 3] = 1
camera_pose[2, 3] = 10
camera_pose[0, 0] = 1
camera_pose[1, 1] = 1
camera_pose[2, 2] = 1
camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
scene.add(camera, pose=camera_pose)
light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=5.0)
scene.add(light, pose=camera_pose)
color, depth = r.render(scene)
scene.clear()
# print(color.shape)
color = np.array(color)
color = color[::-1]
if background is not None:
new_color = np.array(background)
new_color[color != 255] = color[color != 255]
color = new_color
return color
| 18,008
|
def do_servicegroup_show(cc, args):
"""Show a Service Group."""
try:
servicegroup = cc.smc_servicegroup.get(args.servicegroup)
except exc.HTTPNotFound:
raise exc.CommandError('Service Group not found: %s' % args.servicegroup)
except exc.Forbidden:
raise exc.CommandError("Not authorized. The requested action "
"requires 'admin' level")
else:
if servicegroup is None:
print("Service group %s could not be found" % args.servicegroup)
return
if servicegroup.status:
setattr(servicegroup, 'state', servicegroup.state + '-' +
servicegroup.status)
setattr(servicegroup, 'hostname', servicegroup.node_name)
_print_servicegroup_show(servicegroup)
| 18,009
|
def check_and_join(phrase, symbols=None, filter=None):
"""
Joins characters of ``phrase`` and if ``symbols`` is given, raises
an error if any character in ``phrase`` is not in ``symbols``.
Parameters
==========
phrase
String or list of strings to be returned as a string.
symbols
Iterable of characters allowed in ``phrase``.
If ``symbols`` is ``None``, no checking is performed.
Examples
========
>>> from sympy.crypto.crypto import check_and_join
>>> check_and_join('a phrase')
'a phrase'
>>> check_and_join('a phrase'.upper().split())
'APHRASE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE', filter=True)
'ARAE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE')
Traceback (most recent call last):
...
ValueError: characters in phrase but not symbols: "!HPS"
"""
rv = ''.join(''.join(phrase))
if symbols is not None:
symbols = check_and_join(symbols)
missing = ''.join(list(sorted(set(rv) - set(symbols))))
if missing:
if not filter:
raise ValueError(
'characters in phrase but not symbols: "%s"' % missing)
rv = translate(rv, None, missing)
return rv
| 18,010
|
def rigs_from_file(filepath: str, sensor_ids: Optional[Set[str]] = None) -> kapture.Rigs:
"""
Reads rigs from CSV file.
:param filepath: input file path
:param sensor_ids: input set of valid sensor ids.
If a rig id collides one of them, raise error.
If a sensor in rig is not in sensor_ids, it is ignored.
:return: rigs
"""
# rig_id, sensor_id, qw, qx, qy, qz, tx, ty, tz
rigs = kapture.Rigs()
with open(filepath) as file:
table = table_from_file(file)
for rig_id, sensor_id, qw, qx, qy, qz, tx, ty, tz in table:
if sensor_ids is not None and rig_id in sensor_ids:
raise ValueError(f'collision between a sensor ID and rig ID ({rig_id})')
rotation = float_array_or_none([qw, qx, qy, qz])
translation = float_array_or_none([tx, ty, tz])
pose = kapture.PoseTransform(rotation, translation)
rigs[str(rig_id), sensor_id] = pose
if sensor_ids is not None:
# expunge all undesired sensors
rig_ids = set(rigs)
for rig_id in rig_ids:
for sensor_id in set(rigs[rig_id]):
if sensor_id not in sensor_ids and sensor_id not in rig_ids:
logger.debug(f'dropping sensor {sensor_id} from rig {rig_id} because it is unknown sensor.')
del rigs[rig_id][sensor_id]
return rigs
| 18,011
|
def drawLaneOnImage(img):
"""
Find and draw the lane lines on the image `img`.
"""
left_fit, right_fit, left_fit_m, right_fit_m, _, _, _, _, _ = findLines(img)
output = drawLine(img, left_fit, right_fit)
return cv2.cvtColor( output, cv2.COLOR_BGR2RGB )
| 18,012
|
def enthalpy_diff(SA, CT, p_shallow, p_deep):
"""
Calculates the difference of the specific enthalpy of seawater between
two different pressures, p_deep (the deeper pressure) and p_shallow (the
shallower pressure), at the same values of SA and CT. This function uses
the computationally-efficient 48-term expression for density in terms of
SA, CT and p (McDougall et al., 2011). The output (enthalpy_diff_CT) is
the specific enthalpy evaluated at (SA, CT, p_deep) minus the specific
enthalpy at (SA, CT, p_shallow).
Parameters
----------
SA : array_like
Absolute Salinity [g/kg]
CT : array_like
Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p_shallow : array_like
        shallower sea pressure [dbar]
    p_deep : array_like
        deeper sea pressure [dbar]
Returns
-------
enthalpy_diff : array_like
difference of specific enthalpy [J/kg]
(deep minus shallow)
Notes
-----
The 48-term equation has been fitted in a restricted range of parameter
space, and is most accurate inside the "oceanographic funnel" described in
McDougall et al. (2011). The GSW library function "infunnel(SA, CT, p)" is
available to be used if one wants to test if some of one's data lies
outside this "funnel".
Examples
--------
TODO
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqns. (3.32.2) and (A.30.6).
.. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011: A
computationally efficient 48-term expression for the density of
seawater in terms of Conservative Temperature, and related properties
of seawater.
"""
SA = np.maximum(SA, 0)
sqrtSA = np.sqrt(SA)
a0 = (v21 + CT * (v22 + CT * (v23 + CT * (v24 + v25 * CT))) + SA *
(v26 + CT * (v27 + CT * (v28 + CT * (v29 + v30 * CT))) + v36 * SA +
sqrtSA * (v31 + CT * (v32 + CT * (v33 + CT * (v34 + v35 * CT))))))
a1 = v37 + CT * (v38 + CT * (v39 + v40 * CT)) + SA * (v41 + v42 * CT)
a2 = v43 + CT * (v44 + v45 * CT + v46 * SA)
a3 = v47 + v48 * CT
b0 = (v01 + CT * (v02 + CT * (v03 + v04 * CT)) + SA * (v05 + CT * (v06 +
v07 * CT) + sqrtSA * (v08 + CT * (v09 + CT * (v10 + v11 * CT)))))
b1 = 0.5 * (v12 + CT * (v13 + v14 * CT) + SA * (v15 + v16 * CT))
b2 = v17 + CT * (v18 + v19 * CT) + v20 * SA
b1sq = b1 * b1
sqrt_disc = np.sqrt(b1sq - b0 * b2)
N = a0 + (2 * a3 * b0 * b1 / b2 - a2 * b0) / b2
M = a1 + (4 * a3 * b1sq / b2 - a3 * b0 - 2 * a2 * b1) / b2
A = b1 - sqrt_disc
B = b1 + sqrt_disc
delta_p = p_deep - p_shallow
p_sum = p_deep + p_shallow
part1 = b0 + p_shallow * (2 * b1 + b2 * p_shallow)
part2 = (B + b2 * p_deep) * (A + b2 * p_shallow)
part3 = (N * b2 - M * b1) / (b2 * (B - A))
# This function calculates enthalpy_diff using the computationally
# efficient 48-term expression for density in terms of SA, CT and p. If one
# wanted to compute the enthalpy difference using the full TEOS-10 Gibbs
# function, the following lines of code will enable this.
# pt = pt_from_CT(SA, CT)
# t_shallow = pt_from_t(SA, pt, 0, p_shallow)
# t_deep = pt_from_t(SA, pt, 0, p_deep)
# enthalpy_diff = (enthalpy_t_exact(SA, t_deep, p_deep) -
# enthalpy_t_exact(SA, t_shallow, p_shallow))
# or call the following, it is identical to the lines above.
# enthalpy_diff = enthalpy_diff_CT_exact(SA, CT, p_shallow, p_deep)
return db2Pascal * (delta_p * (a2 - 2 * a3 * b1 / b2 + 0.5 * a3 * p_sum) /
b2 + (M / (2 * b2)) *
np.log(1 + delta_p * (2 * b1 + b2 * p_sum) / part1) +
part3 * np.log(1 + delta_p * b2 * (B - A) / part2))
| 18,013
|
def zipdir(path, zippath):
"""
walkfiles = os.walk(path)
zippath = zipfile.ZipFile(zippath, 'w')
for root, dirs, files in walkfiles:
for filename in files: zippath.write(os.path.join(root, filename))
"""
    execStr = ['zip', '-r', zippath, path]
    print(' '.join(execStr))
    proc = subprocess.Popen(execStr, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (output, error) = proc.communicate()
    if error:
        print('error: ' + error.decode())
    print('output: ' + output.decode())
| 18,014
|
def ss(a, axis=0):
### taken from SciPy
"""Squares each value in the passed array, adds these squares, and
returns the result.
Parameters
----------
a : array
axis : int or None
Returns
-------
The sum along the given axis for (a*a).
"""
a, axis = _chk_asarray(a, axis)
return numpy.sum(a*a, axis)
| 18,015
|
def main():
"""main"""
args = get_args()
tsv_files = args.tsv_file
dbname = args.dbname
if not os.path.isfile(dbname):
print('Bad --dbname "{}"'.format(dbname))
sys.exit(1)
db = sqlite3.connect(dbname)
for fnum, tsv_file in enumerate(tsv_files):
if not os.path.isfile(tsv_file):
print('Bad tsv_file "{}"'.format(tsv_file))
sys.exit(1)
sample_name, ext = os.path.splitext(tsv_file)
if ext != '.tsv':
print('"{}" does not end with ".tsv"'.format(tsv_file))
sys.exit(1)
if sample_name.endswith('.centrifuge'):
sample_name = re.sub(r'\.centrifuge$', '', sample_name)
sample_id = import_sample(sample_name, db)
print('{:3}: Importing "{}" ({})'.format(fnum + 1, sample_name,
sample_id))
import_tsv(db, tsv_file, sample_id)
print('Done')
| 18,016
|
def Journaling_TypeInfo():
"""Journaling_TypeInfo() -> RTTI"""
return _DataModel.Journaling_TypeInfo()
| 18,017
|
def _maven_artifact(
group,
artifact,
version,
ownership_tag = None,
packaging = None,
classifier = None,
exclusions = None,
neverlink = None,
testonly = None,
tags = None,
flatten_transitive_deps = None,
aliases = None):
"""Defines maven artifact by coordinates.
Args:
group: The Maven artifact coordinate group name (ex: "com.google.guava").
artifact: The Maven artifact coordinate artifact name (ex: "guava").
version: The Maven artifact coordinate version name (ex: "1.20.1").
ownership_tag: 3rd party dependency owner responsible for its maintenance.
        packaging: The Maven artifact coordinate packaging name (ex: "jar").
classifier: The Maven artifact coordinate classifier name (ex: "jdk11").
exclusions: Artifact dependencies to be excluded from resolution closure.
        neverlink: neverlink value to set.
testonly: testonly value to set.
tags: Target tags.
flatten_transitive_deps: Define all transitive deps as direct deps.
aliases: aliases that will point to this dep.
"""
maven_artifact = {}
maven_artifact["group"] = group
maven_artifact["artifact"] = artifact
maven_artifact["version"] = version
maven_artifact["aliases"] = aliases
maven_artifact["tags"] = tags
maven_artifact["flatten_transitive_deps"] = flatten_transitive_deps
if packaging != None:
maven_artifact["packaging"] = packaging
if classifier != None:
maven_artifact["classifier"] = classifier
if exclusions != None:
maven_artifact["exclusions"] = exclusions
if neverlink != None:
maven_artifact["neverlink"] = neverlink
if testonly != None:
maven_artifact["testonly"] = testonly
if ownership_tag != None:
maven_artifact["ownership_tag"] = ownership_tag
return maven_artifact
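# Usage sketch (not part of the original snippet); the coordinates below are illustrative.
# Optional keys such as "packaging" only appear in the dict when they are passed explicitly.
guava = _maven_artifact(
    group = "com.google.guava",
    artifact = "guava",
    version = "31.1-jre",
    packaging = "jar",
)
print(sorted(guava.keys()))
# ['aliases', 'artifact', 'flatten_transitive_deps', 'group', 'packaging', 'tags', 'version']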
| 18,018
|
def docmdnf(cmd):
"""Execute a command."""
if flag_echo:
sys.stderr.write("executing: " + cmd + "\n")
if flag_dryrun:
return 0
return u.docmdnf(cmd)
| 18,019
|
def get_anchors(n):
"""Get a list of NumPy arrays, each of them is an anchor node set"""
m = int(np.log2(n))
anchor_set_id = []
for i in range(m):
anchor_size = int(n / np.exp2(i + 1))
for _ in range(m):
anchor_set_id.append(np.random.choice(n, size=anchor_size, replace=False))
return anchor_set_id
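# Usage sketch (not part of the original snippet): for n = 16, m = log2(16) = 4, so the
# function returns m*m = 16 anchor sets, m sets each of sizes n/2, n/4, n/8 and n/16.
import numpy as np
np.random.seed(0)
anchors = get_anchors(16)
print(len(anchors))                      # 16
print([len(a) for a in anchors[:4]])     # [8, 8, 8, 8]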
| 18,020
|
def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):
"""Log-likelihood under a Gaussian distribution with diagonal covariance.
Returns the log-likelihood for each dimension. One should sum the
results for the log-likelihood under the full multidimensional model.
Args:
z: The value to compute the log-likelihood.
mu: The mean of the Gaussian
logvar: The log variance of the Gaussian.
Returns:
The log-likelihood under the Gaussian model.
"""
return -0.5 * (logvar + np.log(2*np.pi) + \
tf.square((z-mu)/tf.exp(0.5*logvar)))
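# Usage sketch (not part of the original snippet); assumes TensorFlow 2.x eager execution.
# With mu=0 and logvar=0 this is the standard-normal log density per dimension:
# log N(0; 0, 1) = -0.5*log(2*pi) ~ -0.9189 and log N(1; 0, 1) ~ -1.4189.
import numpy as np
import tensorflow as tf
z = tf.constant([0.0, 1.0])
print(diag_gaussian_log_likelihood(z).numpy())   # approx [-0.9189, -1.4189]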
| 18,021
|
def _filter_option_to_config_setting(flt, setting):
"""
Encapsulates the logic for associating a filter database option with the filter setting from relay_config
:param flt: the filter
:param setting: the option deserialized from the database
:return: the option as viewed from relay_config
"""
if setting is None:
raise ValueError("Could not find filter state for filter {0}."
" You need to register default filter state in projectoptions.defaults.".format(flt.spec.id))
is_enabled = setting != '0'
ret_val = {
'is_enabled': is_enabled
}
# special case for legacy browser.
# If the number of special cases increases we'll have to factor this functionality somewhere
if flt.spec.id == FilterStatKeys.LEGACY_BROWSER:
if is_enabled:
if setting == '1':
# old style filter
ret_val['default_filter'] = True
else:
# new style filter, per legacy browser type handling
# ret_val['options'] = setting.split(' ')
ret_val['options'] = list(setting)
return ret_val
| 18,022
|
def write_config(conf, output_file):
"""Write documentation to yaml file."""
with open(output_file, 'w') as ofh:
yaml.dump(conf, ofh, default_flow_style=False)
| 18,023
|
def get_svg(accession, **kwargs):
"""
Returns a HMM sequence logo in SVG format.
Parameters
----------
accession : str
Pfam accession for desired HMM.
**kwargs :
Additional arguments are passed to :class:`LogoPlot`.
"""
logoplot = plot.LogoPlot(accession, **kwargs)
svg = logoplot.get_svg()
return svg
| 18,024
|
def wl_to_en( l ):
"""
Converts a wavelength, given in nm, to an energy in eV.
:param l: The wavelength to convert, in nm.
:returns: The corresponding energy in eV.
"""
a = phys.physical_constants[ 'electron volt-joule relationship' ][ 0 ] # J
return phys.Planck* phys.c/( a* l* 1e-9 )
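# Usage sketch (not part of the original snippet); assumes `phys` is scipy.constants,
# which provides physical_constants, Planck and c as used above.
import scipy.constants as phys
print(wl_to_en(620.0))   # ~2.0 eV, consistent with E[eV] ~ 1239.84 / wavelength[nm]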
| 18,025
|
def get_local_address_reaching(dest_ip: IPv4Address) -> Optional[IPv4Address]:
"""Get address of a local interface within same subnet as provided address."""
for iface in netifaces.interfaces():
for addr in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
iface = IPv4Interface(addr["addr"] + "/" + addr["netmask"])
if dest_ip in iface.network:
return iface.ip
return None
| 18,026
|
def atSendCmdTest(cmd_name: 'str', params: 'list'):
""" 发送测试命令,方便调试 ATCore
"""
func_name = 'atSendCmdTest'
atserial.ATraderCmdTest_send(cmd_name, params)
res = recv_serial(func_name)
atReturnChecker(func_name, res.result)
return res.listResult
| 18,027
|
def action_pool_nodes_del(
batch_client, config, all_start_task_failed, all_starting,
all_unusable, nodeid):
# type: (batchsc.BatchServiceClient, dict, bool, bool, bool, list) -> None
"""Action: Pool Nodes Del
:param azure.batch.batch_service_client.BatchServiceClient batch_client:
batch client
:param dict config: configuration dict
:param bool all_start_task_failed: delete all start task failed nodes
:param bool all_starting: delete all starting nodes
:param bool all_unusable: delete all unusable nodes
:param list nodeid: list of nodeids to delete
"""
_check_batch_client(batch_client)
if ((all_start_task_failed or all_starting or all_unusable) and
nodeid is not None):
raise ValueError(
'cannot specify all start task failed nodes or unusable with '
'a specific node id')
batch.del_nodes(
batch_client, config, all_start_task_failed, all_starting,
all_unusable, nodeid)
| 18,028
|
def unlabeled_balls_in_unlabeled_boxes(balls, box_sizes):
"""
OVERVIEW
This function returns a generator that produces all distinct distributions of
indistinguishable balls among indistinguishable boxes, with specified box
sizes (capacities). This is a generalization of the most common formulation
of the problem, where each box is sufficiently large to accommodate all of
the balls. It might be asked, 'In what sense are the boxes indistinguishable
if they have different capacities?' The answer is that the box capacities
must be considered when distributing the balls, but once the balls have been
distributed, the identities of the boxes no longer matter.
CONSTRUCTOR INPUTS
    balls: the number of balls
box_sizes: This argument is a list of length 1 or greater. The length of
the list corresponds to the number of boxes. `box_sizes[i]` is a positive
integer that specifies the maximum capacity of the ith box. If
`box_sizes[i]` equals `n` (or greater), the ith box can accommodate all `n`
balls and thus effectively has unlimited capacity.
NOTE
For `unlabeled_balls_in_unlabeled_boxes`, the order of the elements of the
`box_sizes` list is unimportant because the code will sort it into non-
increasing order before any other processing is done.
"""
if not isinstance(balls, int):
raise TypeError("balls must be a non-negative integer.")
if balls < 0:
raise ValueError("balls must be a non-negative integer.")
if not isinstance(box_sizes, (list, tuple)):
raise ValueError("box_sizes must be a non-empty list or tuple.")
    capacity = 0
    for size in box_sizes:
        if not isinstance(size, int):
            raise TypeError("box_sizes must contain only positive integers.")
        if size < 1:
            raise ValueError("box_sizes must contain only positive integers.")
        capacity += size
    if capacity < balls:
        raise ValueError("The total capacity of the boxes is less than the "
                         "number of balls to be distributed.")
    # Sort the box sizes so that the values decrease:
    box_sizes = sorted(box_sizes, reverse=True)
return _unlabeled_balls_in_unlabeled_boxes(balls, box_sizes)
| 18,029
|
def find_notebooks(path):
"""Yield all the notebooks in a directory
Yields the path relative to the given directory
"""
for parent, dirs, files in os.walk(path):
if ".ipynb_checkpoints" in parent.split(os.path.sep):
# skip accidentally committed checkpoints
continue
for fname in files:
if fname.endswith(".ipynb"):
yield os.path.relpath(os.path.join(parent, fname), path)
| 18,030
|
def get_version():
"""Extract current version from __init__.py."""
with open("morphocell/__init__.py", encoding="utf-8") as fid:
for line in fid:
if line.startswith("__version__"):
VERSION = line.strip().split()[-1][1:-1]
break
return VERSION
| 18,031
|
def sessions_speakers(transaction):
"""
GET /sessions/1/speakers
:param transaction:
:return:
"""
with stash['app'].app_context():
speaker = SpeakerFactory()
db.session.add(speaker)
db.session.commit()
| 18,032
|
def run(length, width, height, fps, level, observation_spec):
"""Spins up an environment and runs the random agent."""
env = deepmind_lab.Lab(
level, [observation_spec],
config={
'fps': str(fps),
'width': str(width),
'height': str(height)
})
env.reset()
agent = DiscretizedRandomAgent()
reward = 0
t0 = time.time()
for _ in xrange(length):
if not env.is_running():
print('Environment stopped early')
env.reset()
agent.reset()
obs = env.observations()
action = agent.step(reward, obs[observation_spec])
reward = env.step(action, num_steps=1)
t1 = time.time()
duration = t1 - t0
print('resolution: %i x %i, spec: %s, steps: %i, duration: %.1f, fps: %.1f' %
(width, height, observation_spec, length, duration, length / duration))
| 18,033
|
def get_neighbor_v4_by_id(obj_id):
"""Return an NeighborV4 by id.
Args:
obj_id: Id of NeighborV4
"""
try:
obj = NeighborV4.get_by_pk(id=obj_id)
except NeighborV4NotFoundError as e:
raise NeighborV4DoesNotExistException(str(e))
return obj
| 18,034
|
def get_resources_json_obj(resource_name: str) -> Dict:
"""
Get a JSON object of a specified resource.
:param resource_name: The name of the resource.
:returns: The JSON object (in the form of a dictionary).
:raises Exception: An exception is raised if the specified resources does
not exist.
"""
resource_map = _get_resources(_get_resources_json()["resources"])
if resource_name not in resource_map:
raise Exception(
"Error: Resource with name '{}' does not exist".format(
resource_name
)
)
return resource_map[resource_name]
| 18,035
|
def skip_if(predicate, reason=None):
"""Skip a test if predicate is true."""
reason = reason or predicate.__name__
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, **kw):
if predicate():
msg = "'%s' skipped: %s" % (fn_name, reason)
raise SkipTest(msg)
else:
return fn(*args, **kw)
return update_wrapper(maybe, fn)
return decorate
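# Usage sketch (not part of the original snippet); assumes the module defining skip_if
# imports SkipTest (e.g. from unittest) and update_wrapper (from functools).
import sys

@skip_if(lambda: sys.platform == "win32", reason="symlinks are unreliable on Windows")
def test_symlink_roundtrip():
    assert True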
| 18,036
|
def synthesized_uvw(ants, time, phase_dir, auto_correlations):
"""
Synthesizes new UVW coordinates based on time according to
NRAO CASA convention (same as in fixvis)
User should check these UVW coordinates carefully:
if time centroid was used to compute
original uvw coordinates the centroids
of these new coordinates may be wrong, depending on whether
data timesteps were heavily flagged.
"""
pytest.importorskip('pyrap')
from pyrap.measures import measures
from pyrap.quanta import quantity as q
dm = measures()
epoch = dm.epoch("UT1", q(time[0], "s"))
ref_dir = dm.direction("j2000",
q(phase_dir[0], "rad"),
q(phase_dir[1], "rad"))
ox, oy, oz = ants[0]
obs = dm.position("ITRF", q(ox, "m"), q(oy, "m"), q(oz, "m"))
# Setup local horizon coordinate frame with antenna 0 as reference position
dm.do_frame(obs)
dm.do_frame(ref_dir)
dm.do_frame(epoch)
ant1, ant2 = np.triu_indices(ants.shape[0],
0 if auto_correlations else 1)
ant1 = ant1.astype(np.int32)
ant2 = ant2.astype(np.int32)
ntime = time.shape[0]
nbl = ant1.shape[0]
rows = ntime * nbl
uvw = np.empty((rows, 3), dtype=np.float64)
# For each timestep
for ti, t in enumerate(time):
epoch = dm.epoch("UT1", q(t, "s"))
dm.do_frame(epoch)
ant_uvw = np.zeros_like(ants)
# Calculate antenna UVW positions
for ai, (x, y, z) in enumerate(ants):
bl = dm.baseline("ITRF",
q([x, ox], "m"),
q([y, oy], "m"),
q([z, oz], "m"))
ant_uvw[ai] = dm.to_uvw(bl)["xyz"].get_value()[0:3]
# Now calculate baseline UVW positions
# noting that ant1 - ant2 is the CASA convention
base = ti*nbl
uvw[base:base + nbl, :] = ant_uvw[ant1] - ant_uvw[ant2]
return ant1, ant2, uvw
| 18,037
|
def execute_pso_strategy(df, options, topology, retrain_params, commission, data_name, s_test, e_test, iters=100, normalization='exponential'):
"""
Execute particle swarm optimization strategy on data history contained in df
:param df: dataframe with historical data
:param options: dict with the following parameters
- c1 - cognitive parameter with which the particle follows its personal best
- c2 - social parameter with which the particle follows the swarm's global best position
- w - parameter that controls the inertia of the swarm's movement
    :param commission: commission to be paid on each operation
    :param data_name: quote data name
    :param s_test: start date of the simulation
    :param e_test: end date of the simulation
:return:
- PSO_Cerebro - execution engine
- PSO_Strategy - pso strategy instance
"""
    print_execution_name("Strategy: particle swarm optimization")
strategy_name = 'particle_swarm_optimization'
info = {
'Mercado': data_name,
'Estrategia': strategy_name,
'Fecha inicial': s_test,
'Fecha final': e_test
}
    # ------------ Build the train and test sets ------------ #
s_test_date = datetime.strptime(s_test, '%Y-%m-%d')
s_train = s_test_date.replace(year = s_test_date.year - 2)
#s_train = s_test_date - timedelta(days=180)
e_train = s_test_date - timedelta(days=1)
gen_representation = GeneticRepresentation(df, s_train, e_train, s_test, e_test)
    # ------------ Set the hyperparameters ------------ #
n_particles = topology['particles']
num_neighbours = topology['neighbours']
minkowski_p_norm = 2
options['k'] = num_neighbours
options['p'] = minkowski_p_norm
dimensions=len(gen_representation.moving_average_rules)+2
if normalization == 'exponential':
max_bound = 2.0 * np.ones(dimensions-2)
min_bound = -max_bound
elif normalization == 'l1':
max_bound = 2.0 * np.ones(dimensions-2)
min_bound = np.zeros(dimensions-2)
max_bound = np.append(max_bound, [0.9, 0.0])
min_bound = np.append(min_bound, [0.0, -0.9])
bounds = (min_bound, max_bound)
# Call instance of PSO
optimizer = ps.single.LocalBestPSO(n_particles=n_particles,
dimensions=dimensions,
options=options,
bounds=bounds,
static=True)
# Perform optimization
kwargs={'from_date': s_train, 'to_date': e_train, 'normalization': normalization}
best_cost, best_pos = optimizer.optimize(gen_representation.cost_function,
iters=iters,
n_processes=2,
**kwargs)
# Create an instance from CombinedSignalStrategy class and assign parameters
PSO_Strategy = CombinedSignalStrategy
w, buy_threshold, sell_threshold = get_split_w_threshold(best_pos)
"""
print("Umbral de compra: ", buy_threshold)
print("Umbral de venta: ", sell_threshold)
crosses = ["(" + str(cross[0]) + ", " + str(cross[1]) + ")" for cross in gen_representation.moving_average_rules]
y_pos = np.arange(len(crosses))
plt.bar(y_pos, w)
plt.xticks(y_pos, crosses)
plt.xticks(rotation='vertical')
plt.subplots_adjust(top=0.98, bottom=0.2, left=0.08, right=0.98, hspace=0.0, wspace=0.0)
plt.show()
"""
PSO_Strategy.w = w
PSO_Strategy.buy_threshold = buy_threshold
PSO_Strategy.sell_threshold = sell_threshold
PSO_Strategy.moving_average_rules = gen_representation.moving_average_rules
PSO_Strategy.moving_averages = gen_representation.moving_averages_test
PSO_Strategy.optimizer = optimizer
PSO_Strategy.gen_representation = gen_representation
PSO_Strategy.normalization = normalization
PSO_Strategy.retrain_params = retrain_params
df_test = gen_representation.df_test
df_train = gen_representation.df_train
PSO_Cerebro = execute_strategy(PSO_Strategy, df_test, commission, info, retrain_params)
return PSO_Cerebro, PSO_Strategy
| 18,038
|
def human_turn(c_choice, h_choice):
"""
The Human plays choosing a valid move.
:param c_choice: computer's choice X or O
:param h_choice: human's choice X or O
:return:
"""
depth = len(empty_cells(board))
if depth == 0 or game_over(board):
return
# Dictionary of valid moves
move = -1
moves = {
1: [0, 0], 2: [0, 1], 3: [0, 2],
4: [1, 0], 5: [1, 1], 6: [1, 2],
7: [2, 0], 8: [2, 1], 9: [2, 2],
}
clean()
print(f'Human turn [{h_choice}]')
render(board, c_choice, h_choice)
while move < 1 or move > 9:
try:
move = int(input('Use numpad (1..9): '))
coord = moves[move]
can_move = set_move(coord[0], coord[1], HUMAN)
if not can_move:
print('Bad move')
move = -1
except (EOFError, KeyboardInterrupt):
print('Bye')
exit()
except (KeyError, ValueError):
print('Bad choice')
| 18,039
|
def env_get(d, key, default, decoders=decoders, required=None):
"""
Look up ``key`` in ``d`` and decode it, or return ``default``.
"""
if required is None:
required = isinstance(default, type)
try:
value = d[key]
except KeyError:
if required:
raise
return default
dt = (default if default is None or isinstance(default, type)
else type(default))
for decoder in decoders:
if (decoder.decodes_to_type(dt) and
decoder.decodes_from_value(value)
):
try:
return decoder.decode(value)
except Exception as e:
logger.error("%s couldn't convert %s: %s: %s",
decoder.__class__.__name__, key,
e.__class__.__name__, e)
raise
raise ValueError("no suitable env decoder for {}".format(key))
| 18,040
|
def get_integer_part(expr: 'Expr', no: int, options: OPT_DICT, return_ints=False) -> \
tUnion[TMP_RES, tTuple[int, int]]:
"""
With no = 1, computes ceiling(expr)
With no = -1, computes floor(expr)
Note: this function either gives the exact result or signals failure.
"""
from sympy.functions.elementary.complexes import re, im
# The expression is likely less than 2^30 or so
assumed_size = 30
result = evalf(expr, assumed_size, options)
if result is S.ComplexInfinity:
raise ValueError("Cannot get integer part of Complex Infinity")
ire, iim, ire_acc, iim_acc = result
# We now know the size, so we can calculate how much extra precision
# (if any) is needed to get within the nearest integer
if ire and iim:
gap = max(fastlog(ire) - ire_acc, fastlog(iim) - iim_acc)
elif ire:
gap = fastlog(ire) - ire_acc
elif iim:
gap = fastlog(iim) - iim_acc
else:
# ... or maybe the expression was exactly zero
if return_ints:
return 0, 0
else:
return None, None, None, None
margin = 10
if gap >= -margin:
prec = margin + assumed_size + gap
ire, iim, ire_acc, iim_acc = evalf(
expr, prec, options)
else:
prec = assumed_size
# We can now easily find the nearest integer, but to find floor/ceil, we
# must also calculate whether the difference to the nearest integer is
# positive or negative (which may fail if very close).
def calc_part(re_im: 'Expr', nexpr: MPF_TUP):
from .add import Add
_, _, exponent, _ = nexpr
is_int = exponent == 0
nint = int(to_int(nexpr, rnd))
if is_int:
# make sure that we had enough precision to distinguish
# between nint and the re or im part (re_im) of expr that
# was passed to calc_part
ire, iim, ire_acc, iim_acc = evalf(
re_im - nint, 10, options) # don't need much precision
assert not iim
size = -fastlog(ire) + 2 # -ve b/c ire is less than 1
if size > prec:
ire, iim, ire_acc, iim_acc = evalf(
re_im, size, options)
assert not iim
nexpr = ire
nint = int(to_int(nexpr, rnd))
_, _, new_exp, _ = ire
is_int = new_exp == 0
if not is_int:
# if there are subs and they all contain integer re/im parts
# then we can (hopefully) safely substitute them into the
# expression
s = options.get('subs', False)
if s:
doit = True
# use strict=False with as_int because we take
# 2.0 == 2
for v in s.values():
try:
as_int(v, strict=False)
except ValueError:
try:
[as_int(i, strict=False) for i in v.as_real_imag()]
continue
except (ValueError, AttributeError):
doit = False
break
if doit:
re_im = re_im.subs(s)
re_im = Add(re_im, -nint, evaluate=False)
x, _, x_acc, _ = evalf(re_im, 10, options)
try:
check_target(re_im, (x, None, x_acc, None), 3)
except PrecisionExhausted:
if not re_im.equals(0):
raise PrecisionExhausted
x = fzero
nint += int(no*(mpf_cmp(x or fzero, fzero) == no))
nint = from_int(nint)
return nint, INF
re_, im_, re_acc, im_acc = None, None, None, None
if ire:
re_, re_acc = calc_part(re(expr, evaluate=False), ire)
if iim:
im_, im_acc = calc_part(im(expr, evaluate=False), iim)
if return_ints:
return int(to_int(re_ or fzero)), int(to_int(im_ or fzero))
return re_, im_, re_acc, im_acc
| 18,041
|
def validate_tree(tree):
"""Checks the validty of the tree.
Parameters
----------
tree : bp.BP
The tree to validate
"""
# this is currently untested since we can't actually parse a tree of this
# nature: https://github.com/wasade/improved-octo-waddle/issues/29
if len(tree) <= 1:
raise ValueError("Tree must contain at least 2 nodes.")
# While traversing the tree, record tip / internal node names
# (Nodes without names are ignored, since we'll assign those later
# using tools.fill_missing_node_names())
tip_names = []
internal_node_names = []
max_branch_length = 0
# do not include the root in these checks
for i in range(1, len(tree)):
node = tree.postorderselect(i)
name = tree.name(node)
length = tree.length(node)
if name is not None:
if isleaf(tree, node):
tip_names.append(name)
else:
internal_node_names.append(name)
if length < 0:
raise ValueError(
"Non-root branches of the tree must have nonnegative "
"lengths."
)
max_branch_length = max(length, max_branch_length)
# We didn't consider the root node in the above traversal since we
# don't care about its length. However, we do care about its name,
# so we add the root's name to internal_node_names.
internal_node_names.append(tree.name(tree.postorderselect(i + 1)))
if max_branch_length == 0:
raise ValueError(
"At least one non-root branch of the tree must have a "
"positive length."
)
unique_tip_name_set = set(tip_names)
if len(unique_tip_name_set) != len(tip_names):
raise ValueError("Tip names in the tree must be unique.")
unique_internal_node_name_set = set(internal_node_names)
if len(unique_tip_name_set & unique_internal_node_name_set) > 0:
raise ValueError(
"Tip names in the tree cannot overlap with internal node "
"names."
)
if len(unique_internal_node_name_set) != len(internal_node_names):
warnings.warn(
"Internal node names in the tree are not unique.",
TreeFormatWarning
)
return
| 18,042
|
def doPrimaryClick(releaseDelay: Optional[float] = None):
"""
Performs a primary mouse click at the current mouse pointer location.
The primary button is the one that usually activates or selects an item.
This function honors the Windows user setting
for which button (left or right) is classed as the primary button.
    @param releaseDelay: optional float in seconds of how long NVDA should sleep
between pressing down and then releasing up the primary button.
"""
buttonFlags = getLogicalButtonFlags()
_doClick(buttonFlags.primaryDown, buttonFlags.primaryUp, releaseDelay)
| 18,043
|
def inv_last_roundf(ns):
"""
ns -> States of nibbles
Predict the states of nibbles after passing through the inverse last round
of SomeCipher. Refer to `last_roundf()` for more details.
"""
return inv_shift_row(ns)
| 18,044
|
def get_screen(name, layer=None):
"""
:doc: screens
Returns the ScreenDisplayable with the given `name` on layer. `name`
is first interpreted as a tag name, and then a screen name. If the
screen is not showing, returns None.
This can also take a list of names, in which case the first screen
that is showing is returned.
This function can be used to check if a screen is showing::
if renpy.get_screen("say"):
text "The say screen is showing."
else:
text "The say screen is hidden."
"""
if layer is None:
layer = get_screen_layer(name)
if isinstance(name, basestring):
name = (name,)
sl = renpy.exports.scene_lists()
for tag in name:
sd = sl.get_displayable_by_tag(layer, tag)
if sd is not None:
return sd
for tag in name:
sd = sl.get_displayable_by_name(layer, (tag,))
if sd is not None:
return sd
return None
| 18,045
|
def generate_address_full(chance=None, variation=False, format=1):
"""
Function to generate the full address of the profile.
Args:
chance: Integer between 1-100 used for realistic variation. (not required)
variation: Boolean value indicating whether variation is requested. (optional)
        format: Integer or string value used to indicate the required format. (optional)
            Options include:
                1 - a single string value
                2 - a list value
Returns:
The return value. String/List value containing the full address.
"""
if not chance:
chance = random.randint(1,100)
csv_file = open(canadian_data_file_name, 'r')
csv_reader = csv.reader(csv_file, delimiter=',')
random_row = random.choice(list(csv_reader))
csv_file.close()
if format == 1 or format == "1":
return "%s %s, %s, %s, %s" % (generate_street_number(row=random_row),generate_street_name(chance=chance, variation=variation,row=random_row),generate_city(chance=chance, variation=variation,row=random_row),generate_province(chance=chance, variation=variation,row=random_row),generate_postal_code(chance=chance, variation=variation,row=random_row))
elif format == 2 or format == "2":
address_list=[]
address_list.append(generate_street_number(row=random_row))
        address_list.append(generate_street_name(chance=chance, variation=variation, row=random_row))
        address_list.append(generate_city(chance=chance, variation=variation, row=random_row))
        address_list.append(generate_province(chance=chance, variation=variation, row=random_row))
        address_list.append(generate_postal_code(chance=chance, variation=variation, row=random_row))
return address_list
| 18,046
|
def add_book(book, order, size, _age = 10):
""" Add a new order and size to a book, and age the rest of the book. """
yield order, size, _age
for o, s, age in book:
if age > 0:
yield o, s, age - 1
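# Usage sketch (not part of the original snippet): each call prepends the new order at the
# default age of 10 and ages every surviving entry by one, dropping entries whose age ran out.
book = list(add_book([], order=101.5, size=10))     # [(101.5, 10, 10)]
book = list(add_book(book, order=101.6, size=5))
print(book)                                         # [(101.6, 5, 10), (101.5, 10, 9)]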
| 18,047
|
def binder_url(repo, branch="master", filepath=None):
"""
Build a binder url. If filepath is provided, the url will be for
the specific file.
Parameters
----------
repo: str
The repository in the form "username/reponame"
branch: str, optional
The branch, default "master"
filepath: str, optional
The path to a file in the repo, e.g. dir1/dir2/notebook.ipynb
Returns
-------
str
A binder url that will launch a notebook server
"""
if filepath is not None:
fpath = urllib.parse.quote(filepath, safe="%")
return resources.BINDER_URL_TEMPLATE_WITH_FILEPATH.format(
repo, branch, fpath
)
else:
return resources.BINDER_URL_TEMPLATE_NO_FILEPATH.format(repo, branch)
| 18,048
|
async def on_message_execute(message):
"""
The on_message function specifically handles messages that are sent to the bot.
It checks if the message is a command, and then executes it if it is.
:param message: Used to store information about the message.
:return: None.
"""
if message.author.bot:
return
if isinstance(message.channel, DMChannel):
return
if re.search(r"(?i)^(?:hi|what\'s up|yo|hey|hello) lhbot", message.content):
await message.channel.send("hello")
if re.search(
r"(?i)(?:the|this) (?:current )?year is "
+ r"(?:almost |basically )?(?:over|done|finished)",
message.content,
):
await message.channel.send(get_year_string())
if re.search(r"(?i)^you wanna fight, lhbot\?", message.content):
await message.channel.send("bring it on pal (╯°□°)╯︵ ┻━┻")
if re.search(r"(?i)^lhbot meow", message.content):
await message.channel.send("ฅ^•ﻌ•^ฅ")
if re.search(
r"(?i)^lh what(?:\'s| is) the answer to life,? the universe and everything",
message.content,
):
await message.channel.send("42")
| 18,049
|
def _create_group_hub_without_avatar(_khoros_object, _api_url, _payload):
"""This function creates a group hub with only a JSON payload and no avatar image.
.. versionadded:: 2.6.0
:param _khoros_object: The core :py:class:`khoros.Khoros` object
:type _khoros_object: class[khoros.Khoros]
:param _api_url: The API URL to utilize in the API request
:type _api_url: str
:param _payload: The JSON payload to be used in the API request
:type _payload: dict
:returns: The API response from the POST request
:raises: :py:exc:`khoros.errors.exceptions.APIConnectionError`,
:py:exc:`khoros.errors.exceptions.POSTRequestError`
"""
_headers = {'content-type': 'application/json'}
_response = api.post_request_with_retries(_api_url, _payload, khoros_object=_khoros_object, headers=_headers)
return _response
| 18,050
|
def ReplaceDirName(rootDir, startNumber=0):
    """Rename the folders under the rootDir path.
    This function renames every directory under rootDir in a single pass
    (folder names are turned into a zero-padded number sequence).
    rootDir : the directory on your computer.
    startNumber: the number the new folder names should start from. If not given, defaults to 0.
    # Returns
    Nothing, but prints the details of each change.
    """
    num = startNumber
    dirs = os.listdir(rootDir)
    for dir in dirs:
        print('Old name is:' + dir)  # print the old name
        num = num + 1
        temp = "%03d" % int(num)  # pad the number to 3 digits with leading zeros
        oldName = os.path.join(rootDir, dir)  # old folder path
        newName = os.path.join(rootDir, temp)  # new folder path
        os.rename(oldName, newName)  # rename
| 18,051
|
def ulstrip(text):
"""
Strip Unicode extended whitespace from the left side of a string
"""
return text.lstrip(unicode_extended_whitespace)
| 18,052
|
def deploy(verbosity='noisy'):
"""
Full server deploy.
Updates the repository (server-side), synchronizes the database, collects
static files and then restarts the web service.
"""
if verbosity == 'noisy':
hide_args = []
else:
hide_args = ['running', 'stdout']
with hide(*hide_args):
puts('Updating repository...')
execute(update)
puts('Collecting static files...')
execute(collectstatic)
puts('Synchronizing database...')
execute(migrate)
puts('Restarting web server...')
execute(restart)
puts('Installing crontab...')
execute(crontab)
| 18,053
|
def clone_dcm_meta(dcm):
"""
Copy an existing pydicom Dataset as a basis for saving
another image
:param dcm: the pydicom dataset to be copied
:return:
"""
newdcm = pydi.Dataset()
for k, v in dcm.items():
newdcm[k] = v
newdcm.file_meta = mk_file_meta()
newdcm.is_little_endian = True
newdcm.is_implicit_VR = False
newdcm.SOPInstanceUID = newdcm.file_meta.MediaStorageSOPInstanceUID
newdcm.SOPClassUID = newdcm.file_meta.MediaStorageSOPClassUID
return newdcm
| 18,054
|
def test_ingredients_limited_to_user(authenticated_user):
"""Test that ingredients returned are for authenticated user"""
user, client = authenticated_user
user2 = get_user_model().objects.create_user("other@test.com", "testpass")
Ingredient.objects.create(user=user2, name="banana")
ingredient = Ingredient.objects.create(user=user, name="orange")
res = client.get(INGREDIENTS_URL)
assert res.status_code == status.HTTP_200_OK
assert len(res.data) == 1
assert res.data[0]["name"] == ingredient.name
| 18,055
|
def run_yosys_with_abc():
"""
Execute yosys with ABC and optional blackbox support
"""
ys_params = create_yosys_params()
yosys_template = args.yosys_tmpl if args.yosys_tmpl else os.path.join(
cad_tools["misc_dir"], "ys_tmpl_yosys_vpr_flow.ys")
tmpl = Template(open(yosys_template, encoding='utf-8').read())
with open("yosys.ys", 'w') as archfile:
archfile.write(tmpl.safe_substitute(ys_params))
run_command("Run yosys", "yosys_output.log",
[cad_tools["yosys_path"], 'yosys.ys'])
| 18,056
|
def load_plate(toml_path):
"""\
Parse a TOML-formatted configuration file defining how each well in a
particular plate should be interpreted.
Below is a list of the keys that are understood in the configuration file:
'xlsx_path' [string]
The path to the XLSX file containing the plate reader data, relative to
the configuration file itself. If not specified, this script will look
for a file with the same name as the configuration file, but the
'.xlsx' extension, e.g. 'abc.xlsx' if the config file is 'abc.toml'.
'template' [string]
The path to another TOML file that should be interpreted as containing
default values for all possible settings.
'notes' [string]
A string that will be printed every time the file is visualized. This
is meant to reminder the user of any details relating to this
particular experiment (e.g. mistakes) that might affect interpretation
of the data.
The following keys relate to particular wells. Each of these keys can be
specified in any of four kinds of block: [well.A1], [row.A], [col.1], and
[plate]. The [well] block allows values to be set for individual wells ('A1'
in this example). The [row] and [col] blocks allow values to be set for
whole rows and columns ('A' and '1' in these examples). The [plate] block
allows values to be set for the whole plate. The same value can be set
multiple times, in which case the value from the most specific block will
take precedence.
"""
import toml
import itertools
from pathlib import Path
def recursive_merge(layout, defaults, overwrite=False):
for key, default in defaults.items():
if isinstance(default, dict):
layout.setdefault(key, {})
recursive_merge(layout[key], default)
else:
if overwrite or key not in layout:
layout[key] = default
def do_load_paths(toml_path, expected_ext='.xlsx'):
toml_path = Path(toml_path).resolve()
layout = toml.load(str(toml_path))
# Resolve the path(s) to actual data.
if 'path' in layout and 'paths' in layout:
raise ValueError(f"{toml_path} specifies both 'path' and 'paths'")
elif 'path' in layout:
path = toml_path.parent / layout['path']
layout['paths'] = {'default': path}
elif 'paths' in layout:
layout['paths'] = {
toml_path.parent / x
for x in layout['paths']
}
else:
default_path = toml_path.with_suffix(expected_ext)
if default_path.exists():
layout['paths'] = {'default': default_path}
# Include a remote file if one is specified.
if 'template' in layout:
layout['template'] = toml_path.parent / layout['template']
template = do_load_paths(layout['template'])
recursive_merge(layout, template)
return layout
layout = do_load_paths(toml_path)
# Apply any row or column defaults.
if 'well' not in layout:
layout['well'] = {}
rows = layout.get('row', {})
cols = layout.get('col', {})
# Create new wells implied by the 'row' and 'col' blocks.
for row, col in itertools.product(rows, cols):
layout['well'].setdefault(f'{row}{col}', {})
# Update any existing wells.
for well in layout.get('well', {}):
row, col = well[:1], well[1:]
recursive_merge(layout['well'][well], rows.get(row, {}))
recursive_merge(layout['well'][well], cols.get(col, {}))
# Apply any plate-wide defaults.
    layout.setdefault('plate', {})
for well in layout.get('well', {}):
recursive_merge(layout['well'][well], layout['plate'])
# If the experiment has any notes, print them out.
if 'notes' in layout:
print(toml_path)
print(layout['notes'].strip())
print()
return layout
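# Usage sketch (not part of the original snippet); a hypothetical layout that exercises the
# precedence rules: [well] beats [row]/[col], which beat [plate]. Assumes the third-party
# `toml` package used by load_plate is installed; no XLSX file is needed for this check.
import tempfile, textwrap
from pathlib import Path

toml_text = textwrap.dedent("""\
    [plate]
    media = "LB"

    [row.A]
    strain = "wt"

    [col.1]
    conc_uM = 0

    [well.A1]
    note = "control"
    """)
with tempfile.TemporaryDirectory() as d:
    toml_path = Path(d) / "plate.toml"
    toml_path.write_text(toml_text)
    layout = load_plate(toml_path)
    print(layout["well"]["A1"])
    # {'note': 'control', 'strain': 'wt', 'conc_uM': 0, 'media': 'LB'} (key order may differ)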
| 18,057
|
def reduce_entropy(X, axis=-1):
"""
calculate the entropy over axis and reduce that axis
:param X:
:param axis:
:return:
"""
return -1 * np.sum(X * np.log(X+1E-12), axis=axis)
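# Usage sketch (not part of the original snippet): the entropy of a uniform distribution
# over 4 outcomes is ln(4) ~ 1.386, and a one-hot distribution has entropy ~ 0.
import numpy as np
X = np.array([[0.25, 0.25, 0.25, 0.25],
              [1.0, 0.0, 0.0, 0.0]])
print(reduce_entropy(X))   # approx [1.386, 0.0]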
| 18,058
|
def compile_pbt(lr: float = 5e-3, value_weight: float = 0.5):
"""
my default: 5e-3
# SAI: 1e-4
# KataGo: per-sample learning rate of 6e-5, except 2e-5 for the first 5mm samples
"""
input_shape = (N, N, dual_net.get_features_planes())
model = dual_net.build_model(input_shape)
opt = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=opt,
loss={
'policy': 'categorical_crossentropy',
'value': custom_BCE_loss},
loss_weights={
'policy': 0.50,
'value': value_weight},
metrics={
'policy': keras.metrics.CategoricalAccuracy(name="move_acc"),
})
return model
| 18,059
|
def _hostnames() -> List[str]:
"""Returns all host names from the ansible inventory."""
return sorted(_ANSIBLE_RUNNER.get_hosts())
| 18,060
|
def memory(kdump_memory):
"""Set memory allocated for kdump capture kernel"""
config_db = ConfigDBConnector()
if config_db is not None:
config_db.connect()
config_db.mod_entry("KDUMP", "config", {"memory": kdump_memory})
| 18,061
|
def seabass_to_pandas(path):
"""SeaBASS to Pandas DataFrame converter
Parameters
----------
path : str
path to an FCHECKed SeaBASS file
Returns
-------
pandas.DataFrame
"""
sb = readSB(path)
dataframe = pd.DataFrame.from_dict(sb.data)
return dataframe
| 18,062
|
def countVisits(item, value=None):
"""This function takes a pandas.Series of item tags, and an optional string for a specific tag
and returns a numpy.ndarray of the same size as the input, which contains either
    1) a running count of unique transitions of item, if no target tag is given, or
    2) a running count of the number of entries into runs of the target tag
:param item: a pandas Series of labels of events
:param value: optional value of the item to keep track of
:return: a running count of the unique values of items if value==None, or a running count of the specific value
"""
# make sure item is a 1-D np array or a Pandas Series
# if not isinstance(item, (pd.core.series.Series, np.ndarray) ):
assert (isinstance(item, pd.core.series.Series))
# create counter; this saves time, apparently
    count = np.zeros((item.size), dtype=int)
if value is None:
# not specified, then we track any time item changes value
count[np.where(item != item.shift())] = 1
else:
# only when item==value
count[np.where(np.logical_and(item != item.shift(), item == value))] = 1
return count.cumsum()
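# Usage sketch (not part of the original snippet): three runs ('a', 'b', 'a') give a running
# transition count of [1, 1, 2, 2, 3]; tracking value='b' counts only entries into 'b' runs.
import pandas as pd
item = pd.Series(['a', 'a', 'b', 'b', 'a'])
print(countVisits(item).tolist())              # [1, 1, 2, 2, 3]
print(countVisits(item, value='b').tolist())   # [0, 0, 1, 1, 1]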
| 18,063
|
def test_atomic_unsigned_short_min_exclusive_4_nistxml_sv_iv_atomic_unsigned_short_min_exclusive_5_1(mode, save_output, output_format):
"""
Type atomic/unsignedShort is restricted by facet minExclusive with
value 65534.
"""
assert_bindings(
schema="nistData/atomic/unsignedShort/Schema+Instance/NISTSchema-SV-IV-atomic-unsignedShort-minExclusive-5.xsd",
instance="nistData/atomic/unsignedShort/Schema+Instance/NISTXML-SV-IV-atomic-unsignedShort-minExclusive-5-1.xml",
class_name="NistschemaSvIvAtomicUnsignedShortMinExclusive5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 18,064
|
def test_run_inference(ml_runner_with_container: MLRunner, tmp_path: Path) -> None:
"""
Test that run_inference gets called as expected.
"""
def _expected_files_exist() -> bool:
output_dir = ml_runner_with_container.container.outputs_folder
if not output_dir.is_dir():
return False
expected_files = ["test_mse.txt", "test_mae.txt"]
return all([(output_dir / p).exists() for p in expected_files])
# create the test data
import numpy as np
import torch
N = 100
x = torch.rand((N, 1)) * 10
y = 0.2 * x + 0.1 * torch.randn(x.size())
xy = torch.cat((x, y), dim=1)
data_path = tmp_path / "hellocontainer.csv"
np.savetxt(data_path, xy.numpy(), delimiter=",")
expected_ckpt_path = ml_runner_with_container.container.outputs_folder / "checkpoints" / "last.ckpt"
assert not expected_ckpt_path.exists()
# update the container to look for test data at this location
ml_runner_with_container.container.local_dataset_dir = tmp_path
assert not _expected_files_exist()
actual_train_ckpt_path = ml_runner_with_container.checkpoint_handler.get_recovery_or_checkpoint_path_train()
assert actual_train_ckpt_path is None
ml_runner_with_container.run()
actual_train_ckpt_path = ml_runner_with_container.checkpoint_handler.get_recovery_or_checkpoint_path_train()
assert actual_train_ckpt_path == expected_ckpt_path
actual_test_ckpt_path = ml_runner_with_container.checkpoint_handler.get_checkpoints_to_test()
assert actual_test_ckpt_path == [expected_ckpt_path]
assert actual_test_ckpt_path[0].exists()
# After training, the outputs directory should now exist and contain the 2 error files
assert _expected_files_exist()
# if no checkpoint handler, no checkpoint paths will be saved and these are required for
# inference so ValueError will be raised
with pytest.raises(ValueError) as e:
ml_runner_with_container.checkpoint_handler = None # type: ignore
ml_runner_with_container.run()
assert "expects exactly 1 checkpoint for inference, but got 0" in str(e)
| 18,065
|
def gen_accel_table(table_def):
"""generate an acceleration table"""
table = []
for i in range(1001):
table.append(0)
for limit_def in table_def:
range_start, range_end, limit = limit_def
for i in range(range_start, range_end + 1):
table[i] = limit
return table
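# Usage sketch (not part of the original snippet): the table has 1001 slots (indices 0..1000)
# and each (range_start, range_end, limit) triple fills its inclusive range with that limit.
table = gen_accel_table([(0, 100, 5), (101, 1000, 10)])
print(len(table), table[50], table[500])   # 1001 5 10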
| 18,066
|
def dataset_constructor(
config: ml_collections.ConfigDict,
) -> Tuple[
torch.utils.data.Dataset, torch.utils.data.Dataset, torch.utils.data.Dataset
]:
"""
Create datasets loaders for the chosen datasets
:return: Tuple (training_set, validation_set, test_set)
"""
dataset = {
"AddProblem": AdditionProblem,
"CopyMemory": CopyMemory,
"MNIST": MNIST,
"CIFAR10": CIFAR10,
"SpeechCommands": SpeechCommands,
"CharTrajectories": CharTrajectories,
}[config.dataset]
training_set = dataset(
partition="train",
seq_length=config.seq_length,
memory_size=config.memory_size,
mfcc=config.mfcc,
sr=config.sr_train,
dropped_rate=config.drop_rate,
)
test_set = dataset(
partition="test",
seq_length=config.seq_length,
memory_size=config.memory_size,
mfcc=config.mfcc,
sr=config.sr_train
if config.sr_test == 0
else config.sr_test, # Test set can be sample differently.
dropped_rate=config.drop_rate,
)
if config.dataset in ["SpeechCommands", "CharTrajectories"]:
validation_set = dataset(
partition="val",
seq_length=config.seq_length,
memory_size=config.memory_size,
mfcc=config.mfcc,
sr=config.sr_train,
dropped_rate=config.drop_rate,
)
else:
validation_set = None
return training_set, validation_set, test_set
| 18,067
|
def ensemble_log_params(m, params, hess=None,
steps=scipy.inf, max_run_hours=scipy.inf,
temperature=1.0, step_scale=1.0,
sing_val_cutoff=0, seeds=None,
recalc_hess_alg = False, recalc_func=None,
save_hours=scipy.inf, save_to=None,
skip_elems = 0, log_params=True,
save_scalefactors=False):
"""
Generate a Bayesian ensemble of parameter sets consistent with the data in
the model. The sampling is done in terms of the logarithm of the parameters.
Inputs:
m -- Model to generate the ensemble for
params -- Initial parameter KeyedList to start from
hess -- Hessian of the model
steps -- Maximum number of Monte Carlo steps to attempt
max_run_hours -- Maximum number of hours to run
temperature -- Temperature of the ensemble
step_scale -- Additional scale applied to each step taken. step_scale < 1
results in steps shorter than those dictated by the quadratic
approximation and may be useful if acceptance is low.
sing_val_cutoff -- Truncate the quadratic approximation at eigenvalues
smaller than this fraction of the largest.
seeds -- A tuple of two integers to seed the random number generator
recalc_hess_alg --- If True, the Monte-Carlo is done by recalculating the
                        hessian matrix every timestep. This significantly
increases the computation requirements for each step,
but it may be worth it if it improves convergence.
recalc_func --- Function used to calculate the hessian matrix. It should
take only a log parameters argument and return the matrix.
                     If this is None, the default is to use
                     m.GetJandJtJInLogParameters.
save_hours --- If save_to is not None, the ensemble will be saved to
that file every 'save_hours' hours.
save_to --- Filename to save ensemble to.
skip_elems --- If non-zero, skip_elems are skipped between each included
step in the returned ensemble. For example, skip_elems=1
will return every other member. Using this option can
reduce memory consumption.
save_scalefactors --- If True, scale factors will be saved during
integration.
Outputs:
ens, ens_fes, ratio, [scale_factors]
ens -- List of KeyedList parameter sets in the ensemble
ens_fes -- List of free energies for each parameter set
ratio -- Fraction of attempted moves that were accepted
scale_factors -- List of scale factors throughout ensemble, only returned
if save_scalefactors is True.
    The sampling is done by Markov Chain Monte Carlo, with a Metropolis-Hastings
    update scheme. The candidate-generating density is a Gaussian centered on the
current point, with axes determined by the hessian. For a useful
introduction see:
Chib and Greenberg. "Understanding the Metropolis-Hastings Algorithm"
_The_American_Statistician_ 49(4), 327-335
"""
if scipy.isinf(steps) and scipy.isinf(max_run_hours):
logger.warn('Both steps and max_run_hours are infinite! '
'Code will not stop by itself!')
if seeds is None:
seeds = int(time.time()%1 * 1e6)
logger.debug('Seeding random number generator based on system time.')
logger.debug('Seed used: %s' % str(seeds))
scipy.random.seed(seeds)
if isinstance(params, KeyedList):
param_keys = params.keys()
curr_params = copy.deepcopy(params)
curr_F = m.free_energy(curr_params, temperature)
ens, ens_Fs = [curr_params], [curr_F]
curr_sf = m.internalVars['scaleFactors'].copy()
ens_scale_factors = [curr_sf]
# We work with arrays of params through the rest of the code
curr_params = scipy.array(curr_params)
if recalc_func is None and log_params:
recalc_func = lambda p: m.GetJandJtJInLogParameters(scipy.log(p))[1]
else:
recalc_func = lambda p: m.GetJandJtJ(p)[1]
accepted_moves, attempt_exceptions, ratio = 0, 0, scipy.nan
start_time = last_save_time = time.time()
# Calculate our first hessian if necessary
if hess is None:
hess = recalc_func(curr_params)
# Generate the sampling matrix used to generate candidate moves
samp_mat = _sampling_matrix(hess, sing_val_cutoff, temperature, step_scale)
steps_attempted = 0
while steps_attempted < steps:
# Have we run too long?
if (time.time() - start_time) >= max_run_hours*3600:
break
# Generate the trial move from the quadratic approximation
deltaParams = _trial_move(samp_mat)
# Scale the trial move by the step_scale and the temperature
#scaled_step = step_scale * scipy.sqrt(temperature) * deltaParams
scaled_step = deltaParams
if log_params:
next_params = curr_params * scipy.exp(scaled_step)
else:
next_params = curr_params + scaled_step
try:
next_F = m.free_energy(next_params, temperature)
        except Utility.SloppyCellException as X:
logger.warn('SloppyCellException in free energy evaluation at step '
'%i, free energy set to infinity.' % len(ens))
logger.warn('Parameters tried: %s.' % str(next_params))
attempt_exceptions += 1
next_F = scipy.inf
        except Utility.ConstraintViolatedException as X:
logger.warn('ConstraintViolatedException in free energy evaluation '
'at step %i, free energy set to infinity.' % len(ens))
logger.warn('Parameters tried: %s.' % str(next_params))
attempt_exceptions += 1
next_F = scipy.inf
if recalc_hess_alg and not scipy.isinf(next_F):
try:
next_hess = recalc_func(next_params)
next_samp_mat = _sampling_matrix(next_hess, sing_val_cutoff,
temperature, step_scale)
accepted = _accept_move_recalc_alg(curr_F, samp_mat,
next_F, next_samp_mat,
deltaParams, temperature)
            except Utility.SloppyCellException as X:
logger.warn('SloppyCellException in JtJ evaluation at step '
'%i, move not accepted.' % len(ens))
logger.warn('Parameters tried: %s.' % str(next_params))
attempt_exceptions += 1
next_F = scipy.inf
accepted = False
else:
accepted = _accept_move(next_F - curr_F, temperature)
steps_attempted += 1
if accepted:
accepted_moves += 1.
curr_params = next_params
curr_sf = m.internalVars['scaleFactors'].copy()
curr_F = next_F
if recalc_hess_alg:
hess = next_hess
samp_mat = next_samp_mat
if steps_attempted % (skip_elems + 1) == 0:
ens_Fs.append(curr_F)
if save_scalefactors:
ens_scale_factors.append(curr_sf)
if isinstance(params, KeyedList):
ens.append(KeyedList(zip(param_keys, curr_params)))
else:
ens.append(curr_params)
ratio = accepted_moves/steps_attempted
# Save to a file
if save_to is not None\
and time.time() >= last_save_time + save_hours * 3600:
_save_ens(ens, ens_Fs, ratio, save_to, attempt_exceptions,
steps_attempted, ens_scale_factors,
save_sf=save_scalefactors)
last_save_time = time.time()
if save_to is not None:
_save_ens(ens, ens_Fs, ratio, save_to, attempt_exceptions,
steps_attempted, ens_scale_factors,
save_sf=save_scalefactors)
if save_scalefactors:
return ens, ens_Fs, ratio, ens_scale_factors
else:
return ens, ens_Fs, ratio
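# Illustrative call sketch (assumptions: `model` is a SloppyCell Model and `initial_params`
# a KeyedList built elsewhere; the step count and filename are placeholders).
# ens, ens_Fs, ratio = ensemble_log_params(model, initial_params, steps=10000,
#                                          temperature=1.0, save_to='ensemble.bp',
#                                          save_hours=1.0)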
| 18,068
|
def process(actapi, country_list):
"""Fetch ISO-3166 list, process and print generic_uploader
data to stdout"""
facts_added = {}
for c_map in country_list:
for location_type, location in c_map.items():
if not location:
continue # Skip locations with empty values
# Skip facts that are already added
if location_type in LOCATION_TYPE_M and location not in facts_added:
fact_type = LOCATION_TYPE_M[location_type]
facts_added[location] = fact_type
fact = actapi.fact(fact_type).source("location", location)
if actapi.act_baseurl:
fact.add() # Add fact to platform, if baseurl is specified
else:
print(fact.json())
| 18,069
|
def read_from_url(url):
"""Read from a URL transparently decompressing if compressed."""
yield smart_open.open(url)
| 18,070
|
def issue_config_exists(repo_path):
"""
returns True if the issue template config.yml file exists in the repo_path
"""
path_to_config = repo_path + "/.github/ISSUE_TEMPLATE/config.yml"
return os.path.exists(path_to_config)
| 18,071
|
def test_safe_fn():
"""
Shows how the safe_fn guards against all exceptions.
:return:
"""
assert celldb._safe_fn(pow, 2, "a") is None
| 18,072
|
def _read(fd):
"""Default read function."""
return os.read(fd, 1024)
| 18,073
|
def get_metrics(actual_classes, pred_classes):
"""
Function to calculate performance metrics for the classifier
For each class, the following is calculated
TP: True positives = samples that were correctly put into the class
TN: True negatives = samples that were correctly not put into the class
    FP: False positives = samples that were incorrectly put into the class
FN: False negatives = samples that should be in the class but were put into
another class
Parameters
----------
pred_classes : neuron types predicted by the classifier
actual_classes : known neuron types
Returns
-------
conf_mat: Confusion matrix = a visual representation of the algorithm's performance
acc: Accuracy = the fraction of correctly classified samples
MK: Markedness = a measure of how trustworthy a classification is,
accounting for both positive and negative classifications.
Value close to 1 means the classifier makes mostly correct predictions, value
close to -1 means the classifier makes mostly wrong predictions.
"""
conf_mat = metrics.confusion_matrix(actual_classes, pred_classes)
acc = metrics.balanced_accuracy_score(actual_classes, pred_classes)
"""
the next portion of code is copied from:
https://towardsdatascience.com/multi-class-classification-extracting-performance-metrics-from-the-confusion-matrix-b379b427a872
"""
FP = conf_mat.sum(axis=0) - np.diag(conf_mat)
FN = conf_mat.sum(axis=1) - np.diag(conf_mat)
TP = np.diag(conf_mat)
TN = conf_mat.sum() - (FP + FN + TP)
FP = np.sum(FP)
FN = np.sum(FN)
TP = np.sum(TP)
TN = np.sum(TN)
"""
end of copied code
"""
MK = (TP/(TP+FP)) + (TN/(TN+FN)) - 1
return conf_mat, acc, MK
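# Worked example (illustrative labels, not from the original data): for
# actual = [0, 0, 1, 1] and predicted = [0, 1, 1, 1] the confusion matrix is
# [[1, 1], [0, 2]], the balanced accuracy is (1/2 + 2/2) / 2 = 0.75, and with
# TP=3, FP=1, FN=1, TN=3 summed over both classes, MK = 3/4 + 3/4 - 1 = 0.5.
example_conf_mat, example_acc, example_mk = get_metrics([0, 0, 1, 1], [0, 1, 1, 1])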
| 18,074
|
def load_modules():
"""Imports all modules from the modules directory."""
for module in os.listdir('modules/'):
if not module.startswith('_') and module.endswith('.py'):
__import__('modules.{}'.format(module.split('.')[0]))
| 18,075
|
def extract_tika_meta(meta):
"""Extracts and normalizes metadata from Apache Tika.
Returns a dict with the following keys set:
- content-type
- author
- date-created
- date-modified
- original-tika-meta
The dates are encoded in the ISO format."""
def _get_flat(dict, *keys):
item = None
for key in keys:
item = dict.get(key)
if item is not None:
break
if type(item) is list:
return item[0]
return item
def _get_bool(dict, *keys):
item = _get_flat(dict, *keys)
if not item:
return False
if type(item) is bool:
return item
return item.lower() == "true"
data = {
'content-type': _get_flat(meta,
'Content-Type',
'content-type'),
'author': _get_flat(meta,
'Author',
'meta:author',
'creator'),
'date-created': _get_flat(meta,
'Creation-Date',
'dcterms:created',
'meta:created',
'created'),
'date-modified': _get_flat(meta,
'Last-Modified',
'Last-Saved-Date',
'dcterms:modified',
'meta:modified',
'created'),
'original-tika-meta': meta
}
for key in ['date-modified', 'date-created']:
if data.get(key):
data[key] = dateutil.parser.parse(data[key]).isoformat()
return data
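# Illustrative example (made-up metadata, not a real Tika response):
example_meta = extract_tika_meta({
    'Content-Type': 'application/pdf',
    'Author': ['Jane Doe'],
    'Creation-Date': '2020-01-01T00:00:00Z',
})
# example_meta['content-type']  -> 'application/pdf'
# example_meta['author']        -> 'Jane Doe'   (first element of the list)
# example_meta['date-created']  -> '2020-01-01T00:00:00+00:00'
# example_meta['date-modified'] -> None         (no modification keys present)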
| 18,076
|
def update_table(key_id,guess,correct):
"""
    Updates the database entry for key_id:
        if wrong: appends the guess to human_guesses
        if right: increments recognized_by_human by 1
"""
try:
client = MongoClient('localhost') # get our client
db = client.quickdraw # get our database
if(correct==False):
db.qd.update_one({'key_id':key_id},{'$push': {'human_guesses': guess}})
else:
db.qd.update_one({'key_id':key_id},{'$inc':{'recognized_by_human': 1}})
except Exception as e:
print("Unable to connect to database: {0}".format(e))
| 18,077
|
def __gen_pause_flow(testbed_config,
src_port_id,
flow_name,
pause_prio_list,
flow_dur_sec):
"""
Generate the configuration for a PFC pause storm
Args:
testbed_config (obj): L2/L3 config of a T0 testbed
src_port_id (int): ID of the source port
        flow_name (str): flow's name
        pause_prio_list (list): priorities to pause for PFC frames
        flow_dur_sec (float): duration of the flow in seconds
Returns:
flow configuration (obj): including name, packet format, rate, ...
"""
pause_time = []
for x in range(8):
if x in pause_prio_list:
pause_time.append('ffff')
else:
pause_time.append('0000')
vector = pfc_class_enable_vector(pause_prio_list)
pause_pkt = Header(PfcPause(
dst=FieldPattern(choice='01:80:C2:00:00:01'),
src=FieldPattern(choice='00:00:fa:ce:fa:ce'),
class_enable_vector=FieldPattern(choice=vector),
pause_class_0=FieldPattern(choice=pause_time[0]),
pause_class_1=FieldPattern(choice=pause_time[1]),
pause_class_2=FieldPattern(choice=pause_time[2]),
pause_class_3=FieldPattern(choice=pause_time[3]),
pause_class_4=FieldPattern(choice=pause_time[4]),
pause_class_5=FieldPattern(choice=pause_time[5]),
pause_class_6=FieldPattern(choice=pause_time[6]),
pause_class_7=FieldPattern(choice=pause_time[7]),
))
dst_port_id = (src_port_id + 1) % len(testbed_config.devices)
pause_src_point = PortTxRx(tx_port_name=testbed_config.ports[src_port_id].name,
rx_port_name=testbed_config.ports[dst_port_id].name)
"""
The minimal fixed time duration in IXIA is 1 second.
To support smaller durations, we need to use # of packets
"""
speed_str = testbed_config.layer1[0].speed
speed_gbps = int(speed_str.split('_')[1])
pause_dur = 65535 * 64 * 8.0 / (speed_gbps * 1e9)
pps = int(2 / pause_dur)
pkt_cnt = pps * flow_dur_sec
pause_flow = Flow(
name=flow_name,
tx_rx=TxRx(pause_src_point),
packet=[pause_pkt],
size=Size(64),
rate=Rate('pps', value=pps),
duration=Duration(FixedPackets(packets=pkt_cnt, delay=0))
)
return pause_flow
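# Worked example of the rate calculation above (illustrative link speed): on a 100 Gbps port,
# a PFC frame with pause_time 0xffff quiets a priority for
#     65535 quanta * 512 bits/quantum / 100e9 bps = 65535 * 64 * 8 / 100e9 ~= 335.5 us,
# so pps = int(2 / 335.5e-6) = 5961 pause frames per second, i.e. roughly two frames per
# pause duration, which keeps the storm continuously refreshed.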
| 18,078
|
def plot_spectra(obs, model):
"""Plot two spectra."""
plt.plot(obs.xaxis, obs.flux, label="obs")
plt.plot(model.xaxis, model.flux, label="model")
plt.legend()
plt.show()
| 18,079
|
def move_calculator():
"""
    A function that will calculate the best moves for the current position.\n
    This works by:
    - checking all the moves in the current position
"""
| 18,080
|
def parse_str_to_bio(str, dia_act):
""" parse str to BIO format """
intent = parse_intent(dia_act)
w_arr, bio_arr = parse_slots(str, dia_act)
bio_arr[-1] = intent
return ' '.join(w_arr), ' '.join(bio_arr), intent
| 18,081
|
def train_early_stop(
update_fn, validation_fn, optimizer, state, max_epochs=1e4, **early_stop_args
):
"""Run update_fn until given validation metric validation_fn increases.
"""
logger = Logger()
check_early_stop = mask_scheduler(**early_stop_args)
for epoch in jnp.arange(max_epochs):
(optimizer, state), metrics, output = update_fn(optimizer, state)
if epoch % 1000 == 0:
print(f"Loss step {epoch}: {metrics['loss']}")
if epoch % 25 == 0:
val_metric = validation_fn(optimizer, state)
stop_training, optimizer = check_early_stop(val_metric, epoch, optimizer)
metrics = {**metrics, "validation_metric": val_metric}
logger.write(metrics, epoch)
if stop_training:
print("Converged.")
break
logger.close()
return optimizer, state
| 18,082
|
def get_povm_object_names() -> List[str]:
"""Return the list of valid povm-related object names.
Returns
-------
List[str]
the list of valid povm-related object names.
"""
names = ["pure_state_vectors", "matrices", "vectors", "povm"]
return names
| 18,083
|
def choose(a,b):
""" n Choose r function """
a = op.abs(round(a))
b = op.abs(round(b))
if(b > a):
a, b = b, a
return factorial(a) / (factorial(b) * factorial(a-b))
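# Worked example: choose(5, 2) = 5! / (2! * 3!) = 10.0. Arguments are rounded and made
# non-negative, and swapped if b > a, so choose(2, -5.2) also evaluates to 10.0.
example_choose = choose(5, 2)  # -> 10.0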
| 18,084
|
def pad_and_stack_list_of_tensors(lst_embeddings: List[torch.Tensor], max_sequence_length: Optional[int] = None,
return_sequence_length: bool = False):
"""
    Takes a list of embeddings, zero-pads each one along the sequence dimension and stacks
    them into a single tensor of shape (batch, max_sequence_length, ...).
    @param lst_embeddings: list of tensors whose second-to-last axis is the sequence dimension
    @param max_sequence_length: target sequence length; defaults to the longest sequence in the list
    @param return_sequence_length: if True, also return the list of original sequence lengths
"""
dim = -2 # second last axis. it must be the sequence dimension.
lst_seq_len = [embeddings.shape[dim] for embeddings in lst_embeddings]
if max_sequence_length is None:
max_sequence_length = max(lst_seq_len)
else:
n_max = max(lst_seq_len)
assert max_sequence_length >= n_max, \
f"`max_sequence_length` must be greater or equal to max. embeddings size: {n_max} > {max_sequence_length}"
lst_padded_embeddings = [pad_trailing_tensors(e_t, max_sequence_length) for e_t in lst_embeddings]
stacked_embeddings = torch.stack(lst_padded_embeddings)
if return_sequence_length:
return stacked_embeddings, lst_seq_len
else:
return stacked_embeddings
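# Illustrative usage (shapes are placeholders): sequences of length 3 and 5 with feature
# size 4 are zero-padded to length 5 and stacked into a (2, 5, 4) tensor.
example_batch, example_lengths = pad_and_stack_list_of_tensors(
    [torch.zeros(3, 4), torch.zeros(5, 4)], return_sequence_length=True
)
# example_batch.shape -> torch.Size([2, 5, 4]); example_lengths -> [3, 5]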
| 18,085
|
def bq_solid_for_queries(sql_queries):
"""
Executes BigQuery SQL queries.
Expects a BQ client to be provisioned in resources as context.resources.bigquery.
"""
sql_queries = check.list_param(sql_queries, 'sql queries', of_type=str)
@solid(
input_defs=[InputDefinition(_START, Nothing)],
output_defs=[OutputDefinition(List[DataFrame])],
config_field=define_bigquery_query_config(),
required_resource_keys={'bigquery'},
metadata={'kind': 'sql', 'sql': '\n'.join(sql_queries)},
)
def bq_solid(context): # pylint: disable=unused-argument
query_job_config = _preprocess_config(context.solid_config.get('query_job_config', {}))
# Retrieve results as pandas DataFrames
results = []
for sql_query in sql_queries:
# We need to construct a new QueryJobConfig for each query.
# See: https://bit.ly/2VjD6sl
cfg = QueryJobConfig(**query_job_config) if query_job_config else None
context.log.info(
'executing query %s with config: %s'
% (sql_query, cfg.to_api_repr() if cfg else '(no config provided)')
)
results.append(
context.resources.bigquery.query(sql_query, job_config=cfg).to_dataframe()
)
return results
return bq_solid
| 18,086
|
def RestartNetwork():
"""Restarts networking daemon."""
logging.warning('Restart networking.')
try:
subprocess.check_output(['/etc/init.d/networking', 'restart'])
if HasIp():
logging.info('Network is back')
except subprocess.CalledProcessError as e:
# This is expected in some network environment.
logging.warning(e.output)
if 'No lease, failing' in e.output:
logging.warning('Can not get network, maybe try again later.')
else:
raise
| 18,087
|
def mock_accession_unreplicated(
mocker: MockerFixture,
mock_accession_gc_backend,
mock_metadata,
lab: str,
award: str,
) -> Accession:
"""
Mocked accession instance with dummy __init__ that doesn't do anything and pre-baked
assembly property. @properties must be patched before instantiation
"""
mocker.patch.object(
Accession,
"experiment",
new_callable=PropertyMock(
return_value=EncodeExperiment(
{
"@id": "foo",
"assay_term_name": "microRNA",
"replicates": [
{"biological_replicate_number": 1, "status": "released"}
],
}
)
),
)
mocked_accession = AccessionMicroRna(
"imaginary_steps.json",
Analysis(mock_metadata, backend=mock_accession_gc_backend),
"mock_server.biz",
EncodeCommonMetadata(lab, award),
Recorder(use_in_memory_db=True),
no_log_file=True,
)
return mocked_accession
| 18,088
|
def get_prepared_statement(statement_name: Optional[str] = None,
work_group: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPreparedStatementResult:
"""
Resource schema for AWS::Athena::PreparedStatement
:param str statement_name: The name of the prepared statement.
:param str work_group: The name of the workgroup to which the prepared statement belongs.
"""
__args__ = dict()
__args__['statementName'] = statement_name
__args__['workGroup'] = work_group
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:athena:getPreparedStatement', __args__, opts=opts, typ=GetPreparedStatementResult).value
return AwaitableGetPreparedStatementResult(
description=__ret__.description,
query_statement=__ret__.query_statement)
| 18,089
|
def set_initial_current_workspace(db_workspace):
"""
    Record db_workspace as the current workspace in the current_workspace table.
    :param db_workspace: the workspace database to record as current
    :return: None
"""
sql_create_current_workspace = ''' INSERT OR IGNORE INTO current_workspace(current_db)
VALUES(?) '''
CUR.execute(sql_create_current_workspace,db_workspace)
CONNECTION.commit()
| 18,090
|
def handle_col(element, box, _get_image_from_uri, _base_url):
"""Handle the ``span`` attribute."""
if isinstance(box, boxes.TableColumnBox):
integer_attribute(element, box, 'span')
if box.span > 1:
# Generate multiple boxes
# http://lists.w3.org/Archives/Public/www-style/2011Nov/0293.html
return [box.copy() for _i in range(box.span)]
return [box]
| 18,091
|
def get_dotted_field(input_dict: dict, accessor_string: str) -> dict:
"""Gets data from a dictionary using a dotted accessor-string.
Parameters
----------
input_dict : dict
A nested dictionary.
accessor_string : str
        A dot-separated path to the value in the nested dict.
Returns
-------
dict
Data from the dictionary.
"""
current_data = input_dict
for chunk in accessor_string.split("."):
current_data = current_data.get(chunk, {})
return current_data
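# Illustrative usage: missing path segments fall back to an empty dict instead of raising.
example_nested = {"meta": {"author": {"name": "Ada"}}}
get_dotted_field(example_nested, "meta.author")         # -> {"name": "Ada"}
get_dotted_field(example_nested, "meta.reviewer.name")  # -> {}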
| 18,092
|
def separa_frases(sentenca):
"""[A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca]
Arguments:
sentenca {[str]} -- [recebe uma frase]
Returns:
[lista] -- [lista das frases contidas na sentença]
"""
return re.split(r'[,:;]+', sentenca)
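# Illustrative usage: the sentence is split on commas, colons and semicolons.
separa_frases("ele chegou, olhou em volta; saiu")
# -> ['ele chegou', ' olhou em volta', ' saiu']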
| 18,093
|
def read_datasets(path=None, filename="datasets.json"):
"""Read the serialized (JSON) dataset list
"""
if path is None:
path = _MODULE_DIR
else:
path = pathlib.Path(path)
with open(path / filename, 'r') as fr:
ds = json.load(fr)
# make the functions callable
for _, dset_opts in ds.items():
args = dset_opts.get('load_function_args', {})
kwargs = dset_opts.get('load_function_kwargs', {})
fail_func = partial(unknown_function, dset_opts['load_function_name'])
func_mod_name = dset_opts.get('load_function_module', None)
if func_mod_name:
func_mod = importlib.import_module(func_mod_name)
else:
func_mod = _MODULE
func_name = getattr(func_mod, dset_opts['load_function_name'], fail_func)
func = partial(func_name, *args, **kwargs)
dset_opts['load_function'] = func
return ds
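# Sketch of the expected datasets.json layout (hypothetical entry, shown for illustration;
# the real file ships alongside the module):
# {
#     "wine": {
#         "load_function_module": "sklearn.datasets",
#         "load_function_name": "load_wine",
#         "load_function_args": [],
#         "load_function_kwargs": {"return_X_y": false}
#     }
# }
# read_datasets() then attaches a ready-to-call partial at ds["wine"]["load_function"].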
| 18,094
|
def get_solvents(reaction):
"""Return solvents involved in the specified reaction."""
for df in reaction.data_fields:
if SOLVENT_RE.match(df):
n = SOLVENT_RE.search(df).group('N')
name_field = 'RXN:VARIATION:STEPNO:SOLVENT(' + n + '):MOL:SYMBOL'
if name_field in reaction.data_fields:
name = reaction.data_fields[name_field]
else:
name = ''
yield reaction.data_fields[df], name, n
| 18,095
|
def test_negative_format_postcode2(formatting_negative_value_special_char):
"""Function to test format_postcode method for negative cases(Special chars)
arguments:
formatting_positive_value -- list of post codes with special char
"""
postcode = Postcode()
print(postcode.format_postcode(formatting_negative_value_special_char))
assert postcode.valid == False and postcode.message == "ERROR: No special Characters allowed"
| 18,096
|
def as_actor(input, actor) :
"""Takes input and actor, and returns [as
<$actor>]$input[endas]."""
if " " in actor :
repla = "<%s>"%actor
else :
repla = actor
return "[as %s]%s[endas]" % (repla, input)
| 18,097
|
def error_403(request):
"""View rendered when encountering a 403 error."""
return error_view(request, 403, _("Forbidden"),
_("You are not allowed to acces to the resource %(res)s.")
% {"res": request.path})
| 18,098
|
def restarts(**bindings):
"""Provide restarts. Known as `RESTART-CASE` in Common Lisp.
Roughly, restarts can be thought of as canned error recovery strategies.
That's the most common use case, although not the only possible one. You
can use restarts whenever you'd like to define a set of actions to handle a
specific condition (both in the everyday and technical senses of the word),
while allowing code higher up the call stack to decide which of those
actions to take in any particular use case. This improves modularity.
Note that restarts may be defined at any level of the call stack,
so they don't all have to be at the same level.
A restart can take any number of args and kwargs; its call signature
depends only on how it's intended to be invoked.
Example::
with restarts(use_value=(lambda x: x)) as result:
...
result << 42
The `with restarts` form binds an `unpythonic.collections.box` to hold the
return value of the block. Use `unbox(result)` to access the value. The
default value the box holds, if nothing is set into it, is `None`.
If the code inside the `with` block invokes one of the restarts defined in
this `with restarts`, the contents of the box are automatically set to the
value returned by the restart. Then execution continues from immediately
after the block.
The manual result assignment via `<<` at the end of the block is an
`unpythonic` idiom; it sets the return value of the block for a normal
return, i.e. when no restart was invoked.
We (ab)use the `with ... as ...` form, because in Python `with` is a
statement and thus cannot return a value directly. Also, it is idiomatic
enough to convey the meaning that `with restarts` introduces a binding.
If none of your restarts need to return a value, you can omit the as-binding.
If you just need a jump label for skipping the rest of the block at the
higher-level code's behest, you can use `lambda: None` as the restart
function (`cerror` and `warn` do this internally).
If you'd like to use a parametric decorator and a `def` instead of a `with`,
see the alternate syntax `with_restarts`.
"""
# Implementation notes:
#
# - Normally, an `__exit__` method of a context manager **must not**
# reraise if it gets an exception; this is the caller's responsibility.
# Instead, if the `__exit__` method wishes to indicate to the context
# manager framework (the `with` statement) that the exception should be
# propagated outwards, the method must return `False`.
# https://docs.python.org/3/library/stdtypes.html#typecontextmanager
# https://docs.python.org/3/reference/datamodel.html#context-managers
#
# - However, when a context manager is implemented using the
# `@contextmanager` decorator from `contextlib`, then the generator
# **must** reraise the exception (in the part after the `yield`,
# corresponding to `__exit__`) if it wishes to propagate it outwards.
# This is what we do here.
#
# - How does the `InvokeRestart` exception reach our generator in the first
# place, given that this generator is in the paused state at the time the
# exception is raised? The magic is in `contextlib`. When an exception is
# raised in the `with` body (whose context manager we are), the
# `@contextmanager` decorator throws the exception into the generator,
# into the position where it yielded. So if that `yield` is inside a
# `try`, the corresponding `except` clauses will get control.
# https://docs.python.org/3/library/contextlib.html#contextlib.contextmanager
#
# Regarding exceptions in generators in general, there's a pitfall to be
# aware of: if the `finally` clause of a `try`/`finally` contains a
# `yield`, the generator must jump through a hoop to work as expected:
# https://amir.rachum.com/blog/2017/03/03/generator-cleanup/
#
# In the `try` part it's always safe to `yield`, so in this particular
# instance this doesn't concern us. In the `finally` part it's *possible*
# to `yield`, but then `GeneratorExit` requires special consideration.
#
# Instead of using `@contextmanager`, we could have implemented `restarts`
# using `__enter__` and `__exit__` methods. We would examine the exception
# arguments in `__exit__`. If it was an `InvokeRestart`, and ours, we would
# process the restart and return `True` to indicate to the `with` machinery
# that the exception was handled and should not be propagated further. If
# it wasn't, we would just return `False` to let the `with` machinery
# propagate the exception. But using `@contextmanager`, we don't need a
# class. This way the code is shorter, and our exception processing can use
# the standard `try`/`except` construct.
b = box(None)
with Restarts(bindings):
try:
yield b
except InvokeRestart as exc:
if exc.restart.context is bindings: # if it's ours
b << exc()
else:
raise
| 18,099
|