| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def read_data_file(file_path: str, filename: str) -> Union[pd.DataFrame, str]:
"""Check read data file."""
logger.info(f"Reading {file_path}")
try:
if file_path.endswith(CSV):
return pd.read_csv(file_path, sep=",")
elif file_path.endswith(TSV):
return pd.read_csv(file_path, sep="\t")
else:
return pd.read_csv(file_path, sep=None, engine='python')
except IOError:
logger.error(f"Failed to read {filename} {file_path}. File exists: {os.path.isfile(file_path)}")
return f'There is a problem with your file {filename}. Please check that it meets the criteria.'
| 11,100
|
def mnist_loader(path="../../corruptmnist", n_files=8, image_scale=255):
"""
Loads the corrupted MNIST .npz files; assumes the stored image values lie between 0 and 1 and rescales them by image_scale
"""
# load and stack the corrupted mnist dataset
train_images = np.vstack(
[np.load(path + "/train_{}.npz".format(str(i)))["images"] for i in range(n_files)]
)
train_labels = np.hstack(
[np.load(path + "/train_{}.npz".format(str(i)))["labels"] for i in range(n_files)]
)
test_images = np.load(path + "/test.npz")["images"]
test_labels = np.load(path + "/test.npz")["labels"]
return train_images * image_scale, train_labels, test_images * image_scale, test_labels
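
# Usage sketch for mnist_loader (assumes numpy is imported as np, as above).
# The .npz files written below are tiny synthetic stand-ins for the real
# corruptmnist archives, created in a temporary directory purely for illustration.
import os
import tempfile
tmp_dir = tempfile.mkdtemp()
rng = np.random.default_rng(0)
for i in range(2):
    np.savez(os.path.join(tmp_dir, "train_{}.npz".format(i)),
             images=rng.random((5, 28, 28)), labels=rng.integers(0, 10, 5))
np.savez(os.path.join(tmp_dir, "test.npz"),
         images=rng.random((5, 28, 28)), labels=rng.integers(0, 10, 5))
train_x, train_y, test_x, test_y = mnist_loader(path=tmp_dir, n_files=2)
print(train_x.shape, train_y.shape)  # (10, 28, 28) (10,)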
| 11,101
|
def create_or_append_to_zip(file_handle, zip_path, arc_name=None):
"""
Append file_handle to the given zip_path, stored as arc_name if given, otherwise under its own name. zip_path is created if it does not exist.
:param file_handle: path to file or file-like object
:param zip_path: path to zip archive
:param arc_name: optional filename in archive
"""
with zipfile.ZipFile(zip_path, 'a') as my_zip:
if arc_name:
my_zip.write(file_handle, arc_name)
else:
my_zip.write(file_handle)
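
# Usage sketch: append two entries to an archive, the second under a custom
# arcname (assumes zipfile is imported, as above; the paths are illustrative).
import os
import tempfile
import zipfile
tmp_dir = tempfile.mkdtemp()
src = os.path.join(tmp_dir, "notes.txt")
with open(src, "w") as fh:
    fh.write("hello")
archive = os.path.join(tmp_dir, "bundle.zip")
create_or_append_to_zip(src, archive)                          # stored under its own path
create_or_append_to_zip(src, archive, arc_name="a/notes.txt")  # stored under the given arcname
with zipfile.ZipFile(archive) as z:
    print(z.namelist())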
| 11,102
|
def reduce_min(values, index, name='segmented_reduce_min'):
"""Computes the minimum over segments."""
return _segment_reduce(values, index, tf.math.unsorted_segment_min, name)
| 11,103
|
def write_exam_list(data: dict, filename: str) -> None:
"""Write a list of exam names to a json file
:param data: a dictionary containing data for all exams
:param filename: the file to write the list of exams to
:rtype: None
"""
exams = [e.name for e in data.keys()]
with open(filename, 'w') as f:
json.dump(exams, f)
| 11,104
|
def main():
"""Populate the Redshift database."""
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect(
'host={0} dbname={1} user={2} password={3} port={4}'.format(
*config['CLUSTER'].values(),
),
)
cur = conn.cursor()
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
| 11,105
|
def _applyTargetState(targetState, md, httpclient):
"""
compares the current device state against the targetStateProvider and issues updates as necessary to ensure the device is at that state.
:param md:
:param targetState: the target state.
:param httpclient: the http client
:return:
"""
anyUpdate = False
if md['fs'] != targetState.fs:
logger.info("Updating fs from " + str(md['fs']) + " to " + str(targetState.fs) + " for " + md['name'])
anyUpdate = True
if md['samplesPerBatch'] != targetState.samplesPerBatch:
logger.info("Updating samplesPerBatch from " + str(md['samplesPerBatch']) + " to " + str(
targetState.samplesPerBatch) + " for " + md['name'])
anyUpdate = True
if md['gyroEnabled'] != targetState.gyroEnabled:
logger.info("Updating gyroEnabled from " + str(md['gyroEnabled']) + " to " + str(
targetState.gyroEnabled) + " for " + md['name'])
anyUpdate = True
if md['gyroSens'] != targetState.gyroSens:
logger.info(
"Updating gyroSens from " + str(md['gyroSens']) + " to " + str(targetState.gyroSens) + " for " + md[
'name'])
anyUpdate = True
if md['accelerometerEnabled'] != targetState.accelerometerEnabled:
logger.info("Updating accelerometerEnabled from " + str(md['accelerometerEnabled']) + " to " + str(
targetState.accelerometerEnabled) + " for " + md['name'])
anyUpdate = True
if md['accelerometerSens'] != targetState.accelerometerSens:
logger.info("Updating accelerometerSens from " + str(md['accelerometerSens']) + " to " + str(
targetState.accelerometerSens) + " for " + md['name'])
anyUpdate = True
if anyUpdate:
payload = marshal(targetState, targetStateFields)
logger.info("Applying target state change " + md['name'] + " - " + str(payload))
if RecordingDeviceStatus.INITIALISED.name == md.get('status'):
try:
httpclient.patch(md['serviceURL'], json=payload)
except Exception as e:
logger.exception(e)
else:
logger.warning("Ignoring target state change until " + md['name'] + " is idle, currently " + md['status'])
else:
logger.debug("Device " + md['name'] + " is at target state, we continue")
| 11,106
|
def _data_type(data_string: str):
""" convert the data type string (i.e., FLOAT, INT16, etc.) to the appropriate int.
See: https://deeplearning4j.org/api/latest/onnx/Onnx.TensorProto.DataType.html
"""
for key, val in glob.DATA_TYPES.items():
if key == data_string:
return val
_print("Data string not found. Use `list_data_types()` to list all supported data strings.")
return False
| 11,107
|
def test_gruneisen_mesh(ph_nacl_gruneisen):
"""Test of mode Grueneisen parameter calculation on sampling mesh."""
ph0, ph_minus, ph_plus = ph_nacl_gruneisen
phg = PhonopyGruneisen(ph0, ph_minus, ph_plus)
phg.set_mesh([4, 4, 4])
# qpoints, weights, freqs, eigvecs, gamma = phg.get_mesh()
weights = []
g_mesh_vals = np.reshape(np.asarray(g_mesh.split(), dtype="double"), (-1, 15))
for i, (qpt, w, freqs, _, gammas) in enumerate(zip(*phg.get_mesh())):
weights.append(w)
# print(" ".join(["%.5f" % v for v in qpt]))
# print(" ".join(["%.5f" % v for v in freqs]))
# print(" ".join(["%.5f" % v for v in gammas]))
np.testing.assert_allclose(np.r_[qpt, freqs, gammas], g_mesh_vals[i], atol=1e-5)
np.testing.assert_array_equal(weights, g_mesh_weights)
| 11,108
|
def bin_power(dataset, fsamp:int, band=range(0, 45)):
"""Power spec
Args:
dataset: n_epoch x n_channel x n_sample
fsamp:
band:
Returns:
n_epoch x n_channel x len(band)
"""
res = []
for i, data in enumerate(dataset):
res.append(power(data, fsamp=fsamp, band=band))
return res
| 11,109
|
def deflate_and_base64_encode(string_val):
"""
Deflates and the base64 encodes a string
:param string_val: The string to deflate and encode
:return: The deflated and encoded string
"""
if not isinstance(string_val, six.binary_type):
string_val = string_val.encode('utf-8')
return base64.b64encode(zlib.compress(string_val)[2:-4])
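
# The slice [2:-4] strips the 2-byte zlib header and the 4-byte Adler-32 trailer,
# leaving a raw DEFLATE stream (the form used, e.g., by SAML HTTP-Redirect bindings).
# A matching decoder, as a sketch: inflate with wbits=-15 to tell zlib the data is headerless.
import base64
import zlib
def base64_decode_and_inflate(encoded):
    return zlib.decompress(base64.b64decode(encoded), -15)
roundtrip = base64_decode_and_inflate(deflate_and_base64_encode("hello world"))
print(roundtrip)  # b'hello world'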
| 11,110
|
def smooth_reward_curve(x, y):
"""Smooths a reward curve--- how?"""
k = min(31, int(np.ceil(len(x) / 30))) # Halfwidth of our smoothing convolution
xsmoo = x[k:-k]
ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='valid') / np.convolve(np.ones_like(y), np.ones(2 * k + 1), mode='valid')
downsample = max(int(np.floor(len(xsmoo) / 1e3)), 1)
return xsmoo[::downsample], ysmoo[::downsample]
| 11,111
|
def sdot(s):
"""Returns the time derivative of a given state.
Args:
s(1x6 numpy array): the state vector [rx,ry,rz,vx,vy,vz]
Returns:
1x6 numpy array: the time derivative of s [vx,vy,vz,ax,ay,az]
"""
mu_Earth = 398600.4405
r = np.linalg.norm(s[0:3])
a = -mu_Earth/(r**3)*s[0:3]
p_j2 = j2_pert(s)
p_drag = drag(s)
a = a+p_j2+p_drag
return np.array([*s[3:6],*a])
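
# Integration sketch for sdot using scipy. j2_pert and drag are defined elsewhere
# in the original module; they are stubbed to zero here purely so the example runs,
# which reduces the dynamics to pure two-body motion.
import numpy as np
from scipy.integrate import solve_ivp
def j2_pert(s):
    return np.zeros(3)  # placeholder stub for the real J2 perturbation
def drag(s):
    return np.zeros(3)  # placeholder stub for the real drag model
s0 = np.array([7000.0, 0.0, 0.0, 0.0, 7.546, 0.0])  # near-circular LEO state [km, km/s]
sol = solve_ivp(lambda t, s: sdot(s), (0.0, 5400.0), s0, rtol=1e-8)
print(sol.y[:3, -1])  # position after 90 minutes of propagation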
| 11,112
|
def abort(
status_code: int,
message: t.Optional[str] = None,
detail: t.Optional[t.Any] = None,
headers: t.Optional[t.Mapping[str, str]] = None
) -> None:
"""A function to raise HTTPError exception.
Similar to Flask's `abort`, but returns a JSON response.
Examples:
```python
from apiflask import APIFlask, abort
from flask import escape
app = APIFlask(__name__)
@app.get('/<name>')
def hello(name):
if name == 'Foo':
abort(404, 'This man is missing.')
return f'Hello, {escape(name)}!'
```
P.S. When `app.json_errors` is `True` (default), Flask's `flask.abort` will also
return JSON error response.
Arguments:
status_code: The status code of the error (4XX and 5xx).
message: The simple description of the error. If not provided,
the reason phrase of the status code will be used.
detail: The detailed information of the error, you can use it to
provide the addition information such as custom error code,
documentation URL, etc.
headers: A dict of headers used in the error response.
*Version changed: 0.4.0*
- Rename the function name from `abort_json` to `abort`.
"""
raise HTTPError(status_code, message, detail, headers)
| 11,113
|
def merge_xunit(in_files, out_file, ignore_flaky=False, quiet=False):
"""
Merges the input files into the specified output file.
:param in_files: list of input files
:param out_file: location to write merged output file
:param ignore_flaky: whether to ignore flaky test cases
:param quiet: whether to suppress some prints
:return: nothing
"""
if len(in_files) == 0:
return
logger.debug("input files are: " + ",".join(in_files))
logger.debug("output file is: " + out_file)
first_in = in_files[0]
open_first_in = codecs.open(first_in, "r", "utf-8")
xml_string = open_first_in.read().encode("utf-8")
merge_xml = minidom.parseString(xml_string)
testsuite = merge_xml.firstChild
errors = int(_safe_attribute(testsuite, 'errors', 0))
failures = int(_safe_attribute(testsuite, 'failures', 0))
num_tests = int(_safe_attribute(testsuite, 'tests', 0))
time = float(_safe_attribute(testsuite, 'time', "0.0").replace(',', ''))
skipped = int(_safe_attribute(testsuite, 'skipped', 0))
to_merge = [x for x in in_files if x != first_in]
name2tests = defaultdict(list)
for in_file in to_merge:
try:
if not quiet:
print ('Processing %s ' % in_file)
in_xml = minidom.parse(in_file)
in_testsuite = in_xml.firstChild
errors += int(_safe_attribute(in_testsuite, 'errors', 0))
failures += int(_safe_attribute(in_testsuite, 'failures', 0))
num_tests += int(_safe_attribute(in_testsuite, 'tests', 0))
time += float(_safe_attribute(in_testsuite, 'time', "0.0").replace(',', ''))
skipped += int(_safe_attribute(in_testsuite, 'skipped', 0))
for test_case in in_xml.getElementsByTagName('testcase'):
name = (_safe_attribute(test_case, "classname"), _safe_attribute(test_case, "name"))
name2tests[name].append(test_case)
except Exception as e:
print("Unable to fully process %s: %s" % (in_file, e))
# Filter out the failures of flaky tests
if ignore_flaky:
for name, tests in name2tests.items():
# Failed: all failed. This also works for skipped tests, since they'll always skip.
# Flaky: one pass and one or more failures
# Succeeded: all passed
# Failed testcases have child <error> or <failure> nodes.
# Skipped testcases have a child <skipped/> node.
failed_list = []
for test in tests:
failed = False
for child in test.childNodes:
# Only count presence of a child element, want to ignore text nodes
if child.nodeType == xml.dom.Node.ELEMENT_NODE:
failed = True
break
failed_list.append(failed)
failed = all(failed_list)
succeeded = all([not f for f in failed_list])
# Failure or success, we can pass through
if failed or succeeded:
continue
else:
# Filter out failed attempts from a flaky run
succeeded = []
for test in tests:
if not test.hasChildNodes():
# If it succeeded, append to the list
succeeded.append(test)
else:
# Else do not append, and update the global stats
for child in test.childNodes:
# Skip everything that's not an element, i.e. <error> or <failure>
if child.nodeType != xml.dom.Node.ELEMENT_NODE:
continue
if child.nodeName == "error":
errors -= 1
elif child.nodeName == "failure":
failures -= 1
time -= float(_safe_attribute(child, "time", "0.0").replace(',', ''))
num_tests -= 1
name2tests[name] = succeeded
# Populate the output DOM
for tests in name2tests.values():
for test in tests:
testsuite.appendChild(test)
_safe_set_attribute(testsuite, 'errors', errors)
_safe_set_attribute(testsuite, 'failures', failures)
_safe_set_attribute(testsuite, 'tests', num_tests)
_safe_set_attribute(testsuite, 'time', time)
_safe_set_attribute(testsuite, 'skipped', skipped)
merge_xml.writexml(codecs.open(out_file, 'w', encoding="utf-8"),
indent="\t", newl="\n", encoding="utf-8")
| 11,114
|
def get_values(abf,key="freq",continuous=False):
"""returns Xs, Ys (the key), and sweep #s for every AP found."""
Xs,Ys,Ss=[],[],[]
for sweep in range(abf.sweeps):
for AP in cm.matrixToDicts(abf.APs):
if not AP["sweep"]==sweep:
continue
Ys.append(AP[key])
Ss.append(AP["sweep"])
if continuous:
Xs.append(AP["expT"])
else:
Xs.append(AP["sweepT"])
return np.array(Xs),np.array(Ys),np.array(Ss)
| 11,115
|
def test_get_invalid_individual_recipes(test_client):
"""
GIVEN a Flask application configured for testing
WHEN the '/blog/<blog_title>' page is requested (GET) with invalid blog titles
THEN check that 404 errors are returned
"""
invalid_blog_titles = ['instant_pot', 'butter', 'abcd']
for blog_title in invalid_blog_titles:
response = test_client.get(f'/blog/{blog_title}/')
assert response.status_code == 404
| 11,116
|
def geodetic2cd(
gglat_deg_array, gglon_deg_array, ggalt_km_array, decimals=2, year=2021.0
):
"""Transformation from Geodetic (lat, lon, alt) to Centered Dipole (CD) (lat, lon, alt).
Author: Giorgio Savastano (giorgiosavastano@gmail.com)
Parameters
----------
gglon_deg_array : np.ndarray
array containing geodetic longitude values in degrees
gglat_deg_array : np.ndarray
array containing geodetic latitude values in degrees
ggalt_km_array : np.ndarray
array containing geodetic altitude values in km
decimals : int, default=2
Number of decimal places to round to. If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
year : float, default=2021.0
year for computing the IGRF Gauss coefficients
Returns
-------
tuple[np.ndarray, np.ndarray, np.ndarray]
CD lat, lon, alt arrays
"""
if type(gglon_deg_array) == list:
logger.info(" Converting list to np.ndarrays.")
gglon_deg_array = np.asarray(gglon_deg_array)
gglat_deg_array = np.asarray(gglat_deg_array)
ggalt_km_array = np.asarray(ggalt_km_array)
elif type(gglon_deg_array) != np.ndarray:
logger.info(f" Converting {type(gglon_deg_array)} to np.ndarrays.")
gglon_deg_array = np.asarray([gglon_deg_array])
gglat_deg_array = np.asarray([gglat_deg_array])
ggalt_km_array = np.asarray([ggalt_km_array])
x_geoc, y_geoc, z_geoc = pymap3d.geodetic2ecef(
gglat_deg_array, gglon_deg_array, ggalt_km_array * 1000.0
)
x_cd, y_cd, z_cd = ecef2eccdf(x_geoc, y_geoc, z_geoc, year=year)
colat_cd, long_cd, r_cd = ecef2spherical(x_cd, y_cd, z_cd)
lat_cd = np.round(90 - colat_cd, decimals)
alt_cd = np.round(r_cd - CONSTS.RE_M, decimals)
return lat_cd, long_cd, alt_cd
| 11,117
|
def load_pickle(file_path):
"""
load the pickle object from the given path
:param file_path: path of the pickle file
:return: obj => loaded obj
"""
with open(file_path, "rb") as obj_des:
obj = pickle.load(obj_des)
# return the loaded object
return obj
| 11,118
|
def main():
""" Main """
scores = np.array([1.0, 2.0, 3.0])
print(softmax(scores))
scores = np.array([[1, 2, 3, 6],
[2, 4, 5, 6],
[3, 8, 7, 6]])
print(softmax(scores))
| 11,119
|
def _expand_global_features(B, T, g, bct=True):
"""Expand global conditioning features to all time steps
Args:
B (int): Batch size.
T (int): Time length.
g (Tensor): Global features, (B x C) or (B x C x 1).
bct (bool) : returns (B x C x T) if True, otherwise (B x T x C)
Returns:
Tensor: B x C x T or B x T x C or None
"""
if g is None:
return None
g = g.unsqueeze(-1) if g.dim() == 2 else g
if bct:
g_bct = g.expand(B, -1, T)
return g_bct.contiguous()
else:
g_btc = g.expand(B, -1, T).transpose(1, 2)
return g_btc.contiguous()
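
# Shape-check sketch for _expand_global_features (assumes PyTorch is available, as above).
import torch
g = torch.randn(4, 16)                        # (B x C) global conditioning vector
out_bct = _expand_global_features(4, 100, g, bct=True)
out_btc = _expand_global_features(4, 100, g, bct=False)
print(out_bct.shape, out_btc.shape)           # torch.Size([4, 16, 100]) torch.Size([4, 100, 16])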
| 11,120
|
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Find common kmers',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file1',
help='Input file 1',
metavar='FILE1',
type=argparse.FileType('rt'))
parser.add_argument('file2',
help='Input file 2',
metavar='FILE2',
type=argparse.FileType('rt'))
parser.add_argument('-k',
'--kmer',
help='K-mer size',
metavar='int',
type=int,
default=3)
args = parser.parse_args()
if args.kmer < 1:
parser.error(f'--kmer "{args.kmer}" must be > 0')
return args
| 11,121
|
def plot_CDF(data, ax=None, reverse=False, plot=True, **plotargs):
""" plot Cumulative Ratio. """
n_samples = len(data)
X = sorted(data, reverse=reverse)
Y = np.arange(1,n_samples+1)/n_samples
if plot or ax:
if ax is None:
fig, ax = plt.subplots()
ax.plot(X, Y, **plotargs)
ax.set_ylabel("Cumulative Ratio")
return ax
return (X, Y)
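
# Usage sketch: draw the empirical CDF of a normal sample, or retrieve the raw
# (X, Y) curve without plotting (assumes numpy and matplotlib, as above).
data = np.random.randn(1000)
ax = plot_CDF(data, label="N(0, 1)")
X, Y = plot_CDF(data, plot=False)   # no axes created, curve returned instead
print(X[0], Y[-1])                  # smallest sample, cumulative ratio 1.0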
| 11,122
|
def is_zsettable(s):
"""quick check that all values in a dict are reals"""
return all(isinstance(x, (int, float)) for x in s.values())
| 11,123
|
def plotMeetingGraphs(robots, index, team, path, subplot=None, length=0):
"""Plot the trajectories of all robots
Input arguments:
robots = robots which measured and moved around
index = which robots should be plotted
team = team of the robots
subplot = if we want to plot all teams
length = how many subplots we need
path = savePath
"""
#TODO: add savefig in main loop
if subplot is not None:
plt.figure('RRT* Graphs')
plt.subplot(int(np.ceil(length/2)), 2, subplot)
else:
plt.figure()
plt.title('Team %.d' %team)
cmap = plt.get_cmap('hsv')
for i in range(0,len(index)):
graph = robots[index[i]].totalGraph
node_color = colors.to_hex(cmap(i/len(index)))
nx.draw(graph, label='robot %.d' %(index[i]), pos=nx.get_node_attributes(graph, 'pos'),
node_color=node_color,node_size=100,with_labels = True,font_color='w',font_size=8)
plt.legend()
plt.savefig(path + 'RRT* Graphs' + '.png')
plt.close()
| 11,124
|
def initialize_simulator(task_ids: Sequence[str],
action_tier: str) -> ActionSimulator:
"""Initialize ActionSimulator for given tasks and tier."""
tasks = phyre.loader.load_compiled_task_list(task_ids)
return ActionSimulator(tasks, action_tier)
| 11,125
|
async def async_unload_entry(hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry) -> bool:
"""Unload a config entry."""
_LOGGER.debug("%s: async_unload_entry", DOMAIN)
try:
all_ok = True
for platform in SUPPORTED_PLATFORMS:
_LOGGER.debug("%s - async_setup_entry: unload platform: %s", DOMAIN, platform)
platform_ok = await asyncio.gather(*[hass.config_entries.async_forward_entry_unload(config_entry, platform)])
if not platform_ok:
_LOGGER.error("%s - async_setup_entry: failed to unload: %s (%s)", DOMAIN, platform, platform_ok)
all_ok = platform_ok
if DATA_SERVER in hass.data[DOMAIN] and hass.data[DOMAIN][DATA_SERVER] is not None:
BuiltInServer = hass.data[DOMAIN][DATA_SERVER]
if await BuiltInServer.async_dSServerStop("integration_unload") is False:
_LOGGER.error("%s - async_setup_entry: failed to unload server: %s", DOMAIN, BuiltInServer)
all_ok = False
else:
hass.data[DOMAIN][DATA_SERVER] = None
hass.data[DOMAIN][config_entry.entry_id]["unsub_options_update_listener"]()
if all_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return all_ok
except Exception as e:
_LOGGER.error("%s - async_unload_entry: setup devices failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
return False
| 11,126
|
def emr_cluster_security_configuration_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[EMR.1] EMR Clusters should have a security configuration specified"""
response = list_clusters(cache)
myEmrClusters = response["Clusters"]
for cluster in myEmrClusters:
clusterId = str(cluster["Id"])
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
try:
response = emr.describe_cluster(ClusterId=clusterId)
clusterId = str(response["Cluster"]["Id"])
clusterName = str(response["Cluster"]["Name"])
clusterArn = str(response["Cluster"]["ClusterArn"])
secConfigName = str(response["Cluster"]["SecurityConfiguration"])
# this is a Passing Check
finding = {
"SchemaVersion": "2018-10-08",
"Id": clusterArn + "/emr-cluster-sec-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": clusterArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[EMR.1] EMR Clusters should have a security configuration specified",
"Description": "EMR Cluster "
+ clusterName
+ " has a security configuration specified.",
"Remediation": {
"Recommendation": {
"Text": "EMR cluster security configurations cannot be specified after creation. For information on creating and attaching a security configuration refer to the Use Security Configurations to Set Up Cluster Security section of the Amazon EMR Management Guide",
"Url": "https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-security-configurations.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEmrCluster",
"Id": clusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"clusterId": clusterId,
"clusterName": clusterName,
"securityConfigurationName": secConfigName,
}
},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.IP-1",
"NIST SP 800-53 CM-2",
"NIST SP 800-53 CM-3",
"NIST SP 800-53 CM-4",
"NIST SP 800-53 CM-5",
"NIST SP 800-53 CM-6",
"NIST SP 800-53 CM-7",
"NIST SP 800-53 CM-9",
"NIST SP 800-53 SA-10",
"AICPA TSC A1.3",
"AICPA TSC CC1.4",
"AICPA TSC CC5.3",
"AICPA TSC CC6.2",
"AICPA TSC CC7.1",
"AICPA TSC CC7.3",
"AICPA TSC CC7.4",
"ISO 27001:2013 A.12.1.2",
"ISO 27001:2013 A.12.5.1",
"ISO 27001:2013 A.12.6.2",
"ISO 27001:2013 A.14.2.2",
"ISO 27001:2013 A.14.2.3",
"ISO 27001:2013 A.14.2.4",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
if str(e) == "'SecurityConfiguration'":
finding = {
"SchemaVersion": "2018-10-08",
"Id": clusterArn + "/emr-cluster-sec-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": clusterArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[EMR.1] EMR Clusters should have a security configuration specified",
"Description": "EMR Cluster "
+ clusterName
+ " does not have a security configuration specified. Security configurations are used to define encryption, authorization and authentication strategies for your EMR cluster. Clusters cannot be modified after creation, for more information refer to the remediation section.",
"Remediation": {
"Recommendation": {
"Text": "EMR cluster security configurations cannot be specified after creation. For information on creating and attaching a security configuration refer to the Use Security Configurations to Set Up Cluster Security section of the Amazon EMR Management Guide",
"Url": "https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-security-configurations.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEmrCluster",
"Id": clusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {"clusterId": clusterId, "clusterName": clusterName,}
},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.IP-1",
"NIST SP 800-53 CM-2",
"NIST SP 800-53 CM-3",
"NIST SP 800-53 CM-4",
"NIST SP 800-53 CM-5",
"NIST SP 800-53 CM-6",
"NIST SP 800-53 CM-7",
"NIST SP 800-53 CM-9",
"NIST SP 800-53 SA-10",
"AICPA TSC A1.3",
"AICPA TSC CC1.4",
"AICPA TSC CC5.3",
"AICPA TSC CC6.2",
"AICPA TSC CC7.1",
"AICPA TSC CC7.3",
"AICPA TSC CC7.4",
"ISO 27001:2013 A.12.1.2",
"ISO 27001:2013 A.12.5.1",
"ISO 27001:2013 A.12.6.2",
"ISO 27001:2013 A.14.2.2",
"ISO 27001:2013 A.14.2.3",
"ISO 27001:2013 A.14.2.4",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
print(e)
| 11,127
|
def import_sensitivities(input, file_location):
"""
Ratio is the C/O starting gas ratio
file_location is the LSR C and O binding energy, false to load the base case
"""
tol, ratio = input
try:
data = pd.read_csv(file_location + '/all-sensitivities/' + tol + '{:.1f}RxnSensitivity.csv'.format(ratio))
data = data.values
data = data.tolist()
return data
except:
print('Cannot find ' + file_location + '/all-sensitivities/' + tol + '{:.1f}RxnSensitivity.csv'.format(ratio))
| 11,128
|
def extract_files_to_process(options, company_file):
"""Extract the files from the ENER zip file and the ITR/DFP inside of it,
and collect all the XML files
"""
force_download = options.get("force_download", False)
local_base_path = _doc_local_base_path(options, company_file)
# Make sure the file is in the local cache
local_file = "{0}/{1}". \
format(local_base_path, company_file.file_name)
if not exists(options, local_file):
copy_file(options, company_file.file_url, local_file)
working_local_base_path = \
_doc_local_working_base_path(options, company_file)
file_to_export = "{0}/{1}".format(local_base_path, company_file.file_name)
if exists(options, working_local_base_path):
if force_download:
# Clean the folder of the company file (working folder)
delete_all(options, working_local_base_path)
files_ref = extract_zip(
options, file_to_export, working_local_base_path)
else:
files_ref = listdir(options, working_local_base_path)
# If the folder is empty
if not files_ref:
mkdirs(options, working_local_base_path)
files_ref = extract_zip(
options, file_to_export, working_local_base_path)
else:
mkdirs(options, working_local_base_path)
files_ref = extract_zip(
options, file_to_export, working_local_base_path)
available_files = {}
if company_file.doc_type in ["ITR", "DFP"]:
for the_file in files_ref:
if re.match(RE_FILE_BY_XML, the_file, re.IGNORECASE):
filename = ntpath.basename(the_file)
available_files[filename] = the_file
elif re.match(RE_FILE_BY_ITR, the_file, re.IGNORECASE):
itr_dest_folder = "{0}/itr_content/".\
format(working_local_base_path)
itr_files = extract_zip(options, the_file, itr_dest_folder)
for itr_file in itr_files:
filename = ntpath.basename(itr_file)
available_files["itr/{}".format(filename)] = itr_file
# Once unzipped, we can delete the original file from the
elif re.match(RE_FILE_BY_DFP, the_file, re.IGNORECASE):
dfp_dest_folder = "{0}/dfp_content/".\
format(working_local_base_path)
dfp_files = extract_zip(options, the_file, dfp_dest_folder)
for dfp_file in dfp_files:
filename = ntpath.basename(dfp_file)
available_files["dfp/{}".format(filename)] = dfp_file
return available_files
| 11,129
|
def test():
"""
Set the application name
"""
# get the library
import journal
# make up a name
name = "app"
# register it
journal.application(name)
# get the chronicler's notes
notes = journal.chronicler.notes
# verify that the key is registered and has the correct value
assert notes["application"] == name
# all done
return
| 11,130
|
def pg_dump(dsn, output):
"""
Saves the database schema to a file
:param dsn: Connection string. For example: username@localhost:5432/dname
:param output: Name of the file to save the DDL to
:type dsn: str
:type output: str
"""
host, port, user, pwd, dbname, socket = parse_dsn(dsn)
args = [
autodetect_pg_dump_path(),
"-h",
socket or host,
"-p",
str(port),
"-U",
user,
"-d",
dbname,
"--schema-only",
"--no-owner",
"--no-privileges",
"--no-tablespaces",
"--no-unlogged-table-data",
"-F",
"p",
"-f",
output,
]
env = os.environ.copy()
if pwd:
env["PGPASSWORD"] = pwd
else:
args.append("--no-password")
return shell(args, env)
| 11,131
|
def test__additive_hash(ht):
"""Test _addive_hash method work properly."""
assert ht._additive_hash('wriggle') == 53
| 11,132
|
def merge(source, dest):
""" Copy all properties and relations from one entity onto another, then
mark the source entity as an ID alias for the destination entity. """
if source.id == dest.id:
return source
if dest.same_as == source.id:
return source
if source.same_as == dest.id:
return dest
if dest.same_as is not None:
# potential infinite recursion here.
canonical = Entity.by_id(dest.same_as)
if canonical is not None:
return merge(source, canonical)
if dest.schema.is_parent(source.schema):
dest.schema = source.schema
dest_valid = [a.name for a in dest.schema.attributes]
dest_active = [p.name for p in dest.active_properties]
for prop in source.properties:
prop.entity = dest
if prop.name in dest_active:
prop.active = False
if prop.name not in dest_valid:
properties_logic.delete(prop)
for rel in source.inbound:
rel.target = dest
db.session.add(rel)
for rel in source.outbound:
rel.source = dest
db.session.add(rel)
source.same_as = dest.id
db.session.flush()
_entity_changed.delay(dest.id, 'update')
_entity_changed.delay(source.id, 'delete')
return dest
| 11,133
|
def create_summary_text(summary):
"""
format a dictionary so it can be printed to screen or written to a plain
text file
Args:
summary(dict): the data to format
Returns:
textsummary(str): the summary dict formatted as a string
"""
summaryjson = json.dumps(summary, indent=3)
textsummary = re.sub('[{},"]', '', summaryjson)
return textsummary
| 11,134
|
def test_write_to_stats_with_no_parsed_data():
"""It should not call stats when parsing the data returned None."""
loader = ItemLoader()
loader.stats = mock.Mock()
parsed_data = None
expected_stat_key = "parser/ItemLoader/field_name/css/0/missing"
assert loader.write_to_stats("field_name", parsed_data, 0, "css") == None
loader.stats.inc_value.assert_called_once_with(expected_stat_key)
| 11,135
|
def random_datetime(start, end):
"""Generate a random datetime between `start` and `end`"""
return start + datetime.timedelta(
# Get a random amount of seconds between `start` and `end`
seconds=random.randint(0, int((end - start).total_seconds())),
)
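
# Usage sketch: a reproducible random timestamp inside 2020 (assumes the
# datetime and random modules are imported, as the function above requires).
random.seed(42)
start = datetime.datetime(2020, 1, 1)
end = datetime.datetime(2021, 1, 1)
print(random_datetime(start, end))  # some datetime within 2020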
| 11,136
|
def maximo_basico(a: float, b: float) -> float:
"""Toma dos números y devuelve el mayor.
Restricción: No utilizar la función max"""
if a > b:
return a
return b
| 11,137
|
def listas_mesmo_tamanho(lista_de_listas):
"""
Receives a list of lists and returns True if all of the lists
have the same length, and False otherwise
"""
tamanho_padrao = len(lista_de_listas[0])
for lista in lista_de_listas:
if(len(lista) != tamanho_padrao):
return False
return True
| 11,138
|
def load_performance_win_x64_win_x64_vs2017_settings(conf):
"""
Setup all compiler and linker settings shared over all win_x64_win_x64_v140 configurations for
the 'performance' configuration
"""
v = conf.env
conf.load_win_x64_win_x64_vs2017_common_settings()
# Load additional shared settings
conf.load_performance_cryengine_settings()
conf.load_performance_msvc_settings()
conf.load_performance_windows_settings()
| 11,139
|
def bam2ec(bam_file, ec_file, chunks, directory, mincount, multisample, number_processes, rangefile, sample, targets, verbose):
"""
Convert a BAM file (bam_file) to a binary EC file (ec_file)
"""
utils.configure_logging(verbose)
if multisample:
if sample:
print('-s, --sample should NOT be specified with --multisample')
return
methods.bam2ec_multisample(bam_file, ec_file, chunks, mincount, directory, number_processes, rangefile, targets)
else:
methods.bam2ec(bam_file, ec_file, chunks, directory, number_processes, rangefile, sample, targets)
| 11,140
|
def compute_all_mordred_descrs(mols, max_cpus=None, quiet=True):
"""
Compute all Mordred descriptors, including 3D ones
Args:
mols: List of RDKit mol objects for molecules to compute descriptors for.
max_cpus: Max number of cores to use for computing descriptors. None means use all available cores.
quiet: If True, avoid displaying progress indicators for computations.
Returns:
res_df: DataFrame containing Mordred descriptors for molecules.
"""
calc = get_mordred_calculator(ignore_3D=False)
log.debug("Computing Mordred descriptors")
res_df = calc.pandas(mols, quiet=quiet, nproc=max_cpus)
log.debug("Done computing Mordred descriptors")
res_df = res_df.fill_missing().applymap(float)
return res_df
| 11,141
|
def self_quarantine_policy_40():
"""
Real Name: b'self quarantine policy 40'
Original Eqn: b'1-PULSE(self quarantine start 40, self quarantine end 40-self quarantine start 40)*self quarantine effectiveness 40'
Units: b'dmnl'
Limits: (None, None)
Type: component
b''
"""
return 1 - functions.pulse(__data['time'], self_quarantine_start_40(),
self_quarantine_end_40() -
self_quarantine_start_40()) * self_quarantine_effectiveness_40()
| 11,142
|
def fetch_mate_variant_record(vcfhandle, chr_mate, pos_mate, mateid, count=0, slop=50):
"""
We fetch the MateID variant Record for the breakend being process
:param vcfhandle:
:param chr_mate:
:param pos_mate:
:param mateid: must be a string and not a tuple
:param count: normally the mate_record is found on the first try because it sits at the expected pos_mate position; in some cases it is not,
so we expand the search (hence the slop value defined below); we limit the expansion to three tries, after which we search the whole contig;
the counter is defined here so the method can call itself recursively
:param slop: this slop differs in purpose from the user-given slop; it is simply the size of the region searched around pos_mate
so that the mate record can be found in the VCF as quickly as possible
:return: a unique VariantRecord (there must be one and only one); otherwise None, or an error is raised
"""
res_fetch = vcfhandle.fetch(contig=str(chr_mate), start=(int(pos_mate) - slop) - 1, end=int(pos_mate) + slop)
total_items_found = sum(1 for v in res_fetch)
logger.debug("search region: {}:{}-{}".format(str(chr_mate), str((int(pos_mate) - slop) - 1), str(int(pos_mate) + slop)))
logger.debug("res_fetch ==> " + str(res_fetch))
logger.debug("total_items_found ==> " + str(total_items_found))
if count < 3:
res_fetch = vcfhandle.fetch(contig=str(chr_mate), start=(int(pos_mate) - slop) - 1, end=int(pos_mate) + slop)
else:
res_fetch = vcfhandle.fetch(contig=str(chr_mate))
try:
if total_items_found >= 1:
rec_found = None
## we check that the mate id is present in the search result
for rec in res_fetch:
logger.debug("mate rec captured by res_fetch ==> " + str(rec))
logger.debug(str(rec.chrom) + ":" + str(rec.pos))
if 'MATEID' not in rec.info.keys():
# if we increase the slop, we might found records that are not BND breakends and therefore no MATEID is present in the INFO field
continue
logger.debug("mateid we want ot find in captured/fetched records: " + str(mateid))
# NOTE: rec.info['MATEID'] returns a tuple such as: ('MantaBND:2:254200:254201:0:0:0:1',)
if str(rec.id) == str(mateid):
logger.debug("yeah mate id found ... returning rec --> " + str(rec))
rec_found = rec
break
if rec_found is None:
count += 1
logger.debug("rec is still none")
logger.info("broadening the search by increasing the slop around pos_mate b/c the value in pos_mate might not be equivalent to the value in pos_alt: loop_" + str(count))
return fetch_mate_variant_record(vcfhandle, chr_mate, pos_mate, mateid, count=count, slop=1000 + slop)
else:
return rec_found
else:
count += 1
logger.info("broadening the search by increasing the slop around pos_mate b/c the value in pos_mate might not be equivalent to the value in pos_alt: loop_" + str(count))
return fetch_mate_variant_record(vcfhandle, chr_mate, pos_mate, mateid, count=count, slop=1000 + slop)
except Exception as e:
logger.error(e)
logger.error("ERROR: MATE NOT FOUND; Check your VCF input to see if the id << " + str(mateid) + " >> exists.")
exit(2)
| 11,143
|
def knapsack_bqm(cities, values, weights, total_capacity, value_r=0, weight_r=0):
"""
build the knapsack binary quadratic model
From DWave Knapsack examples
Originally from Andrew Lucas, NP-hard combinatorial problems as Ising spin glasses
Workshop on Classical and Quantum Optimization; ETH Zuerich - August 20, 2014
based on Lucas, Frontiers in Physics _2, 5 (2014)
See # Q-Alpha version for original introduction of value_r and weight_r
value_r: the proportion of value contributed from the objects outside of the knapsack.
For the standard knapsack problem this is 0,
but in the case of GDP a closed city retains some % of GDP value;
or for health problems it may contribute negative value (-1).
weight_r: the proportion of weight contributed from the objects outside of the knapsack.
For the standard knapsack problem this is 0,
but in the case of sick people we might consider that a closed city
retains some % of its sick people over time;
or for health problems it may contribute negative value (-1)
"""
# Initialize BQM - use large-capacity BQM so that the problem can be
# scaled by the user.
bqm = dimod.AdjVectorBQM(dimod.Vartype.BINARY)
# Lagrangian multiplier
# First guess as suggested in Lucas's paper
lagrange = max(values)
# Number of objects
x_size = len(values)
# Lucas's algorithm introduces additional slack variables to handle
# the inequality. max_y_index indicates the maximum index in the y
# sum; hence the number of slack variables.
max_y_index = ceil(log(total_capacity))
# Slack variable list for Lucas's algorithm. The last variable has
# a special value because it terminates the sequence.
y = [2**n for n in range(max_y_index - 1)]
y.append(total_capacity + 1 - 2**(max_y_index - 1))
# Q-Alpha - calculate the extra constant in second part of problem hamiltonian
C = sum([weight * weight_r for weight in weights])
# Q-Alpha - change weights to weight*(1-weight_r)
weights = [weight*(1-weight_r) for weight in weights]
# Q-Alpha - change values to value*(1-value_r)
values = [value*(1-value_r) for value in values]
# Hamiltonian xi-xi terms
for k in range(x_size):
# Q-Alpha add final term lagrange * C * weights[k]
bqm.set_linear(
cities[k],
lagrange * (weights[k] ** 2) - values[k] + lagrange * C * weights[k])
# Hamiltonian xi-xj terms
for i in range(x_size):
for j in range(i + 1, x_size):
key = (cities[i], cities[j])
bqm.quadratic[key] = 2 * lagrange * weights[i] * weights[j]
# Hamiltonian y-y terms
for k in range(max_y_index):
# Q-Alpha add final term -lagrange * C * y[k]
bqm.set_linear('y' + str(k), lagrange *
(y[k]**2) - lagrange * C * y[k])
# Hamiltonian yi-yj terms
for i in range(max_y_index):
for j in range(i + 1, max_y_index):
key = ('y' + str(i), 'y' + str(j))
bqm.quadratic[key] = 2 * lagrange * y[i] * y[j]
# Hamiltonian x-y terms
for i in range(x_size):
for j in range(max_y_index):
key = (cities[i], 'y' + str(j))
bqm.quadratic[key] = -2 * lagrange * weights[i] * y[j]
return bqm
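
# Sampling sketch for a toy instance (assumes a dimod version that still provides
# AdjVectorBQM, as used above). With three cities and capacity 4, the BQM is small
# enough to solve by brute force with ExactSolver.
import dimod
cities = ["a", "b", "c"]
values = [10, 20, 15]
weights = [1, 2, 3]
bqm_toy = knapsack_bqm(cities, values, weights, total_capacity=4)
best = dimod.ExactSolver().sample(bqm_toy).first.sample
print([c for c in cities if best[c] == 1])  # cities kept in the knapsack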
| 11,144
|
def test_build_from_args_no_hit(config_file, random_dt, script_path, new_config):
"""Try building experiment when not in db"""
cmdargs = {
"name": "supernaekei",
"config": config_file,
"user_args": [script_path, "x~uniform(0,10)"],
}
with OrionState(experiments=[], trials=[]):
with pytest.raises(NoConfigurationError) as exc_info:
experiment_builder.get_from_args(cmdargs)
assert "No experiment with given name 'supernaekei' and version '*'" in str(
exc_info.value
)
exp = experiment_builder.build_from_args(cmdargs)
assert exp.name == cmdargs["name"]
assert exp.configuration["refers"] == {
"adapter": [],
"parent_id": None,
"root_id": exp._id,
}
assert exp.metadata["datetime"] == random_dt
assert exp.metadata["user"] == "dendi"
assert exp.metadata["user_script"] == cmdargs["user_args"][0]
assert exp.metadata["user_args"] == cmdargs["user_args"]
assert exp.max_trials == 100
assert exp.max_broken == 5
assert exp.algorithms.configuration == {"random": {"seed": None}}
| 11,145
|
def nltk_punkt_de(data: List[str], model=None) -> List[str]:
"""Sentence Segmentation (SBD) with NLTK's Punct Tokenizer
Parameters:
-----------
data : List[str]
list of N documents as strings. Each document is then segmented
into sentences.
model (Default: None)
Preloaded instance of the NLP model. See nlptasks.sbd.get_model
Returns:
--------
List[str]
list of M sentences as strings. Please note that the information
about the relationship to the document is lost.
Example:
--------
import nlptasks as nt
import nlptasks.sbd
docs = ["Die Kuh ist bunt. Die Bäuerin mäht die Wiese."]
sents = nt.sbd.nltk_punkt_de(docs)
Help:
-----
- https://www.nltk.org/api/nltk.tokenize.html#module-nltk.tokenize.punkt
"""
# SBD
sentences = []
for rawstr in data:
sents = nltk.tokenize.sent_tokenize(rawstr, language="german")
sentences.extend(sents)
# done
return sentences
| 11,146
|
def usage():
"""Prints usage to the screen"""
print """
-------------------------------------------------------------------------------
Author: Kyle Hernandez <khernandez@bsd.uchicago.edu>
Description: Gets the top 90th percentile high-quality variants from a VCF file
and outputs them to a new VCF file. This method is used for the base quality
recalibration methods
Notes:
- Only processes loci with 'PASS' in the FILTER column
- Only processes loci where at least one sample has a SNP GT > 90
Usage:
GetHighQualSNPs.py <input.vcf> <output.vcf>
-------------------------------------------------------------------------------
"""
| 11,147
|
def is_pattern_error(exception: TypeError) -> bool:
"""Detect whether the input exception was caused by invalid type passed to `re.search`."""
# This is intentionally simplistic and does not involve any traceback analysis
return str(exception) == "expected string or bytes-like object"
| 11,148
|
def execute_query(bq_client: bigquery.Client,
env_vars: Dict[str, Union[str, bool]],
query_path: object,
output_table_name: str,
time_partition: bool) -> None:
"""Executes transformation query to a new destination table.
Args:
bq_client: bigquery.Client object
env_vars: Dictionary of key: value, where value is environment variable
query_path: Object representing location of SQL query to execute
output_table_name: String representing name of table that holds output
time_partition: Boolean indicating whether to time-partition output
"""
dataset_ref = bq_client.get_dataset(bigquery.DatasetReference(
project=bq_client.project,
dataset_id=env_vars['corrected_dataset_id']))
table_ref = dataset_ref.table(output_table_name)
job_config = bigquery.QueryJobConfig()
job_config.destination = table_ref
job_config.write_disposition = bigquery.WriteDisposition().WRITE_TRUNCATE
# Time Partitioning table is only needed for final output query
if time_partition:
job_config.time_partitioning = bigquery.TimePartitioning(
type_=bigquery.TimePartitioningType.DAY,
expiration_ms=None)
sql = query_path.query
sql = sql.format(**env_vars)
logging.info('Attempting query...')
# Execute Query
query_job = bq_client.query(
query=sql,
job_config=job_config)
query_job.result()
| 11,149
|
def textrank(articles, encoders, reduction_methods, reduction_methods_params):
"""
Description: Similarity between any two sentences is used as an equivalent to the web page transition probability
"""
for enc_name, enc_model in encoders.items():
# load sentence encoder
print(enc_name)
sentence_encoder = enc_model()
for reduction_method_name in reduction_methods.keys():
# load reduction model
print(reduction_method_name)
# fetch files
for file in tqdm(os.scandir(articles)):
with open(file.path) as f:
json_file = json.load(f)
article, ids = [], []
for comment_id, sents in json_file.items():
for s in sents:
article.append(s)
ids.append(comment_id)
assert len(article) == len(ids)
# Compute sentence embeddings
embeddings = sentence_encoder.encode_sentences(article)
# Select dim reduction method
if reduction_method_name != 'None':
red_method = reduction_methods[reduction_method_name](**reduction_methods_params[reduction_method_name])
embeddings = red_method.fit_transform(embeddings)
# similarity matrix
sim_mat = cosine_similarity(embeddings)
np.fill_diagonal(sim_mat, 0)
# rescale cosine score to [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))
sim_mat = scaler.fit_transform(sim_mat.flatten().reshape(-1, 1)).reshape(len(embeddings),
len(embeddings))
np.fill_diagonal(sim_mat, 0)
# calculate pagerank, monitor convergence
try:
nx_graph = nx.from_numpy_array(sim_mat)
scores = nx.pagerank(nx_graph, alpha=0.85, max_iter=500)
# get all scores for each comment
d = defaultdict(list)
for idx, (s , comment_id) in enumerate(zip(article, ids)):
d[comment_id].append(scores[idx])
# evaluate comment based on a function: max, mean, ...
scored_comment = {}
for key, value in d.items():
scored_comment[key] = (max(value), np.argmax(value))
# save into df with columns: article_id, comment_id, text, score
df = defaultdict(list)
for comment_id, score in scored_comment.items():
score, best_sentence_id = score
df['comment_id'].append(comment_id)
text = [f'<best_sentence> {s} </best_sentence>' if i == best_sentence_id else s for i, s in enumerate(json_file[comment_id])]
# get best_sentence
for sent in text:
if '<best_sentence>' in sent:
best_sentence = sent[16:-17]
df['best_sentence'].append(best_sentence)
break
df['text'].append(' '.join(text))
df['score'].append(score)
df['article_id'].append(int(file.name.split('.')[0]))
df = pd.DataFrame(df)
df = df[['article_id', 'comment_id', 'text', 'best_sentence', 'score']]
df = df.round(5)
# # get golden summary, append it to the end of the row
# file_id = int(file.name.split('.')[0])
# with open(f'data/kristina/cro_summaries/reference/vecernji_{file_id}.tgt') as g:
# gold = ' '.join([line.strip() for line in g])
#
# df.loc[-1] = [int(file.name.split('.')[0]), 'gold', gold, 1]
if not os.path.isfile('output/comments.csv'):
df.to_csv('output/comments.csv', mode='w', header=True, index=False)
else:
df.to_csv('output/comments.csv', mode='a', header=False, index=False)
except nx.exception.PowerIterationFailedConvergence:
print(f'File {file.name} did not converge')
| 11,150
|
def redirect(reluri):
""" Instruct the client to redirect to the supplied relative URI
@param reluri: relative URI to redirect to
"""
raise HTTPRedirect(base_uri() + '/' + reluri)
| 11,151
|
def iwave_modes_banded(N2, dz, k=None):
"""
!!! DOES NOT WORK!!!
Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:
$$
\left[ \frac{d^2}{dz^2} - \frac{1}{c_0} \bar{\rho}_z \right] \phi = 0
$$
with boundary conditions
"""
nz = N2.shape[0] # Remove the surface values
if k is None:
k = nz-2
dz2 = 1/dz**2
# Construct the LHS matrix, A
A = np.vstack([-1*dz2*np.ones((nz,)),\
2*dz2*np.ones((nz,)),\
-1*dz2*np.ones((nz,)),\
])
# BC's
#A[0,0] = -1.
#A[0,1] = 0.
#A[-1,-1] = -1.
#A[-1,-2] = 0.
A[1,0] = -1.
A[2,0] = 0.
A[1,-1] = -1.
A[0,-1] = 0.
# Now convert from a generalized eigenvalue problem to
# A.v = lambda.B.v
# a standard problem
# A.v = lambda.v
# By multiply the LHS by inverse of B
# (B^-1.A).v = lambda.v
# B^-1 = 1/N2 since B is diagonal
A[0,:] /= N2
A[1,:] /= N2
A[2,:] /= N2
w, phi = linalg.eig_banded(A)
pdb.set_trace()
## Main diagonal
#dd = 2*dz2*np.ones((nz,))
#dd /= N2
#dd[0] = -1
#dd[-1] = -1
## Off diagonal
#ee = -1*dz2*np.ones((nz-1,))
#ee /= N2[0:-1]
#ee[0] = 0
#ee[-1] = 0
## Solve... (use scipy not numpy)
#w, phi = linalg.eigh_tridiagonal(dd, ee )
#####
c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \phi
# Sort by the eigenvalues
idx = np.argsort(c)[::-1] # descending order
## Calculate the actual phase speed
cn = np.real( c[idx] )
idxgood = ~np.isnan(cn)
phisort = phi[:,idx]
return np.real(phisort[:,idxgood]), np.real(cn[idxgood])
| 11,152
|
def parse_fastq_pf_flag(records):
"""Take a fastq filename split on _ and look for the pass-filter flag
"""
if len(records) < 8:
pf = None
else:
fastq_type = records[-1].lower()
if fastq_type.startswith('pass'):
pf = True
elif fastq_type.startswith('nopass'):
pf = False
elif fastq_type.startswith('all'):
pf = None
else:
raise ValueError("Unrecognized fastq name: %s" % (
"_".join(records),))
return pf
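
# Usage sketch with hypothetical fastq names split on "_": the pass-filter flag
# is only read when the name has at least 8 underscore-separated fields.
print(parse_fastq_pf_flag("woldlab_2009_HWI_EAS229_0009_42FC3_l7_r1_pass.fastq".split("_")))    # True
print(parse_fastq_pf_flag("woldlab_2009_HWI_EAS229_0009_42FC3_l7_r1_nopass.fastq".split("_")))  # False
print(parse_fastq_pf_flag("short_name.fastq".split("_")))                                       # None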
| 11,153
|
def test_process_reverse_polarity():
"""algorithm_test.AdjustedAlgorithm_test.test_process()
Check adjusted data processing versus files generated from
original script. Tests reverse polarity matrix.
"""
# load adjusted data transform matrix and pier correction
a = adj(
statefile="etc/adjusted/adjbou_state_HE_.json",
inchannels=["H", "E"],
outchannels=["H", "E"],
)
# load boulder May 20 files from /etc/ directory
with open("etc/adjusted/BOU202005vmin.min") as f:
raw = i2.IAGA2002Factory().parse_string(f.read())
with open("etc/adjusted/BOU202005adj.min") as f:
expected = i2.IAGA2002Factory().parse_string(f.read())
# process he(raw) channels with loaded transform
adjusted = a.process(raw)
# compare channels from adjusted and expected streams
assert_almost_equal(
actual=adjusted.select(channel="H")[0].data,
desired=expected.select(channel="H")[0].data,
decimal=2,
)
assert_almost_equal(
actual=adjusted.select(channel="E")[0].data,
desired=expected.select(channel="E")[0].data,
decimal=2,
)
| 11,154
|
def rule_manager():
""" Pytest fixture for generating rule manager instance """
ignore_filter = IgnoreFilter(None, verbose=False)
return RuleManager(None, ignore_filter, verbose=False)
| 11,155
|
def zeros(fn, arr, *args):
"""
Find where a function crosses 0. Returns the zeroes of the function.
Parameters
----------
fn : function
arr : array of arguments for function
*args : any other arguments the function may have
"""
# the reduced function, with only the argument to be solved for (all other arguments fixed):
def fn_reduced(array): return fn(array, *args)
# the array of values of the function:
fn_arr = fn_reduced(arr)
# looking where the function changes sign...
sign_change_arr = np.where(np.logical_or((fn_arr[:-1] < 0.) * (fn_arr[1:] > 0.),
(fn_arr[:-1] > 0.) * (fn_arr[1:] < 0.))
)[0]
# or, just in case, where it is exactly 0!
exact_zeros_arr = np.where(fn_arr == 0.)[0]
# defining the array of 0-crossings:
cross_arr = []
# first, interpolating between the sign changes
if len(sign_change_arr) > 0:
for i in range(len(sign_change_arr)):
cross_arr.append(
brentq(fn_reduced, arr[sign_change_arr[i]],
arr[sign_change_arr[i] + 1])
)
# and then adding those places where it is exactly 0
if len(exact_zeros_arr) > 0:
for i in range(len(exact_zeros_arr)):
cross_arr.append(arr[exact_zeros_arr[i]])
# sorting the crossings in increasing order:
cross_arr = np.sort(np.array(cross_arr))
return cross_arr
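
# Usage sketch: the zeros of sin(x) on [0, 10] (assumes numpy as np and
# scipy.optimize.brentq are imported, as the function above requires).
x_grid = np.linspace(0.0, 10.0, 1000)
roots = zeros(np.sin, x_grid)
print(roots)  # approximately [0, pi, 2*pi, 3*pi]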
| 11,156
|
def create_low_latency_conv_model(fingerprint_input, model_settings,
is_training):
"""Builds a convolutional model with low compute requirements.
This is roughly the network labeled as 'cnn-one-fstride4' in the
'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
Here's the layout of the graph:
(fingerprint_input)
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
This produces slightly lower quality results than the 'conv' model, but needs
fewer weight parameters and computations.
During training, dropout nodes are introduced after the relu, controlled by a
placeholder.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
input_frequency_size = model_settings['dct_coefficient_count']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
first_filter_width = 8
first_filter_height = input_time_size
first_filter_count = 186
first_filter_stride_x = 1
first_filter_stride_y = 4
first_weights = tf.Variable(
tf.truncated_normal(
[first_filter_height, first_filter_width, 1, first_filter_count],
stddev=0.01))
first_bias = tf.Variable(tf.zeros([first_filter_count]))
first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
1, first_filter_stride_y, first_filter_stride_x, 1
], 'VALID') + first_bias
first_relu = tf.nn.relu(first_conv)
if is_training:
first_dropout = tf.nn.dropout(first_relu, dropout_prob)
else:
first_dropout = first_relu
first_conv_output_width = math.floor(
(input_frequency_size - first_filter_width + first_filter_stride_x) /
first_filter_stride_x)
first_conv_output_height = math.floor(
(input_time_size - first_filter_height + first_filter_stride_y) /
first_filter_stride_y)
first_conv_element_count = int(
first_conv_output_width * first_conv_output_height * first_filter_count)
flattened_first_conv = tf.reshape(first_dropout,
[-1, first_conv_element_count])
first_fc_output_channels = 128
first_fc_weights = tf.Variable(
tf.truncated_normal(
[first_conv_element_count, first_fc_output_channels], stddev=0.01))
first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))
first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
if is_training:
second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
else:
second_fc_input = first_fc
second_fc_output_channels = 128
second_fc_weights = tf.Variable(
tf.truncated_normal(
[first_fc_output_channels, second_fc_output_channels], stddev=0.01))
second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))
second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
if is_training:
final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
else:
final_fc_input = second_fc
label_count = model_settings['label_count']
final_fc_weights = tf.Variable(
tf.truncated_normal(
[second_fc_output_channels, label_count], stddev=0.01))
final_fc_bias = tf.Variable(tf.zeros([label_count]))
final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_prob
else:
return final_fc
| 11,157
|
def get_args(description: str = "YouTube") -> argparse.Namespace:
"""
Retrieve parsed arguments as a Namespace.
Parameters
----------
description : str
Description given to ArgumentParser.
Returns
-------
args : argparse.Namespace
Namespace with arguments specified.
"""
parser = argparse.ArgumentParser(description)
# Hyperparameters for pretraining embedding
# TODO Find correct lambda value from paper
parser.add_argument(
"--cmc-lambda",
action="store",
dest="CMC_LAMBDA",
default=1,
type=float,
help="Weight for combining TDC and CMC loss. Defaults to 1.",
)
parser.add_argument(
"--lr",
action="store",
dest="LR",
default=1e-4,
type=float,
help="Learning rate for Adam optimizer. Defaults to 1e-4.",
)
parser.add_argument(
"--batch-size",
action="store",
dest="BATCH_SIZE",
default=32,
type=int,
help="Batch size for pretraining embedding. Defaults to 32.",
)
parser.add_argument(
"--nb-steps",
action="store",
dest="NB_STEPS",
default=200000,
type=int,
help="Number of training steps for embedding. Defaults to 200000.",
)
# Misc. arguments for pretraining embedding
parser.add_argument(
"--save-interval",
action="store",
dest="SAVE_INTERVAL",
default=10000,
type=int,
help="Interval for saving models during pretraining. Defaults to 10000.",
)
parser.add_argument(
"--tsne-interval",
action="store",
dest="TSNE_INTERVAL",
default=1000,
type=int,
help="Interval for plotting t-SNE during pretraining. Defaults to 1000.",
)
# Hyperparameters for training DQN agent
parser.add_argument(
"--ckpt-freq",
action="store",
dest="CKPT_FREQ",
default=16,
type=int,
help="Frequency of checkpoints (N) selected from embedding. Defaults to 16.",
)
parser.add_argument(
"--ckpt-horizon",
action="store",
dest="CKPT_HORIZON",
default=1,
type=int,
help=" Horizon(Δt) for checkpoints. Defaults to 1.",
)
parser.add_argument(
"--imitation-cutoff",
action="store",
dest="IMITATION_CUTOFF",
default=0.5,
type=float,
help="Cutoff (α) for giving imitation reward. Defaults to 0.5.",
)
args = parser.parse_args()
return args
| 11,158
|
def _DropEmptyPathSegments(path):
"""Removes empty segments from the end of path.
Args:
path: A filesystem path.
Returns:
path with trailing empty segments removed. Eg /duck/// => /duck.
"""
while True:
(head, tail) = os.path.split(path)
if tail:
break
path = head
return path
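
# Behaviour sketch for the docstring example (assumes os is imported, as above).
print(_DropEmptyPathSegments("/duck///"))    # /duck
print(_DropEmptyPathSegments("/duck/pond"))  # /duck/pond (already clean)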
| 11,159
|
def _format_author(url, full_name):
""" Helper function to make author link """
return u"<a class='more-info' href='%s'>%s</a>" % (url, full_name)
| 11,160
|
def all_series(request: HttpRequest) -> JsonResponse:
"""
View that serves all the series in a JSON array.
:param request: The original request.
:return: A JSON-formatted response with the series.
"""
return JsonResponse([
_series_response(request, s)
for s in get_response(request)
], safe=False)
| 11,161
|
def test_foci():
"""Test plotting of foci
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
coords = [[-36, 18, -3],
[-43, 25, 24],
[-48, 26, -2]]
brain.add_foci(coords, map_surface="white", color="gold")
annot_path = pjoin(subj_dir, subject_id, 'label', 'lh.aparc.a2009s.annot')
ids, ctab, names = nib.freesurfer.read_annot(annot_path)
verts = np.arange(0, len(ids))
coords = np.random.permutation(verts[ids == 74])[:10]
scale_factor = 0.7
brain.add_foci(coords, coords_as_verts=True,
scale_factor=scale_factor, color="#A52A2A")
brain.close()
| 11,162
|
def stlx_powerset(s):
"""If s is a set, the expression pow(s) computes the power set of s. The power set of s is
defined as the set of all subsets of s."""
def powerset_generator(i):
for subset in it.chain.from_iterable(it.combinations(i, r) for r in range(len(i)+1)):
yield set(subset)
return SetlxSet(SetlxSet(z) for z in powerset_generator(s))
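# Usage sketch (assumptions: SetlxSet is a hashable set wrapper and itertools is
# imported as `it` elsewhere in the module). The plain-itertools equivalent below
# shows what the inner generator enumerates.
import itertools as it

s = {1, 2, 3}
subsets = [set(c) for r in range(len(s) + 1) for c in it.combinations(s, r)]
print(len(subsets))  # 8 subsets, from set() up to {1, 2, 3}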
| 11,163
|
def get_entries_configuration(data):
"""Given the dictionary of resources, returns the generated factory xml file
Args:
data (dict): A dictionary similar to the one returned by ``get_information``
Returns:
str: The factory xml file as a string
"""
entries_configuration = ""
for _, site_information in sorted(data.items()):
for celem, ce_information in sorted(site_information.items()):
for _, q_information in sorted(ce_information.items()):
for entry, entry_information in sorted(q_information.items()):
entry_configuration = copy.deepcopy(entry_information)
entry_configuration["entry_name"] = entry
                    # Can we get this information (next key)?
entry_configuration["attrs"]["GLIDEIN_REQUIRED_OS"] = {
"comment": "This value has been hardcoded",
"value": "any",
}
# Probably we can use port from attribute AddressV1 or CollectorHost
entry_configuration["gatekeeper"] = celem + " " + celem + ":9619"
entry_configuration["rsl"] = ""
entry_configuration["attrs"] = get_attr_str(entry_configuration["attrs"])
if "submit_attrs" in entry_configuration:
entry_configuration["submit_attrs"] = get_submit_attr_str(entry_configuration["submit_attrs"])
else:
entry_configuration["submit_attrs"] = ""
entry_configuration["limits"] = get_limits_str(entry_configuration["limits"])
entry_configuration["submission_speed"] = get_submission_speed(
entry_configuration["submission_speed"]
)
entries_configuration += ENTRY_STUB % entry_configuration
return entries_configuration
| 11,164
|
def validinput(x0, xf, n):
"""Checks that the user input is valid.
Args:
x0 (float): Start value
xf (float): End values
n (int): Number of sample points
Returns:
        False if x0 > xf or if n is not an integer,
True otherwise
"""
valid = True
if x0 > xf:
valid = False
if int(n) != n:
valid = False
if not valid:
print("Please recheck your input")
return valid
| 11,165
|
def multi_class_bss(predictions: np.ndarray, targets: np.ndarray) -> float:
"""
Brier Skill Score:
bss = 1 - bs / bs_{ref}
    bs_{ref} will be computed for a model that makes predictions according to the prevalence of each class in the dataset
:param predictions: probability score. Expected Shape [N, C]
:param targets: target class (int) per sample. Expected Shape [N]
"""
# BS
bs = multi_class_bs(predictions, targets)
# no skill BS
no_skill_prediction = [(targets == target_cls).sum() / targets.shape[0] for target_cls in
range(predictions.shape[-1])]
no_skill_predictions = np.tile(np.array(no_skill_prediction), (predictions.shape[0], 1))
bs_ref = multi_class_bs(no_skill_predictions, targets)
return 1.0 - bs / bs_ref
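# The helper multi_class_bs is not shown here; a plausible definition (an
# assumption) is the mean squared error against one-hot targets, followed by a
# short usage sketch.
import numpy as np

def multi_class_bs(predictions: np.ndarray, targets: np.ndarray) -> float:
    one_hot = np.eye(predictions.shape[-1])[targets]              # [N, C]
    return float(np.mean(np.sum((predictions - one_hot) ** 2, axis=-1)))

preds = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
targs = np.array([0, 1, 1])
print(multi_class_bss(preds, targs))  # ~0.385: better than the class-prevalence baseline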
| 11,166
|
def get_arxiv_id_or_ascl_id(result_record):
"""
:param result_record:
:return:
"""
identifiers = result_record.get("identifier", [])
for identifier in identifiers:
if "arXiv:" in identifier:
return identifier.replace("arXiv:", "")
if "ascl:" in identifier:
return identifier.replace("ascl:", "")
return ""
| 11,167
|
def get_query_claim_similarities(
sim: Mapping[Tuple[str, int], float],
softmax: bool,
) -> Mapping[Tuple[str, int], float]:
"""
Preprocess query claim similarities.
:param sim:
A mapping from (premise_id, claim_id) to the logits of the similarity model, shape: (2,).
:param softmax:
Whether to apply softmax or use raw logits.
:return:
A mapping from (premise_id, claim_id) to scalar similarity value.
"""
# ensure consistent order
pairs = sorted(sim.keys())
    # create tensor, shape: (num_pairs, 2)
sim = torch.stack(
tensors=[
torch.as_tensor(data=sim[pair], dtype=torch.float32)
for pair in pairs
],
dim=0,
)
    # apply softmax if requested
if softmax:
sim = sim.softmax(dim=-1)
# take probability of "similar" class
sim = sim[:, 1]
# one row corresponds to one pair similarity
return dict(zip(pairs, sim))
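# Usage sketch (assumption: values are the two raw logits of a binary
# similarity classifier, keyed by (premise_id, claim_id)).
import torch

logits = {("p1", 0): [0.2, 1.8], ("p2", 0): [2.0, -1.0]}
probs = get_query_claim_similarities(logits, softmax=True)
print({k: round(float(v), 3) for k, v in probs.items()})
# {('p1', 0): 0.832, ('p2', 0): 0.047} -- probability of the "similar" class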
| 11,168
|
def regexp(options: dict):
"""
Apply a regexp method to the dataset
:param options: contains two values:
        - find: the string to find
- replace: string that will replace the find string
"""
def apply_regexp(dataset, tag):
"""
Apply a regexp to the dataset
"""
element = dataset.get(tag)
if element is not None:
element.value = re.sub(
options["find"], options["replace"], str(element.value)
)
return apply_regexp
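# Usage sketch with a minimal stand-in for the dataset object (assumption: in the
# original pipeline this is a pydicom-style dataset whose get(tag) returns an
# element exposing a mutable .value attribute).
import re  # required by the regexp() closure above


class _Element:
    def __init__(self, value):
        self.value = value


ds = {(0x0010, 0x0010): _Element("DOE^JOHN")}  # dict.get() mimics dataset.get()
anonymise = regexp({"find": r"DOE", "replace": "ANON"})
anonymise(ds, (0x0010, 0x0010))
print(ds[(0x0010, 0x0010)].value)  # "ANON^JOHN"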
| 11,169
|
def RunUnitTests():
"""Runs all registered unit tests."""
test_runner = module_test_runner.ModuleTestRunner()
test_runner.modules = [
document_test,
model_test,
ops_test,
robot_abstract_test,
util_test,
]
test_runner.RunAllTests()
| 11,170
|
def do_one_subject(sub_curr, params, verbose=False):
"""
launch sessions processing for sub_curr
parameters:
-----------
sub_curr: dict
contains subject base directory
contains subject index
params: dict
parameters for layout, data and analysis
"""
sub_idx, sub_dir = sub_curr['sub_idx'], sub_curr['sub_dir']
nb_sess = params['data']['nb_sess']
dlayo = params['layout']
sess_idx = range(1, nb_sess+1)
sess_dirs = [osp.join(sub_dir, (dlayo['dir']['sess+']).format(idx)) for idx in sess_idx]
sesss_info = {}
sess_curr = {}
for sess_idx, sess_dir in enumerate(sess_dirs, 1): # start idx at 1
sess_curr['sess_idx'] = sess_idx
sess_curr['sess_dir'] = sess_dir
sess_str = (dlayo['dir']['sess+']).format(sess_idx)
if verbose: print('\n' + '---'*11 + "\n" + sess_str)
sesss_info[sess_str] = do_one_sess(sess_curr, sub_curr, params, verbose=verbose)
return sesss_info
| 11,171
|
def write_to_excel(sheet_one_names, sheet_two_names, url, url2, destfile):
"""
Write to destination excel
"""
for i in sheet_one_names:
data = pd.read_excel(url, sheet_name=i)
with pd.ExcelWriter(destfile, engine="openpyxl", mode="a") as writer:
data.to_excel(writer, index=False, sheet_name=i)
for i in sheet_two_names:
data = pd.read_excel(url2, sheet_name=i)
with pd.ExcelWriter(destfile, engine="openpyxl", mode="a") as writer:
data.to_excel(writer, index=False, sheet_name=i)
# remove the extra Sheet added if exists while creating the destfile
if "Sheet" not in sheet_one_names and "Sheet" not in sheet_two_names:
workbook1 = openpyxl.load_workbook(destfile)
del workbook1["Sheet"]
workbook1.save(destfile)
| 11,172
|
def comp_skin_effect(self, freq, T_op=20, T_ref=20, type_skin_effect=1):
"""Compute the skin effect factor for the conductor
Parameters
----------
self : Conductor
        a Conductor object
freq: float
electrical frequency [Hz]
T_op: float
Conductor operational temperature [degC]
T_ref: float
Conductor reference temperature [degC]
type_skin_effect: int
Model type for skin effect calculation:
- 1: analytical model (default)
Returns
----------
Xkr_skinS : float
skin effect coeff for resistance at freq
Xke_skinS : float
skin effect coeff for inductance at freq
"""
# initialization
Xkr_skinS = 1
Xke_skinS = 1
if type_skin_effect == 1: # analytical calculations based on Pyrhonen
sigmar = self.cond_mat.elec.get_conductivity(T_op=T_op, T_ref=T_ref)
mu0 = 4 * pi * 1e-7
ws = 2 * pi * freq
Slot = self.parent.parent.slot
# nsw = len(ws)
# case of preformed rectangular wire CondType11
if hasattr(self, "Wwire") and hasattr(self, "Hwire"):
Hwire = self.Hwire
Wwire = self.Wwire
Nwppc_rad = self.Nwppc_rad
Nwppc_tan = self.Nwppc_tan
# case of round wire CondType12 - approximation based on rectangular wire formula
elif hasattr(self, "Wwire") and not hasattr(self, "Hwire"):
Hwire = self.Wwire
Wwire = self.Wwire
Nwppc_tan = self.Nwppc
Nwppc_rad = self.Nwppc
# case of bar conductor
elif hasattr(self, "Hbar") and hasattr(self, "Wbar"):
Hwire = self.Hbar
Wwire = self.Wbar
Nwppc_tan = 1
Nwppc_rad = 1
Alpha_wind = Slot.comp_angle_active_eq()
R_wind = Slot.comp_radius_mid_active()
W2s = 2 * R_wind * sin(Alpha_wind)
# average resistance factor over the slot
ksi = Hwire * sqrt((1 / 2) * ws * mu0 * sigmar * Nwppc_tan * Wwire / W2s)
phi_skin = self.comp_phi_skin(ksi)
psi_skin = self.comp_psi_skin(ksi)
phip_skin = self.comp_phip_skin(ksi)
psip_skin = self.comp_psip_skin(ksi)
Xkr_skinS = phi_skin + ((Nwppc_rad ** 2 - 1) / 3) * psi_skin
Xke_skinS = (1 / Nwppc_rad ** 2) * phip_skin + (
1 - 1 / Nwppc_rad ** 2
) * psip_skin
return Xkr_skinS, Xke_skinS
| 11,173
|
def insert_point_into_G(G_, point, node_id=100000, max_distance_meters=5,
nearby_nodes_set=set([]), allow_renaming=True,
verbose=False, super_verbose=False):
"""
Insert a new node in the graph closest to the given point.
Notes
-----
If the point is too far from the graph, don't insert a node.
Assume all edges have a linestring geometry
http://toblerity.org/shapely/manual.html#object.simplify
Sometimes the point to insert will have the same coordinates as an
existing point. If allow_renaming == True, relabel the existing node.
convert linestring to multipoint?
https://github.com/Toblerity/Shapely/issues/190
TODO : Implement a version without renaming that tracks which node is
closest to the desired point.
Arguments
---------
G_ : networkx graph
        Input networkx graph, with edges assumed to have a dictionary of
properties that includes the 'geometry' key.
point : shapely Point
Shapely point containing (x, y) coordinates
node_id : int
Unique identifier of node to insert. Defaults to ``100000``.
max_distance_meters : float
Maximum distance in meters between point and graph. Defaults to ``5``.
nearby_nodes_set : set
Set of possible edge endpoints to search. If nearby_nodes_set is not
empty, only edges with a node in this set will be checked (this can
        greatly speed computation on large graphs). If nearby_nodes_set is
empty, check all possible edges in the graph.
Defaults to ``set([])``.
    allow_renaming : boolean
        Switch to allow renaming of an existing node with node_id if the
        existing node is closest to the point. Defaults to ``True``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
super_verbose : boolean
Switch to print mucho values to screen. Defaults to ``False``.
Returns
-------
G_, node_props, min_dist : tuple
G_ is the updated graph
node_props gives the properties of the inserted node
min_dist is the distance from the point to the graph
"""
# check if node_id already exists in G
# if node_id in set(G_.nodes()):
# print ("node_id:", node_id, "already in G, cannot insert node!")
# return
best_edge, min_dist, best_geom = get_closest_edge_from_G(
G_, point, nearby_nodes_set=nearby_nodes_set,
verbose=super_verbose)
[u, v, key] = best_edge
G_node_set = set(G_.nodes())
if verbose:
print("Inserting point:", node_id)
print("best edge:", best_edge)
print(" best edge dist:", min_dist)
u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]
v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]
print("ploc:", (point.x, point.y))
print("uloc:", u_loc)
print("vloc:", v_loc)
if min_dist > max_distance_meters:
if verbose:
print("min_dist > max_distance_meters, skipping...")
return G_, {}, -1, -1
else:
# updated graph
# skip if node exists already
if node_id in G_node_set:
if verbose:
print("Node ID:", node_id, "already exists, skipping...")
return G_, {}, -1, -1
# G_.edges[best_edge[0]][best_edge[1]][0]['geometry']
line_geom = best_geom
# Length along line that is closest to the point
line_proj = line_geom.project(point)
# Now combine with interpolated point on line
new_point = line_geom.interpolate(line_geom.project(point))
x, y = new_point.x, new_point.y
#################
# create new node
try:
# first get zone, then convert to latlon
_, _, zone_num, zone_letter = utm.from_latlon(G_.nodes[u]['lat'],
G_.nodes[u]['lon'])
# convert utm to latlon
lat, lon = utm.to_latlon(x, y, zone_num, zone_letter)
except:
lat, lon = y, x
# set properties
# props = G_.nodes[u]
node_props = {'highway': 'insertQ',
'lat': lat,
'lon': lon,
'osmid': node_id,
'x': x,
'y': y}
# add node
G_.add_node(node_id, **node_props)
# assign, then update edge props for new edge
_, _, edge_props_new = copy.deepcopy(
list(G_.edges([u, v], data=True))[0])
# remove extraneous 0 key
# print ("edge_props_new.keys():", edge_props_new)
# if list(edge_props_new.keys()) == [0]:
# edge_props_new = edge_props_new[0]
# cut line
split_line = cut_linestring(line_geom, line_proj)
# line1, line2, cp = cut_linestring(line_geom, line_proj)
if split_line is None:
print("Failure in cut_linestring()...")
print("type(split_line):", type(split_line))
print("split_line:", split_line)
print("line_geom:", line_geom)
print("line_geom.length:", line_geom.length)
print("line_proj:", line_proj)
print("min_dist:", min_dist)
return G_, {}, 0, 0
if verbose:
print("split_line:", split_line)
# if cp.is_empty:
if len(split_line) == 1:
if verbose:
print("split line empty, min_dist:", min_dist)
# get coincident node
outnode = ''
outnode_x, outnode_y = -1, -1
x_p, y_p = new_point.x, new_point.y
x_u, y_u = G_.nodes[u]['x'], G_.nodes[u]['y']
x_v, y_v = G_.nodes[v]['x'], G_.nodes[v]['y']
# if verbose:
# print "x_p, y_p:", x_p, y_p
# print "x_u, y_u:", x_u, y_u
# print "x_v, y_v:", x_v, y_v
# sometimes it seems that the nodes aren't perfectly coincident,
# so see if it's within a buffer
buff = 0.05 # meters
if (abs(x_p - x_u) <= buff) and (abs(y_p - y_u) <= buff):
outnode = u
outnode_x, outnode_y = x_u, y_u
elif (abs(x_p - x_v) <= buff) and (abs(y_p - y_v) <= buff):
outnode = v
outnode_x, outnode_y = x_v, y_v
# original method with exact matching
# if (x_p == x_u) and (y_p == y_u):
# outnode = u
# outnode_x, outnode_y = x_u, y_u
# elif (x_p == x_v) and (y_p == y_v):
# outnode = v
# outnode_x, outnode_y = x_v, y_v
else:
print("Error in determining node coincident with node: "
+ str(node_id) + " along edge: " + str(best_edge))
print("x_p, y_p:", x_p, y_p)
print("x_u, y_u:", x_u, y_u)
print("x_v, y_v:", x_v, y_v)
# return
return G_, {}, 0, 0
# if the line cannot be split, that means that the new node
# is coincident with an existing node. Relabel, if desired
if allow_renaming:
node_props = G_.nodes[outnode]
# A dictionary with the old labels as keys and new labels
# as values. A partial mapping is allowed.
mapping = {outnode: node_id}
Gout = nx.relabel_nodes(G_, mapping)
if verbose:
print("Swapping out node ids:", mapping)
return Gout, node_props, x_p, y_p
else:
# new node is already added, presumably at the exact location
# of an existing node. So just remove the best edge and make
# an edge from new node to existing node, length should be 0.0
line1 = LineString([new_point, Point(outnode_x, outnode_y)])
edge_props_line1 = edge_props_new.copy()
edge_props_line1['length'] = line1.length
edge_props_line1['geometry'] = line1
# make sure length is zero
if line1.length > buff:
print("Nodes should be coincident and length 0!")
print(" line1.length:", line1.length)
print(" x_u, y_u :", x_u, y_u)
print(" x_v, y_v :", x_v, y_v)
print(" x_p, y_p :", x_p, y_p)
print(" new_point:", new_point)
print(" Point(outnode_x, outnode_y):",
Point(outnode_x, outnode_y))
return
                # add edge of length 0 from new node to nearest existing node
G_.add_edge(node_id, outnode, **edge_props_line1)
return G_, node_props, x, y
# originally, if not renaming nodes,
            # just ignore this complication and return the original
# return G_, node_props, 0, 0
else:
# else, create new edges
line1, line2 = split_line
# get distances
# print ("insert_point(), G_.nodes[v]:", G_.nodes[v])
u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]
v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]
# compare to first point in linestring
geom_p0 = list(line_geom.coords)[0]
# or compare to inserted point? [this might fail if line is very
# curved!]
# geom_p0 = (x,y)
dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)
dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)
# reverse edge order if v closer than u
if dist_to_v < dist_to_u:
line2, line1 = split_line
if verbose:
print("Creating two edges from split...")
print(" original_length:", line_geom.length)
print(" line1_length:", line1.length)
print(" line2_length:", line2.length)
print(" u, dist_u_to_point:", u, dist_to_u)
print(" v, dist_v_to_point:", v, dist_to_v)
print(" min_dist:", min_dist)
# add new edges
edge_props_line1 = edge_props_new.copy()
edge_props_line1['length'] = line1.length
edge_props_line1['geometry'] = line1
# remove geometry?
# edge_props_line1.pop('geometry', None)
# line2
edge_props_line2 = edge_props_new.copy()
edge_props_line2['length'] = line2.length
edge_props_line2['geometry'] = line2
# remove geometry?
# edge_props_line1.pop('geometry', None)
# insert edge regardless of direction
# G_.add_edge(u, node_id, **edge_props_line1)
# G_.add_edge(node_id, v, **edge_props_line2)
# check which direction linestring is travelling (it may be going
# from v -> u, which means we need to reverse the linestring)
# otherwise new edge is tangled
geom_p0 = list(line_geom.coords)[0]
dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)
dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)
# if verbose:
# print "dist_to_u, dist_to_v:", dist_to_u, dist_to_v
if dist_to_u < dist_to_v:
G_.add_edge(u, node_id, **edge_props_line1)
G_.add_edge(node_id, v, **edge_props_line2)
else:
G_.add_edge(node_id, u, **edge_props_line1)
G_.add_edge(v, node_id, **edge_props_line2)
if verbose:
print("insert edges:", u, '-', node_id, 'and', node_id, '-', v)
# remove initial edge
G_.remove_edge(u, v, key)
return G_, node_props, x, y
| 11,174
|
def draw_bbox(img, ymin, xmin, ymax, xmax, color, str_list=()):
"""draw bounding box over an image."""
font = ImageFont.truetype("/workspace/fonts/JosefinSans-SemiBold.ttf", 25)
draw = ImageDraw.Draw(img)
width, height = img.size
left, right = xmin * width, xmax * width
top, bottom = ymin * height, ymax * height
draw.line(
[(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
width=5,
fill=color,
)
for string in str_list[::-1]:
draw.text((left, top), string, fill="black", font=font)
| 11,175
|
def find_paths(initial_path, extension):
"""
From a path, return all the files of a given extension inside.
:param initial_path: the initial directory of search
:param extension: the extension of the files to be searched
:return: list of paths inside the initial path
"""
paths = glob.glob(initial_path+r'/**/*.' + extension, recursive=True)
return paths
| 11,176
|
def ridge_line(df_act, t_range='day', n=1000):
"""
https://plotly.com/python/violin/
for one day plot the activity distribution over the day
- sample uniform from each interval
"""
df = activities_dist(df_act.copy(), t_range, n)
colors = n_colors('rgb(5, 200, 200)', 'rgb(200, 10, 10)', len(df.columns), colortype='rgb')
data = df.values.T
fig = go.Figure()
i = 0
for data_line, color in zip(data, colors):
fig.add_trace(go.Violin(x=data_line, line_color=color, name=df.columns[i]))
i += 1
fig.update_traces(orientation='h', side='positive', width=3, points=False)
fig.update_layout(xaxis_showgrid=False, xaxis_zeroline=False)
return fig
| 11,177
|
def transfer(
name: str,
start: Optional[datetime.datetime],
interval: str,
source_endpoint: str,
dest_endpoint: str,
label: Optional[str],
stop_after_date: Optional[datetime.datetime],
stop_after_runs: Optional[int],
sync_level: Optional[int],
encrypt_data: bool,
verify_checksum: bool,
preserve_timestamp: bool,
item: Optional[List[Tuple[str, str, Optional[str]]]],
items_file: Optional[str],
verbose: bool,
):
"""
Submit a task for periodic transfer or sync using Globus transfer. The options for
this command are tailored to the transfer action.
"""
action_url = urllib.parse.urlparse(
"https://actions.automate.globus.org/transfer/transfer/run"
)
endpoints = [source_endpoint, dest_endpoint]
tc = get_transfer_client()
error_if_not_activated(tc, endpoints)
data_access_scopes = _get_required_data_access_scopes(tc, endpoints)
transfer_ap_scope = (
"https://auth.globus.org/scopes/actions.globus.org/transfer/transfer"
)
if len(data_access_scopes) > 0:
transfer_ap_scope = (
f"{transfer_ap_scope}[{TRANSFER_ALL_SCOPE}[{' '.join(data_access_scopes)}]]"
)
# Just declare it for typing purposes
transfer_items: List[Dict[str, Union[str, bool]]] = []
if item:
transfer_items = [
{
"source_path": i[0].strip(),
"destination_path": i[1].strip(),
"recursive": i[2],
}
for i in item
]
elif items_file:
# Unwind the generator
transfer_items = [i for i in _read_csv(items_file)]
action_body = {
"source_endpoint_id": source_endpoint,
"destination_endpoint_id": dest_endpoint,
"transfer_items": transfer_items,
}
if label:
action_body["label"] = label
else:
action_body["label"] = f"Job from Timer service named {name}"
if sync_level is not None:
action_body["sync_level"] = sync_level
action_body["encrypt_data"] = encrypt_data
action_body["verify_checksum"] = verify_checksum
action_body["preserve_timestamp"] = preserve_timestamp
callback_body = {"body": action_body}
interval_seconds = _parse_timedelta(interval).total_seconds()
if not interval_seconds:
raise click.UsageError(f"Couldn't parse interval: {interval}")
if interval_seconds < 60:
raise click.UsageError(f"Interval is too short, minimum is 1 minute")
response = job_submit(
name,
start,
interval_seconds,
transfer_ap_scope,
action_url,
action_body=None,
action_file=None,
callback_body=callback_body,
stop_after_date=stop_after_date,
stop_after_runs=stop_after_runs,
)
show_job(response, verbose=verbose)
| 11,178
|
def merge_og_files():
""" Function to open, crop and merge the APHRODITE data """
ds_list = []
extent = ls.basin_extent('indus')
print('1951-2007')
for f in tqdm(glob.glob(
'_Data/APHRODITE/APHRO_MA_025deg_V1101.1951-2007.gz/*.nc')):
ds = xr.open_dataset(f)
ds = ds.rename({'latitude': 'lat', 'longitude': 'lon', 'precip': 'tp'})
ds_cropped = ds.tp.sel(lon=slice(extent[1], extent[3]),
lat=slice(extent[2], extent[0]))
ds_resampled = (ds_cropped.resample(time="M")).mean()
ds_resampled['time'] = ds_resampled.time.astype(float)/365/24/60/60/1e9
ds_resampled['time'] = ds_resampled['time'] + 1970
ds_list.append(ds_resampled)
print('2007-2016')
for f in tqdm(glob.glob('_Data/APHRODITE/APHRO_MA_025deg_V1101_EXR1/*.nc')):
ds = xr.open_dataset(f)
ds = ds.rename({'precip': 'tp'})
ds_cropped = ds.tp.sel(lon=slice(extent[1], extent[3]),
lat=slice(extent[2], extent[0]))
ds_resampled = (ds_cropped.resample(time="M")).mean()
ds_resampled['time'] = ds_resampled.time.astype(float)/365/24/60/60/1e9
ds_resampled['time'] = ds_resampled['time'] + 1970
ds_list.append(ds_resampled)
ds_merged = xr.merge(ds_list)
# Standardise time resolution
maxyear = float(ds_merged.time.max())
minyear = float(ds_merged.time.min())
time_arr = np.arange(round(minyear) + 1./24., round(maxyear), 1./12.)
ds_merged['time'] = time_arr
ds_merged.to_netcdf("_Data/APHRODITE/aphrodite_indus_1951_2016.nc")
| 11,179
|
def op(name,
value,
display_name=None,
description=None,
collections=None):
"""Create a TensorFlow summary op to record data associated with a particular the given guest.
Arguments:
name: A name for this summary operation.
guest: A rank-0 string `Tensor`.
display_name: If set, will be used as the display name
in TensorBoard. Defaults to `name`.
description: A longform readable description of the summary data.
Markdown is supported.
collections: Which TensorFlow graph collections to add the summary
op to. Defaults to `['summaries']`. Can usually be ignored.
"""
# The `name` argument is used to generate the summary op node name.
# That node name will also involve the TensorFlow name scope.
# By having the display_name default to the name argument, we make
# the TensorBoard display clearer.
if display_name is None:
display_name = name
# We could pass additional metadata other than the PLUGIN_NAME within the
# plugin data by using the content parameter, but we don't need any metadata
# for this simple example.
summary_metadata = tf.SummaryMetadata(
display_name=display_name,
summary_description=description,
plugin_data=tf.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME))
# Return a summary op that is properly configured.
return tf.summary.tensor_summary(
name,
value,
summary_metadata=summary_metadata,
collections=collections)
| 11,180
|
def select_only_top_n_common_types(dataset: pd.DataFrame, n: int = 10) -> pd.DataFrame:
"""
First find the most popular 'n' types. Remove any uncommon types from the
dataset
:param dataset: The complete dataset
:param n: The number of top types to select
    :return: The dataframe restricted to rows whose type is among the top 'n' types
"""
len_before_filtering = len(dataset)
print(f'*** Selecting only the most common "{n}" types from the dataset. Current length is {len_before_filtering}')
top_types = dataset['type'].value_counts()[:n].to_dict()
dataset = dataset[dataset['type'].apply(lambda x: x in top_types)]
len_after_filtering = len(dataset)
print(
f'Removed {len_before_filtering - len_after_filtering} elements, the current length of the dataset is {len_after_filtering}\n')
return dataset
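# Usage sketch with a tiny hypothetical dataframe (assumption: the real dataset
# has a categorical 'type' column).
import pandas as pd

df = pd.DataFrame({"type": ["star", "star", "galaxy", "galaxy", "galaxy", "quasar"],
                   "value": [1, 2, 3, 4, 5, 6]})
top2 = select_only_top_n_common_types(df, n=2)
print(top2["type"].unique())  # ['star' 'galaxy'] -- the single 'quasar' row is dropped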
| 11,181
|
def renew_cached_query_task(pk: int):
"""
Renews `CachedQuery` object identified by primary key `pk`
:param pk: primary key
"""
try:
cq = CachedQuery.objects.get(pk=pk)
except CachedQuery.DoesNotExist:
logger.debug('renew_cached_query: CachedQuery object not found: #%s', pk)
return
else:
try:
cq.renew()
except RenewalError as exc:
logger.warning('Renewal error (%s), deleting cache: %s', exc, cq)
cq.delete()
| 11,182
|
def load_checkpoint(filename='checkpoint.pth.tar'):
"""Load for general purpose (e.g., resume training)"""
filename = os.path.join(CHECKPOINTS_PATH, filename)
print(filename)
if not os.path.isfile(filename):
return None
state = torch.load(filename)
return state
| 11,183
|
def __one_both_closed(x, y, c = None, l = None):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
return x - 1, y
| 11,184
|
def get_corners(img, sigma=1, alpha=0.05, thresh=1000):
""" Returns the detected corners as a list of tuples """
ret = []
i_x = diff_x(img)
i_y = diff_y(img)
i_xx = ndimage.gaussian_filter(i_x ** 2, sigma=sigma)
i_yy = ndimage.gaussian_filter(i_y ** 2, sigma=sigma)
i_xy = ndimage.gaussian_filter(i_x * i_y, sigma=sigma)
height, width = img.shape[:2]
det = i_xx * i_yy - i_xy ** 2
trace = i_xx + i_yy
r_val = det - alpha * trace ** 2
for i in range(2, height - 3):
for j in range(2, width - 3):
if r_val[i, j] > thresh and r_val[i, j] == np.amax(r_val[i - 1:i + 2, j - 1:j + 2]):
ret.append((i, j))
return ret
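# Usage sketch. diff_x / diff_y are not shown above; a common choice (assumed
# here) is a Sobel derivative. The detector should fire near the corners of a
# bright square on a synthetic image.
import numpy as np
from scipy import ndimage

def diff_x(img):
    return ndimage.sobel(img.astype(float), axis=1)

def diff_y(img):
    return ndimage.sobel(img.astype(float), axis=0)

img = np.zeros((40, 40))
img[10:30, 10:30] = 255.0
print(get_corners(img, sigma=1, alpha=0.05, thresh=1000))  # points clustered near the four corners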
| 11,185
|
def add_param_starts(this_starts, params_req, global_conf, run_period_len, start_values_min, start_values_max):
"""Process the param starts information taken from the generator, and add it to
the array being constructed.
Inputs:
this_starts: a tuple with (starts_min, starts_max), the output from a generator's
get_param_starts() function.
params_req: integer, the number of parameters this generator requires
global_conf: a dict including 'min_param_val' and 'max_param_val'
run_period_len: the number of periods to run for
start_values_min: the array to append the min start values to
start_values_max: the array to append the max start values to
Outputs:
start_values_min, start_values_max, updated versions (not necessarily in-place)
"""
(starts_min, starts_max) = this_starts
starts_min = numpy.array(starts_min)
starts_max = numpy.array(starts_max)
if starts_min.size == 0:
start_values_min = numpy.hstack((start_values_min, (
(numpy.ones((run_period_len, params_req)) *
global_conf['min_param_val']).tolist())))
else:
start_values_min = numpy.hstack((start_values_min, starts_min))
if starts_max.size == 0:
start_values_max = numpy.hstack((start_values_max, (
(numpy.ones((run_period_len, params_req)) *
global_conf['max_param_val']).tolist())))
else:
start_values_max = numpy.hstack((start_values_max, starts_max))
return start_values_min, start_values_max
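# Usage sketch (assumption: a generator returning empty start tuples falls back
# to the global min/max parameter bounds).
import numpy

run_period_len, params_req = 3, 2
conf = {'min_param_val': 0.0, 'max_param_val': 10.0}
mins = numpy.empty((run_period_len, 0))
maxs = numpy.empty((run_period_len, 0))
mins, maxs = add_param_starts(((), ()), params_req, conf, run_period_len, mins, maxs)
print(mins.shape, maxs.shape)  # (3, 2) (3, 2), filled with 0.0 and 10.0 respectively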
| 11,186
|
def pareto(data, name=None, exp=None, minval=None, maxval=None, **kwargs):
"""the pareto distribution: val ~ val**exp | minval <= val < maxval
"""
assert (exp is not None) and (minval is not None) and (maxval is not None), \
'must supply exp, minval, and maxval!' ### done to make command-line arguments easier in add-prior-weights
if name is not None:
data = data[name]
ans = exp*np.log(data)
    ans[np.logical_not((minval<=data)*(data<maxval))] = -np.inf
return ans
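# Usage sketch (assumption: `data` holds positive samples; values outside
# [minval, maxval) get log-probability -inf).
import numpy as np

samples = np.array([1.5, 3.0, 20.0])
print(pareto(samples, exp=-2.0, minval=1.0, maxval=10.0))
# [-0.81 -2.2  -inf] -- unnormalised log-density; the last sample is out of range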
| 11,187
|
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_ansible_tower package"""
reload_params = {"package": u"fn_ansible_tower",
"incident_fields": [],
"action_fields": [u"ansible_tower_arguments", u"ansible_tower_credential", u"ansible_tower_hosts", u"ansible_tower_inventory", u"ansible_tower_job_name", u"ansible_tower_module", u"ansible_tower_module_arguments", u"ansible_tower_run_tags", u"ansible_tower_skip_tags", u"job_status", u"last_updated", u"tower_project", u"tower_save_as", u"tower_template_pattern"],
"function_params": [u"incident_id", u"tower_arguments", u"tower_credential", u"tower_hosts", u"tower_inventory", u"tower_job_id", u"tower_job_status", u"tower_last_updated", u"tower_module", u"tower_project", u"tower_run_tags", u"tower_save_as", u"tower_skip_tags", u"tower_template_id", u"tower_template_name", u"tower_template_pattern"],
"datatables": [u"ansible_tower_job_templates", u"ansible_tower_launched_jobs"],
"message_destinations": [u"fn_ansible_tower"],
"functions": [u"ansible_tower_get_ad_hoc_command_results", u"ansible_tower_get_job_results", u"ansible_tower_launch_job_template", u"ansible_tower_list_job_templates", u"ansible_tower_list_jobs", u"ansible_tower_run_an_ad_hoc_command"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"ansible_tower_get_ad_hoc_command_results", u"ansible_tower_get_job_results", u"ansible_tower_launch_job_template", u"ansible_tower_list_job_templates", u"ansible_tower_list_jobs", u"ansible_tower_run_an_ad_hoc_command", u"ansible_tower_run_job__artifact", u"ansible_tower_run_job__incident"],
"actions": [u"Ansible Tower Get Ad Hoc Command Results", u"Ansible Tower Get Job Results", u"Ansible Tower List Job Templates", u"Ansible Tower List Jobs", u"Ansible Tower Run an Ad Hoc Command", u"Ansible Tower Run Job", u"Ansible Tower Run Job - Artifact", u"Ansible Tower Run Job - Incident"],
"incident_artifact_types": []
}
return reload_params
| 11,188
|
def get_default_identity(username, provider=None):
"""
Return the default identity given to the user-group for provider.
"""
try:
filter_query = {}
if provider:
filter_query['provider'] = provider
from core.models.group import GroupMembership
memberships = GroupMembership.objects.filter(user__username=username).prefetch_related('group')
for membership in memberships:
group = membership.group
identities = group.current_identities.filter(
**filter_query)
if group and identities.count() > 0:
default_identity = identities.first()
logger.debug(
"default_identity set to %s " %
default_identity)
return default_identity
# No identities found for any group
if settings.AUTO_CREATE_NEW_ACCOUNTS:
new_identities = create_new_accounts(username, selected_provider=provider)
if new_identities:
return new_identities[0]
logger.error("%s has no identities. Functionality will be severely limited." % username)
return None
except Exception as e:
logger.exception(e)
return None
| 11,189
|
def add_db(vdb:VariantsDb):
"""Add that db to settings, connections, and activate it"""
from varappx.handle_init import db
vdb.is_active = 1
db.session.add(vdb)
db.session.commit()
add_db_to_settings(vdb.name, vdb.filename)
| 11,190
|
def get_text_item(text):
"""Converts a text into a tokenized text item
:param text:
:return:
"""
if config['data']['lowercased']:
text = text.lower()
question_tokens = [Token(t) for t in word_tokenize(text)]
question_sentence = Sentence(' '.join([t.text for t in question_tokens]), question_tokens)
return TextItem(question_sentence.text, [question_sentence])
| 11,191
|
def Ltotal(scatter: bool):
"""
Graph for computing 'Ltotal'.
"""
graph = beamline(scatter=scatter)
if not scatter:
return graph
del graph['two_theta']
return graph
| 11,192
|
def gen_dir(download_dir, main_keyword):
"""Helper function | generates a directory where pics will be downloaded"""
if not download_dir:
download_dir = './data/'
img_dir = download_dir + main_keyword + '/'
if not os.path.exists(img_dir):
os.makedirs(img_dir)
return img_dir
| 11,193
|
def is_valid_page_to_edit(prev_pg_to_edit, pg_to_edit):
"""Check if the page is valid to edit or not
Args:
prev_pg_to_edit (obj): page to edit object of previous page
pg_to_edit (obj): page to edit object of current page
Returns:
boolean: true if valid else false
"""
try:
prev_pg_ref_end = int(prev_pg_to_edit.ref_end_page_no)
cur_pg_ref_start = int(pg_to_edit.ref_start_page_no)
cur_pg_ref_end = int(pg_to_edit.ref_end_page_no)
except Exception:
return False
if prev_pg_to_edit == pg_to_edit:
if cur_pg_ref_end >= cur_pg_ref_start:
return True
else:
return False
elif prev_pg_to_edit.vol != pg_to_edit.vol and cur_pg_ref_start <= cur_pg_ref_end:
return True
elif cur_pg_ref_start <= cur_pg_ref_end and prev_pg_ref_end <= cur_pg_ref_start:
return True
else:
return False
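# Usage sketch with lightweight stand-ins for the page objects (assumption:
# pages expose vol, ref_start_page_no and ref_end_page_no attributes).
from types import SimpleNamespace

prev_pg = SimpleNamespace(vol=1, ref_start_page_no="10", ref_end_page_no="15")
cur_pg = SimpleNamespace(vol=1, ref_start_page_no="15", ref_end_page_no="20")
print(is_valid_page_to_edit(prev_pg, cur_pg))  # True: 15 <= 20 and 15 <= 15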
| 11,194
|
def get_diffs(backups, backup_id, partner_backups, bound=10):
"""
Given a list `backups`, a `backup_id`, and `bound`
    Compute a dict containing diffs/stats of the backups surrounding the `backup_id`:
diff_dict = {
"stats": diff_stats_list,
"files": files_list,
"partners": partner_files_list,
"prev_backup_id": prev_backup_id,
"backup_id": backup_id,
"next_backup_id": next_backup_id
}
return {} if `backup_id` not found
"""
backup_dict = _get_backup_range(backups, backup_id, bound)
if not backup_dict:
return {}
backups = backup_dict["backups"]
backup_id = backup_dict["backup_id"] # relevant backup_id might be different
prev_backup_id = backup_dict["prev_backup_id"]
next_backup_id = backup_dict["next_backup_id"]
get_recent_backup = _recent_backup_finder(partner_backups)
assign_files = backups[0].assignment.files
files_list, diff_stats_list, partner_files_list = [], [], []
for i, backup in enumerate(backups):
if not i: # first unique backup => no diff
continue
prev = backups[i - 1].files()
curr = backup.files()
files = highlight.diff_files(prev, curr, "short")
files_list.append(files)
backup_stats = {
'submitter': backup.submitter.email,
'backup_id' : backup.hashid,
'bid': backup.id,
'partner_backup_id': None,
'partner_bid': None,
'question': None,
'time': None,
'passed': None,
'failed': None
}
analytics = backup and backup.analytics()
grading = backup and backup.grading()
partner_backup_files = None
if analytics:
backup_stats['time'] = analytics.get('time')
partner_backup = get_recent_backup(analytics)
if partner_backup:
backup_stats["partner_backup_id"] = partner_backup.hashid
backup_stats["partner_bid"] = partner_backup.id
partner_backup_files = highlight.diff_files(partner_backup.files(), curr, "short")
if grading:
questions = list(grading.keys())
question = None
passed, failed = 0, 0
for question in questions:
passed += grading.get(question).get('passed')
                    failed += grading.get(question).get('failed')
if len(questions) > 1:
question = questions
backup_stats['question'] = question
backup_stats['passed'] = passed
backup_stats['failed'] = failed
else:
unlock = backup.unlocking()
backup_stats['question'] = "[Unlocking] " + unlock.split(">")[0]
diff_stats_list.append(backup_stats)
partner_files_list.append(partner_backup_files)
diff_dict = {
"stats": diff_stats_list,
"files": files_list,
"partners": partner_files_list,
"prev_backup_id": prev_backup_id,
"backup_id": backup_id,
"next_backup_id": next_backup_id
}
return diff_dict
| 11,195
|
def admits_voc_list(cid: CID) -> List[str]:
"""
Return list of nodes in cid with positive value of control.
"""
return [x for x in list(cid.nodes) if admits_voc(cid, x)]
| 11,196
|
def test_medicinalproductpackaged_1(base_settings):
"""No. 1 tests collection for MedicinalProductPackaged.
Test File: medicinalproductpackaged-example.json
"""
filename = (
base_settings["unittest_data_dir"] / "medicinalproductpackaged-example.json"
)
inst = medicinalproductpackaged.MedicinalProductPackaged.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "MedicinalProductPackaged" == inst.resource_type
impl_medicinalproductpackaged_1(inst)
    # test the reverse direction by generating data from the instance and constructing it again.
data = inst.dict()
assert "MedicinalProductPackaged" == data["resourceType"]
inst2 = medicinalproductpackaged.MedicinalProductPackaged(**data)
impl_medicinalproductpackaged_1(inst2)
| 11,197
|
def __load_config_file(file_name):
"""
    Loads variables and constants from a YAML config file and turns them into the module's global variables
    :param file_name: str, config file name
:return: None
"""
with open(file_name) as f:
data_map = yaml.safe_load(f)
globals().update(data_map)
globals()['file_ext'] = tuple(globals()['file_ext'])
| 11,198
|
def build_lib() -> None:
"""Build the package."""
subprocess.run(["nbdev_build_lib"])
| 11,199
|