| content | id |
|---|---|
def choose(text: str, prompt: str, options: Dict[str, str], suggestion: str, none_allowed: bool):
"""
Helper function to ask user to select from a list of options (with optional description).
Suggestion can be given. 'None' can be allowed as a valid input value.
"""
p = ColorPrint()
key_list = list(options.keys())
p.print('\n'.join(wrap(text + ':', 80)))
p.print('{!y}[')
for k in range(len(key_list)):
elem = key_list[k]
description = options[elem]
if description:
p.print(' {!m}#{k}{!} {!y}{elem}{!}:', k=k, elem=elem)
for line in description.split('\n'):
p.print(' {line}', line=line)
else:
p.print(' {!m}#{k}{!} {!y}{elem}{!}', k=k, elem=elem)
p.print('{!y}]')
p.print('Selection can be made by unique prefix or index.')
while True:
val = ask(prompt, suggestion, str, none_allowed)
if val is None:
return val
try:
index = int(val)
if index in range(len(key_list)):
return key_list[index]
else:
p.error('{!r}No match for given index.')
except ValueError:
matches = [key for key in options.keys() if key[:len(val)] == val]
if len(matches) == 0:
p.error('{!r}No match for given substring.')
elif len(matches) > 1:
p.error('{!r}Selection not unique for given substring.')
else:
return matches[0]
| 5,339,000
|
def gen(n):
""" (int) -> generator
Generate n, n - 2, n - 4, ..., down to 0 for even n (or 1 for odd n)
"""
for i in range(n, -1, -2):
yield i
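# Usage sketch (added for illustration; not part of the original source):
# the generator steps down by 2, ending at 0 for even n and at 1 for odd n.
print(list(gen(8)))  # [8, 6, 4, 2, 0]
print(list(gen(7)))  # [7, 5, 3, 1]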
| 5,339,001
|
def visualize_Large(model_name, feature):
"""
visualize one large plot for a model and
color it according to certain feature labels
:param model_name: name of the model whose embedding file (<model_name>_embedding.npy) is loaded
:param feature: feature whose labels are used to color the points
"""
embedding = ut.load_numpy_file(ut.embedding_path + model_name + "_embedding.npy")
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(embedding)
x = tsne_results[:, 0]
y = tsne_results[:, 1]
area = np.pi * 3
dot_colors = ["blue", "red", "orange", "green", "yellow", "cyan", "purple", "black", "pink"]
label_types = [feature + "_lables"]
plot_secondary_index = 0
for label_type in label_types:
if label_type == "node_labels":
labels = ut.node_labels
else:
labels = ut.load_numpy_file(ut.topo_features_labels_path + label_type + ".npy")
number_classes = len(set(labels))
xc = []
yc = []
for c in range(0, number_classes):
xc.append([])
yc.append([])
for i in range(0, len(ut.graph.nodes)):
if labels[i] == c:
xc[c].append(x[i])
yc[c].append(y[i])
plt.scatter(xc[c], yc[c], s=area, c=dot_colors[c], alpha=0.5)
plot_secondary_index += 1
plt.show()
| 5,339,002
|
def get_environ_list(name, default=None):
"""Return the split colon-delimited list from an environment variable.
Returns an empty list if the variable didn't exist.
"""
packed = os.environ.get(name)
if packed is not None:
return packed.split(':')
elif default is not None:
return default
else:
return []
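# Usage sketch (added for illustration; not part of the original source).
# DEMO_SEARCH_PATH is a hypothetical variable set here just for the example.
import os

os.environ["DEMO_SEARCH_PATH"] = "/usr/local/bin:/usr/bin"
print(get_environ_list("DEMO_SEARCH_PATH"))                      # ['/usr/local/bin', '/usr/bin']
print(get_environ_list("DEMO_UNSET_VAR", default=["fallback"]))  # ['fallback']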
| 5,339,003
|
def util_color(
graph: list[list[int]], max_color: int, colored_vertices: list[int], index: int
) -> bool:
"""
Flow:
1. Check whether the coloring is complete
1.1 If it is complete, return True
(meaning we successfully colored the graph)
Recursive step:
2. Iterate over every color:
Check whether the current coloring is valid:
2.1. Color the given vertex
2.2. Do a recursive call to check whether
this coloring leads to a solution
2.3. If the current coloring leads to a solution, return it
2.4. Uncolor the given vertex
>>> graph = [[0, 1, 0, 0, 0],
... [1, 0, 1, 0, 1],
... [0, 1, 0, 1, 0],
... [0, 1, 1, 0, 0],
... [0, 1, 0, 0, 0]]
>>> max_colors = 3
>>> colored_vertices = [0, 1, 0, 0, 0]
>>> index = 3
>>> util_color(graph, max_colors, colored_vertices, index)
True
>>> max_colors = 2
>>> util_color(graph, max_colors, colored_vertices, index)
False
"""
if index == len(graph):
return True
for i in range(max_color):
if coloring(graph[index], colored_vertices, i):
colored_vertices[index] = i
if util_color(graph, max_color, colored_vertices, index + 1):
return True
colored_vertices[index] = -1  # backtrack: uncolor this vertex
return False
| 5,339,004
|
def reverse_complement(sequence):
""" Return reverse complement of a sequence. """
complement_bases = {
'g':'c', 'c':'g', 'a':'t', 't':'a', 'n':'n',
'G':'C', 'C':'G', 'A':'T', 'T':'A', 'N':'N', "-":"-",
"R":"Y", "Y":"R", "S":"W", "W":"S", "K":"M", "M":"K",
"B":"V", "V":"B", "D": "H", "H": "D",
"r":"y", "y":"r", "s":"w", "w":"s", "k":"m", "m":"k",
"b":"v", "v":"b", "d": "h", "h": "d"
}
bases = list(sequence)
bases.reverse()
revcomp = []
for base in bases:
try:
revcomp.append(complement_bases[base])
except KeyError:
print("Unexpected base encountered: ", base, " returned as X!!!")
revcomp.append("X")
return "".join(revcomp)
| 5,339,005
|
def saveT(T, sbs, out):
"""
Save a complex T matrix to a text file. The input (shape 2x2xN) is transposed to
Nx2x2 and dumped as a CSV whose column order is given by the header written below
(sideband index first, then interleaved real/imaginary components).
:param T: complex T matrices, shape 2x2xN
:param sbs: values written as the first (``SB``) column, one per row
:param out: output file path
:return: None
"""
T = np.array(T.transpose(2, 0, 1))
## Don't rely on numpy's .view() for complex types here: it has reordered the
# real/imaginary columns inconsistently between datasets, producing either
# ReT++,ReT+-,ReT-+,ReT--,ImT++,ImT+-,ImT-+,ImT--
# or
# ReT++,ImT++,ReT+-,ImT+-,ReT-+,ImT-+,ReT--,ImT--
# (01/04/19). When in doubt, save and reload the data and check that the disk
# and in-memory matrices agree. The real and imaginary parts are therefore
# split out explicitly below instead of using:
# flatT = T.reshape(-1, 4).view(float).reshape(-1, 8)
flatT = T.reshape(-1, 4)
flatT = np.column_stack((flatT.real, flatT.imag))
# I'm also going to complicate this, because I want to save it like qile's matlab
# code save his files, so that we can use the same loading.
# As of 12/19/18, I believe the above code should be ordering columns as,
### 0 1 2 3 4 5 6 7
### ReT++,ReT+-,ReT-+,ReT--,ImT++,ImT+-,ImT-+,ImT--
# Qile saves as
### 0 1 2 3 4 5 6 7
### ReT--,ImT--,ReT+-,ImT+-,ReT-+,ImT-+,ReT++,ImT++
reorder = [ 3, 7, 1, 5, 2, 6, 0, 4 ]
flatT = flatT[:, reorder]
flatT = np.column_stack((sbs, flatT))
header = "SB,ReT++,ImT++,ReT+-,ImT+-,ReT-+,ImT-+,ReT--,ImT--"
header = "SB,ReT++,ReT+-,ReT-+,ReT--,Im++,Im+-,Im-+,Im--"
header = "SB,ReT--,ImT--,ReT+-,ImT+-,ReT-+,ImT-+,ReT++,ImT++"
np.savetxt(out,
flatT, header=header, comments='', delimiter=',',
fmt="%.6f")
print("saved {}\n".format(out))
| 5,339,006
|
def get_variable_ddi(
name, shape, value, init, initializer=None, dtype=tf.float32,
regularizer=None, trainable=True):
"""Wrapper for data-dependent initialization."""
kwargs = {"trainable": trainable}
if initializer:
kwargs["initializer"] = initializer
if regularizer:
kwargs["regularizer"] = regularizer
w = tf.get_variable(name, shape, dtype, **kwargs)
if isinstance(init, bool):
if init:
return assign(w, value)
return w
else:
return tf.cond(init, lambda: assign(w, value), lambda: w)
| 5,339,007
|
def convert_to_mp3(path, start=None, end=None, cleanup_after_done=True):
"""Covert to mp3 using the python ffmpeg module."""
new_name = path + '_new.mp3'
params = {
"loglevel": "panic",
"ar": 44100,
"ac": 2,
"ab": '{}k'.format(defaults.DEFAULT.SONG_QUALITY),
"f": "mp3"
}
try:
if start is not None and end is not None:
params["ss"] = start
params["to"] = end
job = ffmpeg.input(path).output(
new_name,
**params
)
job.run()
# Delete the temp file now
if cleanup_after_done:
remove(path)
return new_name
except ffmpeg._run.Error:
# This error is usually thrown where ffmpeg doesn't have to
# overwrite a file.
# The bug is from ffmpeg, I'm just adding this catch to
# handle that.
return new_name
| 5,339,008
|
def conv3x3(in_planes, out_planes, Conv=nn.Conv2d, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return Conv(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
| 5,339,009
|
async def async_setup(hass, config):
"""Set up the AirVisual component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
hass.data[DOMAIN][DATA_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
| 5,339,010
|
def hasConnection(document):
"""
Check whether document has a child of :class:`Sea.adapter.connection.Connection`.
:param document: a :class:`FreeCAD.Document` instance
"""
return _hasObject(document, 'Connection')
| 5,339,011
|
def trsfrm_aggregeate_mulindex(df:pd.DataFrame,
grouped_cols:List[str],
agg_col:str,
operation:str,
k:int=5):
"""transform aggregate statistics for multiindex
Examples:
>>> df_agg = trsfrm_aggregeate_mulindex( df_train, ["store", "item"], 'sales', 'mean')
"""
cols = ["sum", "mean", "median", "std", "min", "max", "skew"]
lvl0,lvl1 = grouped_cols
df_agg = pd.DataFrame( df.groupby(grouped_cols)[agg_col].agg(cols) )[operation]
df_agg = df_agg.groupby(level=lvl0).nlargest(k).reset_index(level=1, drop=True)
df_agg = df_agg.reset_index()
df_agg[lvl1] = df_agg[lvl1].astype('category')
return df_agg
| 5,339,012
|
def get_hub_virtual_network_connection(connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_hub_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHubVirtualNetworkConnectionResult:
"""
HubVirtualNetworkConnection Resource.
:param str connection_name: The name of the vpn connection.
:param str resource_group_name: The resource group name of the VirtualHub.
:param str virtual_hub_name: The name of the VirtualHub.
"""
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['virtualHubName'] = virtual_hub_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200501:getHubVirtualNetworkConnection', __args__, opts=opts, typ=GetHubVirtualNetworkConnectionResult).value
return AwaitableGetHubVirtualNetworkConnectionResult(
allow_hub_to_remote_vnet_transit=__ret__.allow_hub_to_remote_vnet_transit,
allow_remote_vnet_to_use_hub_vnet_gateways=__ret__.allow_remote_vnet_to_use_hub_vnet_gateways,
enable_internet_security=__ret__.enable_internet_security,
etag=__ret__.etag,
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
remote_virtual_network=__ret__.remote_virtual_network,
routing_configuration=__ret__.routing_configuration)
| 5,339,013
|
def run_ab3dmot(
classname: str,
pose_dir: str,
dets_dump_dir: str,
tracks_dump_dir: str,
max_age: int = 3,
min_hits: int = 1,
min_conf: float = 0.3
) -> None:
"""
Run AB3DMOT tracking over Argoverse detections; predicted labels are written
into per_sweep_annotations_amodal/ under tracks_dump_dir for each log.
Filtering occurs in the city frame, not the egovehicle frame.
Args:
- classname: string, either 'VEHICLE' or 'PEDESTRIAN'
- pose_dir: path to the Argoverse tracking dataset split containing poses
- dets_dump_dir: path to the dumped detections
- tracks_dump_dir: path where tracked object labels will be written
- max_age: integer, max number of frames a track is kept without a matched detection
- min_hits: integer, min number of matched detections before a track is reported
- min_conf: float, minimum detection confidence to keep
Returns:
- None
"""
dl = SimpleArgoverseTrackingDataLoader(data_dir=pose_dir, labels_dir=dets_dump_dir)
for log_id in tqdm(dl.sdb.get_valid_logs()):
print(log_id)
labels_folder = dets_dump_dir + "/" + log_id + "/per_sweep_annotations_amodal/"
lis = os.listdir(labels_folder)
lidar_timestamps = [ int(file.split(".")[0].split("_")[-1]) for file in lis]
lidar_timestamps.sort()
previous_frame_bbox = []
ab3dmot = AB3DMOT(max_age=max_age,min_hits=min_hits)
print(labels_folder)
tracked_labels_copy = []
for j, current_lidar_timestamp in enumerate(lidar_timestamps):
#print(current_lidar_timestamp)
dets = dl.get_labels_at_lidar_timestamp(log_id, current_lidar_timestamp)
#print(f'There are {len(dets)} detections!')
dets_copy = dets
transforms = []
city_SE3_egovehicle = dl.get_city_to_egovehicle_se3(log_id, current_lidar_timestamp)
egovehicle_SE3_city = city_SE3_egovehicle.inverse()
transformed_labels = []
sdc_labels = []
for l_idx, l in enumerate(dets):
if l['label_class'] != classname:
# will revisit in other tracking pass
continue
if l["score"] < min_conf:
# print('Skipping det with confidence ', l["score"])
continue
det_obj = json_label_dict_to_obj_record(l)
det_corners_egovehicle_fr = det_obj.as_3d_bbox()
transforms += [city_SE3_egovehicle]
if city_SE3_egovehicle is None:
print('Was None')
# convert detection from egovehicle frame to city frame
det_corners_city_fr = city_SE3_egovehicle.transform_point_cloud(det_corners_egovehicle_fr)
ego_xyz = np.mean(det_corners_city_fr, axis=0)
origin = np.zeros((1,3))
origin = city_SE3_egovehicle.transform_point_cloud(origin)
#get vehicle frame xyz
sdc_xyz = np.mean(det_corners_city_fr, axis=0)
sdc_xyz -= origin[0]
# print(origin)
sdc_labels += [ [sdc_xyz[0], sdc_xyz[1], sdc_xyz[2]] ]
yaw = yaw_from_bbox_corners(det_corners_city_fr)
transformed_labels += [ [ego_xyz[0], ego_xyz[1], ego_xyz[2], yaw, l["length"],l["width"],l["height"]] ]
if len(transformed_labels) > 0:
transformed_labels = np.array(transformed_labels)
else:
transformed_labels = np.empty((0,7))
if len(sdc_labels) > 0:
sdc_labels = np.array(sdc_labels)
else:
sdc_labels = np.empty((0,3))
# print(sdc_labels)
dets_all = {
"dets":transformed_labels,
"info": np.zeros(transformed_labels.shape),
"sdc":sdc_labels
}
# perform measurement update in the city frame.
dets_with_object_id = ab3dmot.update(dets_all)
tracked_labels = []
for det in dets_with_object_id:
# move city frame tracks back to ego-vehicle frame
xyz_city = np.array([det[0].item(), det[1].item(), det[2].item()]).reshape(1,3)
city_yaw_object = det[3]
city_se2_object = SE2(rotation=rotmat2d(city_yaw_object), translation=xyz_city.squeeze()[:2])
city_se2_egovehicle, city_yaw_ego = get_B_SE2_A(city_SE3_egovehicle)
ego_se2_city = city_se2_egovehicle.inverse()
egovehicle_se2_object = ego_se2_city.right_multiply_with_se2(city_se2_object)
# recreate all 8 points
# transform them
# compute yaw from 8 points once more
egovehicle_SE3_city = city_SE3_egovehicle.inverse()
xyz_ego = egovehicle_SE3_city.transform_point_cloud(xyz_city).squeeze()
# update for new yaw
# transform all 8 points at once, then compute yaw on the fly
ego_yaw_obj = se2_to_yaw(egovehicle_se2_object)
qx,qy,qz,qw = yaw_to_quaternion3d(ego_yaw_obj)
tracked_labels.append({
"center": {"x": xyz_ego[0], "y": xyz_ego[1], "z": xyz_ego[2]},
"rotation": {"x": qx , "y": qy, "z": qz , "w": qw},
"length": det[4],
"width": det[5],
"height": det[6],
"track_label_uuid": uuid_gen.get_uuid(det[7]),
"timestamp": current_lidar_timestamp ,
"label_class": classname
})
tracked_labels_copy = copy.deepcopy(tracked_labels)
label_dir = os.path.join(tracks_dump_dir, log_id, "per_sweep_annotations_amodal")
check_mkdir(label_dir)
json_fname = f"tracked_object_labels_{current_lidar_timestamp}.json"
json_fpath = os.path.join(label_dir, json_fname)
if Path(json_fpath).exists():
# accumulate tracks of another class together
prev_tracked_labels = read_json_file(json_fpath)
tracked_labels.extend(prev_tracked_labels)
save_json_dict(json_fpath, tracked_labels)
| 5,339,014
|
def newcombe_binomial_ratio_err(k1,n1, k2,n2, z=1.0):
""" Newcombe-Brice-Bonnett ratio confidence interval of two binomial proportions.
"""
RR = (k1/n1) / (k2/n2) # mean
logRR = np.log(RR)
seLogRR = np.sqrt(1/k1 + 1/k2 - 1/n1 - 1/n2)
ash = 2 * np.arcsinh(z/2 * seLogRR)
lower = np.exp(logRR - ash)
upper = np.exp(logRR + ash)
return np.array([lower, upper])
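# Usage sketch (added for illustration; not part of the original source).
# Compare event proportions 30/100 vs. 20/100; z=1.96 gives an approximate 95%
# confidence interval for the risk ratio (k1/n1)/(k2/n2).
lower, upper = newcombe_binomial_ratio_err(30, 100, 20, 100, z=1.96)
print(f"RR 95% CI: [{lower:.3f}, {upper:.3f}]")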
| 5,339,015
|
def parse_metrics(rpcs: Any, detokenizer: Optional[detokenize.Detokenizer],
timeout_s: Optional[float]):
"""Detokenizes metric names and retrieves their values."""
# Creates a defaultdict that can infinitely have other defaultdicts
# without a specified type.
metrics: defaultdict = _tree()
if not detokenizer:
_LOG.error('No metrics token database set.')
return metrics
stream_response = rpcs.pw.metric.MetricService.Get(
pw_rpc_timeout_s=timeout_s)
if not stream_response.status.ok():
_LOG.error('Unexpected status %s', stream_response.status)
return metrics
for metric_response in stream_response.responses:
for metric in metric_response.metrics:
path_names = []
for path in metric.token_path:
path_name = str(
detokenize.DetokenizedString(path,
detokenizer.lookup(path), b'',
False)).strip('"')
path_names.append(path_name)
value = metric.as_float if metric.HasField(
'as_float') else metric.as_int
# inserting path_names into metrics.
_insert(metrics, path_names, value)
# Converts default dict objects into standard dictionaries.
return json.loads(json.dumps(metrics))
| 5,339,016
|
def missing_logic(scenario, fn):
"""Figure out what to do when this filename is missing"""
print("Searching for replacement for '%s'" % (fn,))
lon = float(fn[17:23])
lat = float(fn[24:30])
if not os.path.isdir(os.path.dirname(fn)):
os.makedirs(os.path.dirname(fn))
# So there should be a file at an interval of 0.25
lon2 = lon - (lon * 100 % 25) / 100.0
lat2 = lat - (lat * 100 % 25) / 100.0
testfn = get_cli_fname(lon2, lat2, scenario)
if not os.path.isfile(testfn):
print("Whoa, why doesn't %s exist?" % (testfn,))
sys.exit()
print("%s->%s" % (testfn, fn))
shutil.copyfile(testfn, fn)
| 5,339,017
|
def cigarlist_to_cigarstring(cigar_list):
"""
Convert a list of tuples into a cigar string.
Example::
[ (0, 10), (1, 1), (0, 75), (2, 2), (0, 20) ]
=> 10M 1I 75M 2D 20M
=> 10M1I75M2D20M
:param cigar_list: a list of tuples (code, length)
:type cigar_list: list
:return: the cigar string
:rtype: string
:raises: :class:`.exceptions.G2GCigarFormatError` on invalid cigar string
"""
cigar = ''
if isinstance(cigar_list, Cigar):
try:
for i in cigar_list:
cigar += str(i.length) + i.code
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar code: " + str(i))
else:
try:
for i in cigar_list:
cigar += str(i[1]) + CIGAR_N2C[i[0]]
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar code: " + str(i))
return cigar
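# Usage sketch (added for illustration; not part of the original source).
# Assumes CIGAR_N2C from this module maps the usual numeric codes
# (0 -> 'M', 1 -> 'I', 2 -> 'D', ...), as in the docstring example.
cigar_tuples = [(0, 10), (1, 1), (0, 75), (2, 2), (0, 20)]
print(cigarlist_to_cigarstring(cigar_tuples))  # 10M1I75M2D20M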
| 5,339,018
|
def check_transaction_entries(tsplit, entries):
"""Check that a list of Transaction Entries are equivalent.
Validate a list of TransactionEntry objects are equivalent, meaning they
contain the same items but do not necessarily share ordering.
"""
assert tsplit.entries is not None
assert isinstance(tsplit.entries, list)
assert len(tsplit.entries) == len(entries)
for i in range(len(tsplit.entries)):
assert (
tsplit.entries[i].line.account,
tsplit.entries[i].amount,
tsplit.entries[i].currency,
) in entries
| 5,339,019
|
def add(x, y):
"""Add two numbers"""
return x+y
| 5,339,020
|
def generateCSR(host_id, key):
"""Generate a Certificate Signing Request"""
pod_name = os.environ['MY_POD_NAME']
namespace = os.environ['TEST_APP_NAMESPACE']
SANURI = f'spiffe://cluster.local/namespace/{namespace}/podname/{pod_name}'
req = crypto.X509Req()
req.get_subject().CN = host_id
req.set_pubkey(key)
formatted_SAN = f'URI:{SANURI}'
req.add_extensions([
crypto.X509Extension(
'subjectAltName'.encode('ascii'), False, formatted_SAN.encode('ascii')
)
])
req.sign(key, "sha1")
return crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)
| 5,339,021
|
def get_s4_function_details(carray, item):
"""
Gets function details for S4 Class Functions
Details are appended to carray
:param carray: list of per-class dictionaries; matched method details are appended to carray[i][cname]["public"]["methods"]
:param item: Node to be queried.
:type item: Node
:return: None
"""
found = False
etags, etxt, elist = get_child_list(item)
idx = find_sublist_index(['OP-LEFT-PAREN', 'expr', 'OP-COMMA', 'expr', 'OP-COMMA', 'expr'], etags)
idx2 = find_sublist_index(
['OP-LEFT-PAREN', 'expr', 'OP-COMMA', 'SYMBOL_SUB', 'EQ_SUB', 'expr', 'OP-COMMA', 'SYMBOL_SUB', 'EQ_SUB',
'expr'], etags)
if (len(idx)):
a, b = idx[0]
sym = find_child(elist[a + 1], 'STR_CONST')
if (sym != ''):
func = get_string_without_quotes(sym)
symlst = find_grand_children(elist[a + 3], 'STR_CONST')
if (len(symlst)):
cname = get_string_without_quotes(symlst[0])
line1 = elist[b].get("line1")
line2 = elist[b].get("line2")
tag3, txt3, el3 = get_child_list(elist[b])
if (tag3[0] == 'FUNCTION'):
found = True
elif len(idx2):
a, b = idx2[0]
sym = find_child(elist[a + 1], 'STR_CONST')
if (sym != ''):
func = get_string_without_quotes(sym)
symlst = find_grand_children(elist[a + 5], 'STR_CONST')
if (len(symlst)):
cname = get_string_without_quotes(symlst[0])
line1 = elist[b].get("line1")
line2 = elist[b].get("line2")
tag3, txt3, el3 = get_child_list(elist[b])
if (tag3[0] == 'FUNCTION'):
found = True
if (found):
for i in range(len(carray)):
if (cname in carray[i].keys()):
carray[i][cname]["public"]["methods"].append((func, line1, line2))
| 5,339,022
|
def sdecorator(decoratorHandleDelete: bool = False, expectedProperties: list = None, genUUID: bool = True,
enforceUseOfClass: bool = False, hideResourceDeleteFailure: bool = False,
redactConfig: RedactionConfig = None, timeoutFunction: bool = True):
"""Decorate a function to add input validation for resource handler functions, exception handling and send
CloudFormation responses.
Usage with Lambda:
import accustom
@accustom.sdecorator(expectedProperties=['key1','key2'],genUUID=False)
def resource_handler(event, context):
sum = (float(event['ResourceProperties']['key1']) +
float(event['ResourceProperties']['key2']))
return { 'sum' : sum }
Usage outside Lambda:
import accustom
@accustom.sdecorator(expectedProperties=['key1','key2'])
def resource_handler(event, context=None)
sum = (float(event['ResourceProperties']['key1']) +
float(event['ResourceProperties']['key2']))
r = accustom.ResponseObject(data={'sum':sum},physicalResourceId=event['PhysicalResourceId'])
return r
Args:
decoratorHandleDelete (boolean): When set to true, if a delete request is made in event the decorator will
return SUCCESS to CloudFormation without actually executing the decorated function
genUUID (boolean): When set to true, if the PhysicalResourceId in the event is not set, automatically generate
a UUID4 and put it in the PhysicalResourceId field.
expectedProperties (list of expected properties): Pass in a list or tuple of properties that you want to check
for before running the decorated function.
enforceUseOfClass (boolean): When true send a FAILED signal if a ResponseObject class is not utilised.
This is implicitly set to true if no Lambda Context is provided.
hideResourceDeleteFailure (boolean): When true will return SUCCESS even on getting an Exception for DELETE
requests. Note that this particular flag is made redundant if decoratorHandleDelete is set to True.
redactConfig (StandaloneRedactionConfig): Configuration of how to redact the event object.
timeoutFunction (boolean): Will automatically send a failure signal to CloudFormation 1 second before Lambda
timeout provided that this function is executed in Lambda
Returns:
The response object sent to CloudFormation
Raises:
FailedToSendResponseException
NotValidRequestObjectException
"""
if not isinstance(redactConfig, StandaloneRedactionConfig) and logger.getEffectiveLevel() <= logging.DEBUG:
logger.warning('A non valid StandaloneRedactionConfig was provided, and ignored')
redactConfig = None
def standalone_decorator_inner(func):
@wraps(func)
@decorator(enforceUseOfClass=enforceUseOfClass, hideResourceDeleteFailure=hideResourceDeleteFailure,
redactConfig=redactConfig, timeoutFunction=timeoutFunction)
@rdecorator(decoratorHandleDelete=decoratorHandleDelete, expectedProperties=expectedProperties, genUUID=genUUID)
def standalone_decorator_handler(event: dict, context: dict = None):
return func(event, context)
return standalone_decorator_handler
return standalone_decorator_inner
| 5,339,023
|
def export_jsx(session, program, start_date, end_date, title):
"""Do not mess with this function it exports roundups"""
filename = program + '.html'
f = open(filename, 'w')
opening_wrapper = f"""<html>
<head>
<title>{title}</title>
</head>
<body><p>{title}</p>"""
f.write(opening_wrapper)
section_query = session.query(Section)
section_query = section_query.all()
for section in section_query:
f.write(section.wrapped_jsx_string)
for category in section.categories:
f.write(category.wrapped_jsx_string)
entry_map = map(wrapStringJSX, [i for i in category.entries if (i.date >=start_date) and (i.date <=end_date)])
entry_str = '\n'.join(entry_map)
f.write(entry_str)
#for entry in category.entries:
#if (entry.date >= start_date) and (entry.date <= end_date):
#f.write(entry.wrapped_html_string)
closing_wrapper = """</body>
</html>"""
f.write(closing_wrapper)
f.close()
| 5,339,024
|
def send_mail(user,server):
#def send_mail():
"""Sending the mails by voice 3 arguments required
1.to_address
2.subject
3.Message
these all info you can give by voice"""
fromaddr = '' #your email_id from this it can send the mails
tolist = to_addrs()
#print tolist
sub = subj()
#print sub
body1 = body()
#print body1
msg = email.MIMEMultipart.MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = email.Utils.COMMASPACE.join(tolist)
msg['Subject'] = sub
msg.attach(MIMEText(body1))
msg.attach(MIMEText('\n\n\nsent via JarvisPython', 'plain'))
server.sendmail(user,tolist,msg.as_string())
voiceOut.Speak('message is sent to respected person thank you')
| 5,339,025
|
def clean_acl(name, value):
"""
Returns a cleaned ACL header value, validating that it meets the formatting
requirements for standard Swift ACL strings.
The ACL format is::
[item[,item...]]
Each item can be a group name to give access to or a referrer designation
to grant or deny based on the HTTP Referer header.
The referrer designation format is::
.r:[-]value
The ``.r`` can also be ``.ref``, ``.referer``, or ``.referrer``; though it
will be shortened to just ``.r`` for decreased character count usage.
The value can be ``*`` to specify any referrer host is allowed access, a
specific host name like ``www.example.com``, or if it has a leading period
``.`` or leading ``*.`` it is a domain name specification, like
``.example.com`` or ``*.example.com``. The leading minus sign ``-``
indicates referrer hosts that should be denied access.
Referrer access is applied in the order they are specified. For example,
.r:.example.com,.r:-thief.example.com would allow all hosts ending with
.example.com except for the specific host thief.example.com.
Example valid ACLs::
.r:*
.r:*,.r:-.thief.com
.r:*,.r:.example.com,.r:-thief.example.com
.r:*,.r:-.thief.com,bobs_account,sues_account:sue
bobs_account,sues_account:sue
Example invalid ACLs::
.r:
.r:-
By default, allowing read access via .r will not allow listing objects in
the container -- just retrieving objects from the container. To turn on
listings, use the .rlistings directive.
Also, .r designations aren't allowed in headers whose names include the
word 'write'.
ACLs that are "messy" will be cleaned up. Examples:
====================== ======================
Original Cleaned
---------------------- ----------------------
``bob, sue`` ``bob,sue``
``bob , sue`` ``bob,sue``
``bob,,,sue`` ``bob,sue``
``.referrer : *`` ``.r:*``
``.ref:*.example.com`` ``.r:.example.com``
``.r:*, .rlistings`` ``.r:*,.rlistings``
====================== ======================
:param name: The name of the header being cleaned, such as X-Container-Read
or X-Container-Write.
:param value: The value of the header being cleaned.
:returns: The value, cleaned of extraneous formatting.
:raises ValueError: If the value does not meet the ACL formatting
requirements; the error message will indicate why.
"""
name = name.lower()
values = []
for raw_value in value.split(','):
raw_value = raw_value.strip()
if not raw_value:
continue
if ':' not in raw_value:
values.append(raw_value)
continue
first, second = (v.strip() for v in raw_value.split(':', 1))
if not first or not first.startswith('.'):
values.append(raw_value)
elif first in ('.r', '.ref', '.referer', '.referrer'):
if 'write' in name:
raise ValueError('Referrers not allowed in write ACL: '
'%s' % repr(raw_value))
negate = False
if second and second.startswith('-'):
negate = True
second = second[1:].strip()
if second and second != '*' and second.startswith('*'):
second = second[1:].strip()
if not second or second == '.':
raise ValueError('No host/domain value after referrer '
'designation in ACL: %s' % repr(raw_value))
values.append('.r:%s%s' % ('-' if negate else '', second))
else:
raise ValueError('Unknown designator %s in ACL: %s' %
(repr(first), repr(raw_value)))
return ','.join(values)
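# Usage sketch (added for illustration; not part of the original source),
# mirroring the "messy vs. cleaned" examples in the docstring.
print(clean_acl('X-Container-Read', '.referrer : *, bob , sue'))  # .r:*,bob,sue
print(clean_acl('X-Container-Read', '.r:*, .rlistings'))          # .r:*,.rlistings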
| 5,339,026
|
def test(X, Y, perms=10000, method="pearson", tail="two-tail", ignore_nans=False):
"""
Takes two distance matrices (either redundant matrices or condensed vectors)
and performs a Mantel test. The Mantel test is a significance test of the
correlation between two distance matrices.
Parameters
----------
X : array_like
First distance matrix (condensed or redundant).
Y : array_like
Second distance matrix (condensed or redundant), where the order of
elements corresponds to the order of elements in the first matrix.
perms : int, optional
The number of permutations to perform (default: 10000). A larger
number gives more reliable results but takes longer to run. If the
number of possible permutations is smaller, all permutations will
be tested. This can be forced by setting perms to 0.
method : str, optional
Type of correlation coefficient to use; either 'pearson' or 'spearman'
(default: 'pearson').
tail : str, optional
Which tail to test in the calculation of the empirical p-value; either
'upper', 'lower', or 'two-tail' (default: 'two-tail').
ignore_nans : bool, optional
Ignore NaN values in the Y matrix (default: False). This can be
useful if you have missing values in one of the matrices.
Returns
-------
r : float
Veridical correlation
p : float
Empirical p-value
z : float
Standard score (z-score)
"""
# Ensure that X and Y are represented as Numpy arrays.
X = np.asarray(X)
Y = np.asarray(Y)
# Check that X and Y are valid distance matrices.
if (
spatial.distance.is_valid_dm(np.nan_to_num(X)) == False
and spatial.distance.is_valid_y(X) == False
):
raise ValueError("X is not a valid condensed or redundant distance matrix")
if (
spatial.distance.is_valid_dm(np.nan_to_num(Y)) == False
and spatial.distance.is_valid_y(Y) == False
):
raise ValueError("Y is not a valid condensed or redundant distance matrix")
# If X or Y is a redundant distance matrix, reduce it to a condensed distance matrix.
if len(X.shape) == 2:
X = spatial.distance.squareform(X, force="tovector", checks=False)
if len(Y.shape) == 2:
Y = spatial.distance.squareform(Y, force="tovector", checks=False)
# Check for size equality.
if len(X) != len(Y):
raise ValueError("X and Y are not of equal size")
# Check for minimum size.
if len(X) < 3:
raise ValueError("X and Y should represent at least 3 objects")
# Check finiteness of X and Y
if not np.isfinite(X).all():
raise ValueError(
"X cannot contain NaNs (but Y may contain NaNs, so consider reordering X and Y)"
)
finite_Y = np.isfinite(Y)
if not ignore_nans and not finite_Y.all():
raise ValueError('Y may contain NaNs, but "ignore_nans" must be set to True')
if ignore_nans and finite_Y.all():
ignore_nans = False # ignore_nans is True but Y contains no nans
# If Spearman correlation is requested, convert X and Y to ranks.
method = method.lower()
if method == "spearman":
X, Y = stats.rankdata(X), stats.rankdata(Y)
Y[~finite_Y] = np.nan # retain any nans, so that these can be ignored later
# Check for valid method parameter.
elif method != "pearson":
raise ValueError('The method should be set to "pearson" or "spearman"')
# Check for valid tail parameter.
tail = tail.lower()
if tail not in ["upper", "lower", "two-tail"]:
raise ValueError('The tail should be set to "upper", "lower", or "two-tail"')
# Now we're ready to start the Mantel test using a number of optimizations:
#
# 1. Rather than compute correlation coefficients, we'll just compute the
# covariances. This works because the denominator in the equation for the
# correlation coefficient will yield the same result however the objects
# are permuted, making it redundant. Removing the denominator leaves us
# with the covariance.
#
# 2. Rather than permute the Y distances and derive the residuals to calculate
# the covariance with the X distances, we'll represent the Y residuals in
# the matrix and shuffle those directly.
#
# 3. If the number of possible permutations is less than the number of
# permutations that were requested, we'll run a deterministic test where
# we try all possible permutations rather than sample the permutation
# space. This gives a faster, deterministic result.
# Calculate the X and Y residuals, which will be used to compute the
# covariance under each permutation.
X_residuals = X - np.mean(X[finite_Y])
Y_residuals = Y - np.mean(Y[finite_Y])
# Expand the Y residuals to a redundant matrix.
Y_residuals_as_matrix = spatial.distance.squareform(
Y_residuals, force="tomatrix", checks=False
)
m = len(Y_residuals_as_matrix) # number of objects
n = np.math.factorial(m) # number of possible matrix permutations
# If the number of requested permutations is greater than the number of
# possible permutations (m!) or the perms parameter is set to 0, then run a
# deterministic Mantel test
if perms >= n or perms == 0:
if ignore_nans:
correlations = deterministic_test_with_nans(m, n, X, Y_residuals_as_matrix)
else:
correlations = deterministic_test(m, n, X_residuals, Y_residuals_as_matrix)
# correlations[0] is the veridical correlation
else:
if ignore_nans:
correlations = stochastic_test_with_nans(m, perms, X, Y_residuals_as_matrix)
else:
correlations = stochastic_test(m, perms, X_residuals, Y_residuals_as_matrix)
correlations[0] = sum(X_residuals[finite_Y] * Y_residuals[finite_Y]) / np.sqrt(
sum(X_residuals[finite_Y] ** 2) * sum(Y_residuals[finite_Y] ** 2)
) # compute veridical correlation and place in positon 0
r = correlations[0]
if tail == "upper":
p = sum(correlations >= r) / len(correlations)
elif tail == "lower":
p = sum(correlations <= r) / len(correlations)
elif tail == "two-tail":
p = sum(abs(correlations) >= abs(r)) / len(correlations)
z = (r - np.mean(correlations)) / np.std(correlations)
return r, p, z
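# Usage sketch (added for illustration; not part of the original source). It
# assumes this module's permutation helpers (stochastic_test, deterministic_test,
# ...) are available, since test() dispatches to them internally.
import numpy as np
from scipy.spatial import distance

rng = np.random.default_rng(0)
points = rng.random((20, 3))
X = distance.pdist(points)                              # condensed distance matrix
Y = distance.pdist(points + 0.1 * rng.random((20, 3)))  # a noisy copy of the same geometry
r, p, z = test(X, Y, perms=1000, method="pearson", tail="two-tail")
print(f"r={r:.3f}, p={p:.3f}, z={z:.2f}")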
| 5,339,027
|
def get_format_datestr(date_str, to_format='%Y-%m-%d'):
"""
Args:
date_str (str): ''
to_format (str): '%Y-%m-%d'
Returns:
date string (str)
"""
date_obj = parser.parse(date_str).date()
return date_obj.strftime(to_format)
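# Usage sketch (added for illustration; not part of the original source).
# Assumes `parser` is dateutil.parser, as used in the function body.
print(get_format_datestr("March 5, 2021"))           # 2021-03-05
print(get_format_datestr("2021-03-05", "%d/%m/%Y"))  # 05/03/2021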
| 5,339,028
|
def lor(*goalconsts):
""" Logical or for goal constructors
>>> from logpy.arith import lor, eq, gt
>>> gte = lor(eq, gt) # greater than or equal to is `eq or gt`
"""
def goal(*args):
return lany(*[gc(*args) for gc in goalconsts])
return goal
| 5,339,029
|
def kalman_smoother(Z, M_inv, plotting=False):
"""
Kalman smoother over position/force observations.
Z: observations (positions and forces), shape (t_steps, 2*6)
M_inv: per-timestep 6x6 matrices (inverse mass, by name) used in the
acceleration rows of the transition model
plotting: if True, plot the smoothed states and the raw observations
Internally, F is the state transition model, H the observation matrix,
Q the process covariance, and R the observation covariance.
"""
t_steps = Z.shape[0]
x0 = np.r_[Z[0,0:6],
np.zeros(4*6)]
P0 = np.eye(5*6)
P0[0*6:1*6,0*6:1*6] *= 0.005 # small
P0[1*6:2*6,1*6:2*6] *= 0 # small
P0[2*6:3*6,2*6:3*6] *= 0 # small
P0[3*6:4*6,3*6:4*6] *= 100 # medium # TODO
P0[4*6:5*6,4*6:5*6] *= 1 # high # TODO
# transition matrix
F = np.zeros((t_steps-1, 5*6, 5*6))
# observation matrix
H = np.r_[np.c_[ np.eye(6), np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6))],
np.c_[np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.eye(6)]]
for t in range(t_steps-1):
F[t] = np.r_[np.c_[ np.eye(6), DT*np.eye(6), (DT**2)*np.eye(6), np.zeros((6,6)), np.zeros((6,6))],
np.c_[np.zeros((6,6)), np.eye(6), DT*np.eye(6), np.zeros((6,6)), np.zeros((6,6))],
np.c_[np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), M_inv[t], M_inv[t]],
np.c_[np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.eye(6), np.zeros((6,6))],
np.c_[np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.eye(6)]]
# transition covariance
Q = np.eye(5*6)
Q[0*6:1*6,0*6:1*6] *= 0.005 # small
Q[1*6:2*6,1*6:2*6] *= (0.005/DT) # small
Q[2*6:3*6,2*6:3*6] *= (1/(DT*DT)) # small
Q[3*6:4*6,3*6:4*6] *= 100 # medium # TODO
Q[4*6:5*6,4*6:5*6] *= 1 # high # TODO
# observation covariance
R = np.eye(2*6)
R[0*6:1*6,0*6:1*6] *= 0.005
R[1*6:2*6,1*6:2*6] *= 5 # TODO
def em_transition_matrix(transition_offsets, smoothed_state_means,
smoothed_state_covariances, pairwise_covariances):
res = F.copy()
n_timesteps, n_dim_state, _ = smoothed_state_covariances.shape
print "em_transition_matrix"
import time
time_start = time.time()
for tt in range(1, n_timesteps):
if tt % 100 == 0:
print(tt)
t_start = np.clip(tt-500, 1, n_timesteps)
t_end = np.clip(tt+500+1, 1, n_timesteps)
res1 = np.zeros((n_dim_state, n_dim_state))
res2 = np.zeros((n_dim_state, n_dim_state))
ws = np.exp(-((np.arange(t_start, t_end)-tt)**2)/(200.0**2))
ws /= ws.sum()
for t, w in zip(range(t_start, t_end), ws):
transition_offset = _last_dims(transition_offsets, t - 1, ndims=1)
res1 += w * (
pairwise_covariances[t]
+ np.outer(smoothed_state_means[t],
smoothed_state_means[t - 1])
- np.outer(transition_offset, smoothed_state_means[t - 1])
)
res2 += w * (
smoothed_state_covariances[t - 1]
+ np.outer(smoothed_state_means[t - 1],
smoothed_state_means[t - 1])
)
# res[tt-1] = np.linalg.solve(res2.T, res1.T).T
# M_inv = np.linalg.solve((res2[0*6:1*6,:] + res2[1*6:2*6,:]).T, res1[2*6:3*6,:].T)
F_tmp = np.dot(res1, np.linalg.pinv(res2))
m_inv0 = F_tmp[2*6:3*6,3*6:4*6]
m_inv1 = F_tmp[2*6:3*6,4*6:5*6]
m_inv = (m_inv0 + m_inv1) / 2.
res[tt-1,2*6:3*6,3*6:4*6] = m_inv
res[tt-1,2*6:3*6,4*6:5*6] = m_inv
print "time", time.time() - time_start
return res
kf = KalmanFilter(transition_matrices=F, observation_matrices=H, transition_covariance=Q, observation_covariance=R,
initial_state_mean=x0, initial_state_covariance=P0)
kf = kf.em(Z, n_iter=5, em_vars=['transition_covariance', 'observation_covariance'])
# kf = kf.em(Z, n_iter=5, em_vars=['transition_matrices'], em_transition_matrix=em_transition_matrix)
(X_smoothed, P_smoothed) = kf.smooth(Z)
if plotting:
plt.ion()
fig = plt.figure()
for i in range(5):
plt.subplot(5,2,2*i+1)
plt.plot(X_smoothed[:,6*i], 'r')
plt.plot(X_smoothed[:,6*i+1], 'g')
plt.plot(X_smoothed[:,6*i+2], 'b')
plt.subplot(5,2,2*i+1+1)
plt.plot(X_smoothed[:,6*i+3], 'r')
plt.plot(X_smoothed[:,6*i+4], 'g')
plt.plot(X_smoothed[:,6*i+5], 'b')
plt.draw()
fig = plt.figure()
for i in range(2):
plt.subplot(2,2,2*i+1)
plt.plot(Z[:,6*i], 'r')
plt.plot(Z[:,6*i+1], 'g')
plt.plot(Z[:,6*i+2], 'b')
plt.subplot(2,2,2*i+1+1)
plt.plot(Z[:,6*i+3], 'r')
plt.plot(Z[:,6*i+4], 'g')
plt.plot(Z[:,6*i+5], 'b')
plt.draw()
return X_smoothed
| 5,339,030
|
def create_barplot(ax, relevances, y_pred, x_lim=1.1, title='', x_label='', concept_names=None, **kwargs):
"""Creates a bar plot of relevances.
Parameters
----------
ax : pyplot axes object
The axes on which the bar plot should be created.
relevances: torch.tensor
The relevances for which the bar plot should be generated. shape: (1, NUM_CONCEPTS, NUM_CLASSES)
y_pred: torch.tensor (int)
The prediction of the model for the corresponding relevances. shape: scalar value
x_lim: float
the limits of the plot
title: str
the title of the plot
x_label: str
the label of the X-axis of the plot
concept_names: list[str]
the names of each feature on the plot
"""
# Example data
y_pred = y_pred.item()
if len(relevances.squeeze().size()) == 2:
relevances = relevances[:, y_pred]
relevances = relevances.squeeze()
if concept_names is None:
concept_names = ['C. {}'.format(i + 1) for i in range(len(relevances))]
else:
concept_names = concept_names.copy()
concept_names.reverse()
y_pos = np.arange(len(concept_names))
colors = ['b' if r > 0 else 'r' for r in relevances]
colors.reverse()
ax.barh(y_pos, np.flip(relevances.detach().cpu().numpy()), align='center', color=colors)
ax.set_yticks(y_pos)
ax.set_yticklabels(concept_names)
ax.set_xlim(-x_lim, x_lim)
ax.set_xlabel(x_label, fontsize=18)
ax.set_title(title, fontsize=18)
| 5,339,031
|
def deploy():
"""Run deployment tasks."""
from flask_migrate import init, migrate, upgrade
# migrate database to latest revision
try: init()
except Exception: pass  # migrations folder already initialized
migrate()
upgrade()
| 5,339,032
|
def get_convex_hull(coords, dim = 2, needs_at_least_n_points = 6): #FIXME restrict only for 2D?
"""
For fitting an ellipse, at least 6 points are needed
Parameters
----------
coords : 2D np.array of points
dim : dimensions to keep when calculating convex hull
Returns
---------
coords_hull : 2D np.array of points
keeps original number of dimension as input coords
"""
assert len(coords[0]) >= dim
hull = ConvexHull([i[:dim] for i in coords])
coords_hull = [coords[i] for i in range(len(coords)) if i in hull.vertices]
for i in range(needs_at_least_n_points - len(hull.vertices)):
coords_hull.append(0.9999 * coords_hull[i]) #making the point slightly different
coords_hull = np.array(coords_hull)
return coords_hull
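# Usage sketch (added for illustration; not part of the original source).
# Assumes scipy.spatial.ConvexHull is imported in this module, as the function requires.
import numpy as np

rng = np.random.default_rng(0)
points = rng.random((30, 2))
hull_points = get_convex_hull(points)
print(hull_points.shape)  # (n_hull_vertices, 2); padded up to 6 points if the hull is smaller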
| 5,339,033
|
def sk_page(
name,
html_file,
ts_entry_point,
scss_entry_point = None,
ts_deps = [],
sass_deps = [],
sk_element_deps = [],
assets_serving_path = "/",
nonce = None):
"""Builds a static HTML page, and its CSS and JavaScript development and production bundles.
This macro generates the following files, where <name> is the given target name:
development/<name>.html
development/<name>.js
development/<name>.css
production/<name>.html
production/<name>.js
production/<name>.css
The <name> target defined by this macro generates all of the above files.
Tags <script> and <link> will be inserted into the output HTML pointing to the generated
bundles. The serving path for said bundles defaults to "/" and can be overridden via the
assets_serving_path argument.
A timestamp will be appended to the URLs for any referenced assets for cache busting purposes,
e.g. <script src="/index.js?v=27396986"></script>.
If the nonce argument is provided, a nonce attribute will be inserted to all <link> and <script>
tags. For example, if the nonce argument is set to "{% .Nonce %}", then the generated HTML will
contain tags such as <script nonce="{% .Nonce %}" src="/index.js?v=27396986"></script>.
This macro is designed to work side by side with the existing Webpack build without requiring
any major changes to the pages in question.
Args:
name: The prefix used for the names of all the targets generated by this macro.
html_file: The page's HTML file.
ts_entry_point: TypeScript file used as the entry point for the JavaScript bundles.
scss_entry_point: Sass file used as the entry point for the CSS bundles.
ts_deps: Any ts_library dependencies.
sass_deps: Any sass_library dependencies.
sk_element_deps: Any sk_element dependencies. Equivalent to adding the ts_library and
sass_library of each sk_element to deps and sass_deps, respectively.
assets_serving_path: Path prefix for the inserted <script> and <link> tags.
nonce: If set, its contents will be added as a "nonce" attributes to any inserted <script> and
<link> tags.
"""
# Extend ts_deps and sass_deps with the ts_library and sass_library targets produced by each
# sk_element dependency in the sk_element_deps argument.
all_ts_deps = [dep for dep in ts_deps]
all_sass_deps = [dep for dep in sass_deps]
for sk_element_dep in sk_element_deps:
all_ts_deps.append(sk_element_dep)
all_sass_deps.append(make_label_target_explicit(sk_element_dep) + "_styles")
# Output directories.
DEV_OUT_DIR = "development"
PROD_OUT_DIR = "production"
#######################
# JavaScript bundles. #
#######################
ts_library(
name = "%s_ts_lib" % name,
srcs = [ts_entry_point],
deps = all_ts_deps,
)
# Generates file <name>_js_bundle.js. Intermediate result; do not use.
rollup_bundle(
name = "%s_js_bundle" % name,
deps = [
":%s_ts_lib" % name,
"@npm//@rollup/plugin-node-resolve",
"@npm//@rollup/plugin-commonjs",
"@npm//rollup-plugin-sourcemaps",
],
entry_point = ts_entry_point,
format = "umd",
config_file = "//infra-sk:rollup.config.js",
)
# Generates file <name>_js_bundle_minified.js. Intermediate result; do not use.
terser_minified(
name = "%s_js_bundle_minified" % name,
src = "%s_js_bundle.js" % name,
sourcemap = False,
)
# Generates file development/<name>.js.
copy_file(
name = "%s_js_dev" % name,
src = "%s_js_bundle.js" % name,
dst = "%s/%s.js" % (DEV_OUT_DIR, name),
visibility = ["//visibility:public"],
)
# Generates file production/<name>.js.
copy_file(
name = "%s_js_prod" % name,
# For some reason the output of the terser_minified rule above is not directly visible as a
# source file, so we use the rule name instead (i.e. we drop the ".js" extension).
src = "%s_js_bundle_minified" % name,
dst = "%s/%s.js" % (PROD_OUT_DIR, name),
visibility = ["//visibility:public"],
)
################
# CSS Bundles. #
################
# Generate a blank Sass entry-point file to appease the sass_library rule, if one is not given.
if not scss_entry_point:
scss_entry_point = name + "__generated_empty_scss_entry_point"
native.genrule(
name = scss_entry_point,
outs = [scss_entry_point + ".scss"],
cmd = "touch $@",
)
# Generate a Sass stylesheet with any elements-sk imports required by the TypeScript
# entry-point file.
generate_sass_stylesheet_with_elements_sk_imports_from_typescript_sources(
name = name + "_elements_sk_deps_scss",
ts_srcs = [ts_entry_point],
scss_output_file = name + "__generated_elements_sk_deps.scss",
)
# Create a sass_library including the scss_entry_point file, and all the Sass dependencies.
sass_library(
name = name + "_styles",
srcs = [
scss_entry_point,
name + "_elements_sk_deps_scss",
],
deps = all_sass_deps,
)
# Generate a "ghost" Sass entry-point stylesheet with import statements for the following
# files:
#
# - This page's Sass entry-point file (scss_entry_point argument).
# - The generated Sass stylesheet with elements-sk imports.
# - The "ghost" entry-point stylesheets of each sk_element in the sk_element_deps argument.
#
# We will use this generated stylesheet as the entry-points for the sass_binaries below.
generate_sass_stylesheet_with_imports(
name = name + "_ghost_entrypoint_scss",
scss_files_to_import = ([scss_entry_point] if scss_entry_point else []) +
[name + "_elements_sk_deps_scss"] +
[
make_label_target_explicit(dep) + "_ghost_entrypoint_scss"
for dep in sk_element_deps
],
scss_output_file = name + "__generated_ghost_entrypoint.scss",
)
# Notes:
# - Sass compilation errors are not visible unless "bazel build" is invoked with flag
# "--strategy=SassCompiler=sandboxed" (now set by default in //.bazelrc). This is due to a
# known issue with sass_binary. For more details please see
# https://github.com/bazelbuild/rules_sass/issues/96.
# Generates file development/<name>.css.
sass_binary(
name = "%s_css_dev" % name,
src = name + "_ghost_entrypoint_scss",
output_name = "%s/%s.css" % (DEV_OUT_DIR, name),
deps = [name + "_styles"],
include_paths = ["//node_modules"],
output_style = "expanded",
sourcemap = True,
sourcemap_embed_sources = True,
visibility = ["//visibility:public"],
)
# Generates file production/<name>.css.
sass_binary(
name = "%s_css_prod" % name,
src = name + "_ghost_entrypoint_scss",
output_name = "%s/%s.css" % (PROD_OUT_DIR, name),
deps = [name + "_styles"],
include_paths = ["//node_modules"],
output_style = "compressed",
sourcemap = False,
visibility = ["//visibility:public"],
)
###############
# HTML files. #
###############
# Generates file <name>.with_assets.html. Intermediate result; do not use.
#
# See https://www.npmjs.com/package/html-insert-assets.
html_insert_assets(
name = "%s_html" % name,
outs = ["%s.with_assets.html" % name],
args = [
"--html=$(location %s)" % html_file,
"--out=$@",
"--roots=$(RULEDIR)",
"--assets",
# This is OK because html-insert-assets normalizes paths with successive slashes.
"%s/%s.js" % (assets_serving_path, name),
"%s/%s.css" % (assets_serving_path, name),
],
data = [
html_file,
# This rule does not use the bundles directly, but by declaring them as dependencies via
# the "data" argument, we guarantee that Bazel will rebuild <name>.with_assets.html any
# time the bundles change. This refreshes the asset URL query parameters added by this
# rule for cache busting purposes.
"%s_js_dev" % name,
"%s_js_prod" % name,
"%s_css_dev" % name,
"%s_css_prod" % name,
],
)
if nonce:
# Generates file <name>.with_assets_and_nonce.html. Intermediate result; do not use.
html_insert_nonce_attribute(
name = "%s_html_nonce" % name,
src = "%s.with_assets.html" % name,
out = "%s.with_assets_and_nonce.html" % name,
nonce = nonce,
)
instrumented_html = ("%s.with_assets_and_nonce.html" if nonce else "%s.with_assets.html") % name
# Generates file development/<name>.html.
copy_file(
name = "%s_html_dev" % name,
src = instrumented_html,
dst = "%s/%s.html" % (DEV_OUT_DIR, name),
visibility = ["//visibility:public"],
)
# Generates file production/<name>.html.
copy_file(
name = "%s_html_prod" % name,
src = instrumented_html,
dst = "%s/%s.html" % (PROD_OUT_DIR, name),
visibility = ["//visibility:public"],
)
###########################
# Convenience filegroups. #
###########################
# Generates all output files (that is, the development and production bundles).
native.filegroup(
name = name,
srcs = [
":%s_dev" % name,
":%s_prod" % name,
],
visibility = ["//visibility:public"],
)
# Generates the development bundle.
native.filegroup(
name = "%s_dev" % name,
srcs = [
"development/%s.html" % name,
"development/%s.js" % name,
"development/%s.css" % name,
"development/%s.css.map" % name,
],
visibility = ["//visibility:public"],
)
# Generates the production bundle.
native.filegroup(
name = "%s_prod" % name,
srcs = [
"production/%s.html" % name,
"production/%s.js" % name,
"production/%s.css" % name,
],
visibility = ["//visibility:public"],
)
| 5,339,034
|
def score(input,
index,
output=None,
scoring="+U,+u,-s,-t,+1,-i,-a",
filter=None, # "1,2,25"
quality=None,
compress=False,
threads=1,
raw=False,
remove_existing=False):
"""Score the input. In addition, you can specify a tuple with (<score_strata_to_keep>,<max_strata_distance>,<max_alignments>) to
filter the result further.
"""
if compress and output is None:
logging.warning("Disabeling stream compression")
compress = False
if compress and not output.endswith(".gz"):
output += ".gz"
quality = _prepare_quality_parameter(quality)
if quality in ['none', 'ignore']:
quality = 'offset-33'
index = _prepare_index_parameter(index, gem_suffix=True)
score_p = [executables['gem-2-gem'],
'-I', index,
'-q', quality,
'-s', scoring,
'-T', str(threads)
]
if filter is not None:
score_p.append("-f")
ff = filter
if not isinstance(filter, basestring):
ff = ",".join([str(f) for f in filter])
score_p.append(ff)
if raw or isinstance(input, gt.InputFile):
raw = True
if isinstance(input, gt.InputFile) and remove_existing:
input.remove_scores = True
raw = False
#input = input.raw_stream()
tools = [score_p]
if compress:
gzip = _compressor(threads=threads)
tools.append(gzip)
process = utils.run_tools(tools, input=input, output=output, name="GEM-Score", write_map=True, raw=raw)
return _prepare_output(process, output=output)
| 5,339,035
|
def keyword_search(queryset: QuerySet, keywords: str) -> QuerySet:
"""
Performs a keyword search over a QuerySet
Uses PostgreSQL's full text search features
Args:
queryset (QuerySet): A QuerySet to be searched
keywords (str): A string of keywords to search the QuerySet
Returns:
QuerySet: A QuerySet filtered by keywords
"""
query = SearchQuery(keywords)
rank_annotation = SearchRank(F("search_vector"), query)
filtered_queryset = (
queryset.annotate(rank=rank_annotation)
.filter(search_vector=query)
.order_by("-rank")
)
return filtered_queryset
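# Usage sketch (added for illustration; not part of the original source).
# `Article` is a hypothetical Django model with a PostgreSQL SearchVectorField
# named `search_vector`, which this helper requires; shown commented out since
# it only runs inside a configured Django project.
#
# from myapp.models import Article
# results = keyword_search(Article.objects.all(), "climate policy")
# for article in results[:10]:
#     print(article.pk, article.rank)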
| 5,339,036
|
def classification_loss(hidden, labels, n_class, initializer, name, reuse=None,
return_logits=False):
"""
Different classification tasks should use different scope names to ensure
different dense layers (parameters) are used to produce the logits.
An exception will be in transfer learning, where one hopes to transfer
the classification weights.
"""
logits = fluid.layers.fc(
input=hidden,
size=n_class,
param_attr=fluid.ParamAttr(name=name+'_logits', initializer=initializer))
one_hot_target = fluid.layers.one_hot(labels, depth=n_class, dtype=hidden.dtype)
loss = -fluid.layers.reduce_sum(fluid.layers.log_softmax(logits) * one_hot_target, -1)
if return_logits:
return loss, logits
return loss
| 5,339,037
|
def do_pdfimages(pdf_file, state, page_number=None, use_tmp_identifier=True):
"""Convert a PDF file to images in the TIFF format.
:param pdf_file: The input file.
:type pdf_file: jfscripts._utils.FilePath
:param state: The state object.
:type state: jfscripts.pdf_compress.State
:param int page_number: Extract only the page with a specific page number.
:return: The return value of `subprocess.run`.
:rtype: subprocess.CompletedProcess
"""
if use_tmp_identifier:
image_root = '{}_{}'.format(pdf_file.basename, tmp_identifier)
else:
image_root = pdf_file.basename
command = ['pdfimages', '-tiff', str(pdf_file), image_root]
if page_number:
page_number = str(page_number)
page_segments = ['-f', page_number, '-l', page_number]
command = command[:2] + page_segments + command[2:]
return run.run(command, cwd=state.common_path)
| 5,339,038
|
def git_patch_tracked(path: Path) -> str:
""" Generate a patchfile of the diff for all tracked files in the repo
This function catches all exceptions to make it safe to call at the end of
dataset creation or model training
Args:
path (Path): path to a directory inside a git repo. Unless you have a reason
not to, this should be the root of the repo for maximum coverage
Returns:
str: patchfile for tracked files, or an error message if the command could not be executed
"""
# store cwd and change to path
cwd = Path.cwd()
chdir(path)
# execute command
tracked_patch = "git --no-pager diff -u ."
try:
out = subprocess.check_output(tracked_patch.split()).decode('utf-8')
except Exception as e:
out = str(e)
# restore cwd
chdir(cwd)
return out
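# Usage sketch (added for illustration; not part of the original source).
# Run from inside a git repository; on failure the error message is returned instead.
from pathlib import Path

patch = git_patch_tracked(Path("."))
print(patch[:500])  # preview of the generated patchfile (or the error message)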
| 5,339,039
|
def connect(transport=None, host='localhost', username='admin',
password='', port=None, key_file=None, cert_file=None,
ca_file=None, timeout=60, return_node=False, **kwargs):
""" Creates a connection using the supplied settings
This function will create a connection to an Arista EOS node using
the arguments. All arguments are optional with default values.
Args:
transport (str): Specifies the type of connection transport to use.
Valid values for the connection are socket, http_local, http, and
https. The default value is specified in DEFAULT_TRANSPORT
host (str): The IP address or DNS host name of the connection device.
The default value is 'localhost'
username (str): The username to pass to the device to authenticate
the eAPI connection. The default value is 'admin'
password (str): The password to pass to the device to authenticate
the eAPI connection. The default value is ''
port (int): The TCP port of the endpoint for the eAPI connection. If
this keyword is not specified, the default value is automatically
determined by the transport type. (http=80, https=443)
key_file (str): Path to private key file for ssl validation
cert_file (str): Path to PEM formatted cert file for ssl validation
ca_file (str): Path to CA PEM formatted cert file for ssl validation
timeout (int): timeout
return_node (bool): Returns a Node object if True, otherwise
returns an EapiConnection object.
Returns:
An instance of an EapiConnection object for the specified transport.
"""
transport = transport or DEFAULT_TRANSPORT
connection = make_connection(transport, host=host, username=username,
password=password, key_file=key_file,
cert_file=cert_file, ca_file=ca_file,
port=port, timeout=timeout)
if return_node:
return Node(connection, transport=transport, host=host,
username=username, password=password, key_file=key_file,
cert_file=cert_file, ca_file=ca_file, port=port, **kwargs)
return connection
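# Usage sketch (added for illustration; not part of the original source).
# The host and credentials below are placeholders for a reachable EOS device.
node = connect(transport="https", host="192.0.2.10", username="admin", password="admin", return_node=True)
print(node)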
| 5,339,040
|
def cov_hc2(results):
"""
See statsmodels.RegressionResults
"""
# probably could be optimized
h = np.diag(np.dot(results.model.exog,
np.dot(results.normalized_cov_params,
results.model.exog.T)))
het_scale = results.resid**2/(1-h)
cov_hc2_ = _HCCM(results, het_scale)
return cov_hc2_
| 5,339,041
|
def add_people():
"""
Show add form
"""
if request.method == 'POST':
#save data to database
db_conn = get_connection()
cur = db_conn.cursor()
print ('>'*10, request.form)
firstname = request.form['first-name']
lastname = request.form['last-name']
address = request.form['address']
country = request.form['country']
# if firstname is not empty, insert into table:
if firstname.strip():
_add_sql = '''
INSERT INTO peoples(firstname, lastname, address, country)
VALUES(?,?,?,?)
'''
cur.execute(_add_sql, (firstname.strip(),
lastname.strip(), address.strip(), country.strip()
))
db_conn.commit()
#redirect to list page
return redirect(url_for('list_people'))
else:
#redirect to add page with error
return redirect(url_for('add_people'))
return render_template('add.jinja2')
| 5,339,042
|
def api_to_schema(api: "lightbus.Api") -> dict:
"""Produce a lightbus schema for the given API"""
schema = {"rpcs": {}, "events": {}}
if isinstance(api, type):
raise InvalidApiForSchemaCreation(
"An attempt was made to derive an API schema from a type/class, rather than "
"from an instance of an API. This is probably because you are passing an API "
"class to api_to_schema(), rather than an instance of the API class."
)
for member_name, member in inspect.getmembers(api):
if member_name.startswith("_"):
# Don't create schema from private methods
continue
if hasattr(Api, member_name):
# Don't create schema for methods defined on Api class
continue
if inspect.ismethod(member):
schema["rpcs"][member_name] = {
"parameters": make_rpc_parameter_schema(api.meta.name, member_name, method=member),
"response": make_response_schema(api.meta.name, member_name, method=member),
}
elif isinstance(member, Event):
schema["events"][member_name] = {
"parameters": make_event_parameter_schema(api.meta.name, member_name, event=member)
}
return schema
| 5,339,043
|
def sum2(u : SignalUserTemplate, initial_state=0):
"""Accumulative sum
Parameters
----------
u : SignalUserTemplate
the input signal
initial_state : float, SignalUserTemplate
the initial state
Returns
-------
    tuple of SignalUserTemplate
        the output signals y[k] and y[k+1] of the accumulator
Details:
--------
The difference equation
y[k+1] = y[k] + u[k]
is evaluated. The return values are
y[k], y[k+1]
"""
y_k = dy.signal()
y_kp1 = y_k + u
y_k << dy.delay(y_kp1, initial_state=initial_state)
return y_k, y_kp1
| 5,339,044
|
def approve_report(id):
"""
Function to approve a report
"""
# Approve the vulnerability_document record
resource = s3db.resource("vulnerability_document", id=id, unapproved=True)
resource.approve()
# Read the record details
vdoc_table = db.vulnerability_document
record = db(vdoc_table.id == id).select(vdoc_table.document_type,
vdoc_table.doc_id,
vdoc_table.source_id,
limitby=(0, 1)).first()
# Approve the linked records
document_type = record.document_type
if document_type == "indicator":
tablename = "vulnerability_data"
table = s3db[tablename]
query = (table.source_id == record.source_id)
agg_function = "vulnerability_update_aggregates"
elif document_type == "demographic":
tablename = "stats_demographic_data"
table = s3db[tablename]
query = (table.source_id == record.source_id)
agg_function = "stats_demographic_update_aggregates"
elif document_type in ("map", "image"):
tablename = "doc_image"
query = (s3db[tablename].doc_id == record.doc_id)
elif document_type in ("vca", "other"):
tablename = "doc_document"
query = (s3db[tablename].doc_id == record.doc_id)
else:
current.log.error("Report not Approved as unknown type", document_type)
return False
resource = s3db.resource(tablename, filter=query, unapproved=True)
resource.approve()
if document_type in ("indicator", "demographic"):
# Rebuild the relevant aggregates
rows = resource.select(fields=["data_id",
"parameter_id",
"date",
"location_id",
"value"],
as_rows=True)
s3task.run_async(agg_function, vars = {"records": rows.json()})
return True
| 5,339,045
|
def sparse_column_multiply(E, a):
"""
    Scale each column of the sparse matrix E by the corresponding entry of a
Parameters
----------
E: `np.array` or `sp.spmatrix`
a: `np.array`
        A vector of per-column scaling factors.
Returns
-------
Rescaled sparse matrix
"""
ncol = E.shape[1]
if ncol != a.shape[0]:
logg.error("Dimension mismatch, multiplication failed")
return E
else:
w = ssp.lil_matrix((ncol, ncol))
w.setdiag(a)
return ssp.csr_matrix(E) * w
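A small usage sketch, assuming ssp is scipy.sparse and logg is the module's logging helper as the snippet implies:

import numpy as np
import scipy.sparse as ssp

E = ssp.csr_matrix(np.array([[1.0, 2.0],
                             [3.0, 4.0]]))
a = np.array([10.0, 0.5])
# Column 0 is scaled by 10 and column 1 by 0.5 -> [[10., 1.], [30., 2.]]
print(sparse_column_multiply(E, a).toarray())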
| 5,339,046
|
def soup_extract_enzymelinks(tabletag):
"""Extract all URLs for enzyme families from first table."""
return {link.string: link['href']
for link in tabletag.find_all("a", href=True)}
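A quick illustration with BeautifulSoup; the family name and link are made up:

from bs4 import BeautifulSoup

html = '<table><a href="/fam/GH1.html">GH1</a></table>'
table = BeautifulSoup(html, 'html.parser').table
print(soup_extract_enzymelinks(table))  # {'GH1': '/fam/GH1.html'}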
| 5,339,047
|
def choose(db_issue: Issue, db_user: User, pgroup_ids: [int], history: str, path: str) -> dict:
"""
Initialize the choose step for more than one premise in a discussion. Creates helper and returns a dictionary
containing several feedback options regarding this argument.
:param db_issue:
:param db_user:
:param pgroup_ids:
:param history:
:param path:
:return:
"""
LOG.debug("Entering choose function")
issue_dict = issue_helper.prepare_json_of_issue(db_issue, db_user)
disc_ui_locales = issue_dict['lang']
created_argument: Argument = DBDiscussionSession.query(Argument).filter(
Argument.premisegroup_uid == pgroup_ids[0]).one()
is_supportive = created_argument.is_supportive
conclusion_is_argument = created_argument.attacks is not None
if conclusion_is_argument:
conclusion = created_argument.attacks
else:
conclusion = created_argument.conclusion
_ddh = DiscussionDictHelper(disc_ui_locales, db_user.nickname, history, slug=db_issue.slug)
_idh = ItemDictHelper(disc_ui_locales, db_issue, path=path, history=history)
discussion_dict = _ddh.get_dict_for_choosing(conclusion.uid, conclusion_is_argument, is_supportive)
item_dict = _idh.get_array_for_choosing(conclusion.uid, pgroup_ids, conclusion_is_argument, is_supportive,
db_user.nickname)
return {
'issues': issue_dict,
'discussion': discussion_dict,
'items': item_dict,
'title': issue_dict['title']
}
| 5,339,048
|
def loops_NumbaJit_parallelFast(csm, r0, rm, kj):
""" This method implements the prange over the Gridpoints, which is a direct
    implementation of the currently used c++ methods created with scipy.weave.
Very strange: Just like with Cython, this implementation (prange over Gridpoints)
produces wrong results. If one doesn't parallelize -> everything is good
(just like with Cython). Maybe Cython and Numba.jit use the same interpreter
to generate OpenMP-parallelizable code.
BUT: If one uncomments the 'steerVec' declaration in the prange-loop over the
gridpoints an error occurs. After commenting the line again and executing
the script once more, THE BEAMFORMER-RESULTS ARE CORRECT (for repeated tries).
Funny enough the method is now twice as slow in comparison to the
'wrong version' (before invoking the error).
"""
# init
nFreqs = csm.shape[0]
nGridPoints = len(r0)
nMics = csm.shape[1]
beamformOutput = np.zeros((nFreqs, nGridPoints), np.float64)
steerVec = np.zeros((nMics), np.complex128)
for cntFreqs in xrange(nFreqs):
kjj = kj[cntFreqs].imag
for cntGrid in prange(nGridPoints):
# steerVec = np.zeros((nMics), np.complex128) # This is the line that has to be uncommented (see this methods documentation comment)
rs = 0
r01 = r0[cntGrid]
for cntMics in xrange(nMics):
rm1 = rm[cntGrid, cntMics]
rs += 1.0 / (rm1**2)
temp3 = np.float32(kjj * (rm1 - r01))
steerVec[cntMics] = (np.cos(temp3) - 1j * np.sin(temp3)) * rm1
rs = r01 ** 2
temp1 = 0.0
for cntMics in xrange(nMics):
temp2 = 0.0
for cntMics2 in xrange(cntMics):
temp2 = temp2 + csm[cntFreqs, cntMics2, cntMics] * steerVec[cntMics2]
temp1 = temp1 + 2 * (temp2 * steerVec[cntMics].conjugate()).real
temp1 = temp1 + (csm[cntFreqs, cntMics, cntMics] * np.conjugate(steerVec[cntMics]) * steerVec[cntMics]).real
beamformOutput[cntFreqs, cntGrid] = (temp1 / rs).real
return beamformOutput
| 5,339,049
|
def sobel_vertical_gradient(image: numpy.ndarray) -> numpy.ndarray:
"""
Computes the Sobel gradient in the vertical direction.
Args:
image: A two dimensional array, representing the image from which the vertical gradient will be calculated.
Returns:
A two dimensional array, representing the vertical gradient of the image.
"""
ky = numpy.array([[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]])
return scipy.ndimage.convolve(image, ky)
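A minimal sketch on a synthetic image containing a single horizontal edge:

import numpy
import scipy.ndimage

image = numpy.zeros((5, 5))
image[2:, :] = 1.0  # bright lower half -> strong response of the vertical kernel around row 2
print(sobel_vertical_gradient(image))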
| 5,339,050
|
def custom_field_sum(issues, custom_field):
"""Sums custom field values together.
Args:
issues: List The issue list from the JQL query
custom_field: String The custom field to sum.
Returns:
Integer of the sum of all the found values of the custom_field.
"""
custom_field_running_total = 0
for issue in issues:
if getattr(issue.fields, custom_field) is None:
custom_field_running_total = custom_field_running_total + 2
else:
custom_field_running_total = custom_field_running_total + \
getattr(issue.fields, custom_field)
return custom_field_running_total
| 5,339,051
|
def routingAreaUpdateReject():
"""ROUTING AREA UPDATE REJECT Section 9.4.17"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0xb) # 00001011
c = GmmCause()
d = ForceToStandbyAndSpareHalfOctets()
packet = a / b / c / d
return packet
| 5,339,052
|
def parse_file_list(file_list_file, in_img_data_dir, out_img_data_dir, write_txt = True):
"""
    do the following:
    1. write the list of images to out_img_data_dir/file_list.txt, with name mapping
    2. symlink the images from the original data directory into out_img_data_dir/
    """
    p = re.compile(r"\w+/[\w,\_]+/([\w,\_,.,\-,\+]+)")
new_img_list_file = []
for img_fn in file_list_file:
new_fn = p.match(img_fn).group(1)
new_img_list_file.append(new_fn)
# link the image files
for src, dst in zip(file_list_file, new_img_list_file):
src = os.path.join(os.getcwd(), in_img_data_dir, src)
dst = os.path.join(out_img_data_dir, dst)
assert os.path.exists(src), "the source file does not exist"
#print(src, dst)
os.symlink(src, dst)
# write the file list to the file
if write_txt:
dst = os.path.join(out_img_data_dir, "file_list.txt")
with open(dst, "w") as f:
for fn in new_img_list_file:
f.write(fn + "\n")
| 5,339,053
|
def compile_gyms_table():
""" supposed to run from /py directory """
os.chdir('../gyms')
compiled_gyms_table = ""
for file in filter(lambda _: os.path.splitext(_)[1] == '.json', os.listdir()):
gym = json.loads(open(file, 'r', encoding='utf-8').read())
assert gym['type'] == 'gym'
        compiled_gyms_table += f'''        <tr _cat="{gym['_cat']}" _grade="{gym['_grade']}" _diff="{gym['_diff']}" class=\"gymitem\">
<td class="gym-source">{gym['source']}</td>
<td class="gym-category">{LOCAL_CATEGORY_NAMES_UPPERCASE[gym['_cat']]}</td>
<td class="gym-topic">{gym['name']}:
<a href="/MathForces/gyms/{gym['pdf_link']}.pdf">[pdf]</a>,
<a href="/MathForces/gyms/{gym['id']}_t">[web]</a>,
<span style="color: gray; font-size: smaller;">{gym['_num_probs']} задач{'і' if 2 <= gym['_num_probs'] <= 4 else ''}</span></td>
<td class="gym-date">{gym['_date']}</td>
<td class="gym-grade">{gym['_grade']}</td>
<td class="gym-difficulty">{LOCAL_DIFFICULTY_NAMES_UPPERCASE[gym['_diff']]}</td>
</tr>
'''
# update index:
index_lines = open('index.html', 'r', encoding='utf-8').readlines()
table_body_start = index_lines.index(' <tbody id="gyms_table_body">\n')
table_body_end = index_lines.index(' </tbody>\n')
index_lines = index_lines[:table_body_start + 1] + [compiled_gyms_table, ] + index_lines[table_body_end:]
open('index.html', 'w', encoding='utf-8').writelines(index_lines)
os.chdir('../py')
| 5,339,054
|
def parse(complete_file_name, stop_at_line_number=None):
"""
:param complete_file_name:
:param stop_at_line_number: if None, parses the whole file, if not None, stops parsing line "stop_at_line_number"
:return: an iterator of ProteinRow
"""
c = 0
def tsv_iterator():
with open(complete_file_name) as file:
tsvin = csv.reader(file, delimiter='\t')
c = 0
for row in tsvin:
if stop_at_line_number is not None and c >= stop_at_line_number:
break
c += 1
if row[0] == '':
continue
# replace string "path" with list (higher level) :
row[0] = row[0].split(".")
# add line number at the end for debugging :
row.append(c)
yield row
for _, tree_rows in itertools.groupby(tsv_iterator(), lambda row: row[0][0]):
# root is always first
root_row = next(tree_rows)
# children follow
children_rows = list(tree_rows)
peptide_rows = []
psm_rows = []
for child_row in children_rows:
if len(child_row[0]) == 2:
peptide_rows.append(child_row)
elif len(child_row[0]) == 3:
psm_rows.append(child_row)
yield ProteinRow(root_row, [
PeptideRow(
peptide_row,
[ PSMRow(psm)
for psm in filter(lambda r: __array_is_strict_prefix_of(peptide_row[0], r[0]), psm_rows)
]
)
for peptide_row in peptide_rows
])
| 5,339,055
|
def doRipsFiltration(X, maxHomDim, thresh = -1, coeff = 2, getCocycles = False):
"""
Run ripser assuming Euclidean distance of a point cloud X
:param X: An N x d dimensional point cloud
:param maxHomDim: The dimension up to which to compute persistent homology
:param thresh: Threshold up to which to add edges. If not specified, add all
edges up to the full clique
:param coeff: A prime to use as the field coefficients for the PH computation
:param getCocycles: True if cocycles should be computed and returned
:return: PDs (array of all persistence diagrams from 0D up to maxHomDim).
Each persistence diagram is a numpy array
OR
tuple (PDs, Cocycles) if returning cocycles
"""
D = getSSM(X)
return doRipsFiltrationDM(D, maxHomDim, thresh, coeff, getCocycles)
| 5,339,056
|
def test_contest(gc: GetContestsDocument):
"""Tests contest document"""
contest = random.choice(gc.contests)
assert isinstance(contest, ContestDocument)
| 5,339,057
|
def build_target_areas(entry):
"""Cleanup the raw target areas description string"""
target_areas = []
areas = str(entry['cap:areaDesc']).split(';')
for area in areas:
target_areas.append(area.strip())
return target_areas
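A tiny example with a hypothetical CAP alert entry:

entry = {'cap:areaDesc': 'Marin County; Sonoma County '}
print(build_target_areas(entry))  # ['Marin County', 'Sonoma County']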
| 5,339,058
|
def convert_to_celcius(scale, temp):
"""Convert the specified temperature to Celcius scale.
:param int scale: The scale to convert to Celcius.
:param float temp: The temperature value to convert.
:returns: The temperature in degrees Celcius.
:rtype: float
"""
if scale == temp_scale.FARENHEIT:
return convert_farenheit_to_celcius(temp)
elif scale == temp_scale.CELCIUS:
return temp
elif scale == temp_scale.KELVIN:
return convert_kelvin_to_celcius(temp)
elif scale == temp_scale.RANKINE:
return convert_rankine_to_celcius(temp)
else:
return 0.0
| 5,339,059
|
def setMotor(id, speed):
""" Set a motor speed """
_controllers[id].set(speed)
| 5,339,060
|
def config_file_settings(request):
"""
Update file metadata settings
"""
if request.user.username != 'admin':
return redirect('project-admin:home')
if request.method == 'POST':
update_file_metadata(request.POST)
return redirect('project-admin:home')
files = FileMetaData.objects.all()
for file in files:
file.tags = file.get_tags()
return render(request, 'project_admin/config-file-settings.html',
context={"files": files})
| 5,339,061
|
def roundtrip(sender, receiver):
"""
Send datagrams from `sender` to `receiver` and back.
"""
return transfer(sender, receiver), transfer(receiver, sender)
| 5,339,062
|
def basic_takeoff(altitude):
"""
This function take-off the vehicle from the ground to the desired
altitude by using dronekit's simple_takeoff() function.
Inputs:
1. altitude - TakeOff Altitude
"""
vehicle.mode = VehicleMode("GUIDED")
vehicle.armed = True
time.sleep(2)
vehicle.simple_takeoff(altitude)
while True:
print("Reached Height = ", vehicle.location.global_relative_frame.alt)
if vehicle.location.global_relative_frame.alt >= (altitude - 1.5):
break
| 5,339,063
|
def match_demo():
"""
    2. match()
    Pass in the string to match together with a regular expression to check
    whether the regex matches the string. match() tries to match the regex from
    the very start of the string; it returns the match object on success and
    None otherwise.
"""
content = 'Hello 123 4567 World_This is a Regex Demo'
# print(len(content))
result = re.match(r"^Hello\s\d\d\d\s\d{4}\s\w{10}", content)
    # Use .* -- . matches any character and * repeats the preceding character any number of times
result2 = re.match(r"Hello.*Demo$", content)
    # Capture the digit groups non-greedily with .*?
result3 = re.match(r"He.*?(\d+)\s(\d+).*$", content)
print(result)
print(result.group())
print(result.span())
print(result2.group())
print(result3.group())
print(result3.group(1))
print(result3.group(2))
| 5,339,064
|
def loadHashDictionaries():
"""
Load dictionaries containing id -> hash and hash -> id mappings
These dictionaries are essential due to some restrictive properties
of the anserini repository
Return both dictionaries
"""
with open(PATH + PATH_ID_TO_HASH, "r") as f:
id_to_hash_dict = json.load(f)
with open(PATH + PATH_HASH_TO_ID, "r") as f:
hash_to_id_dict = json.load(f)
return id_to_hash_dict, hash_to_id_dict
| 5,339,065
|
def preprocess(tensor_dict, preprocess_options, func_arg_map=None):
"""Preprocess images and bounding boxes.
Various types of preprocessing (to be implemented) based on the
preprocess_options dictionary e.g. "crop image" (affects image and possibly
boxes), "white balance image" (affects only image), etc. If self._options
is None, no preprocessing is done.
Args:
tensor_dict: dictionary that contains images, boxes, and can contain other
things as well.
images-> rank 4 float32 tensor contains
1 image -> [1, height, width, 3].
with pixel values varying between [0, 1]
boxes-> rank 2 float32 tensor containing
the bounding boxes -> [N, 4].
Boxes are in normalized form meaning
their coordinates vary between [0, 1].
Each row is in the form
of [ymin, xmin, ymax, xmax].
preprocess_options: It is a list of tuples, where each tuple contains a
function and a dictionary that contains arguments and
their values.
func_arg_map: mapping from preprocessing functions to arguments that they
expect to receive and return.
Returns:
tensor_dict: which contains the preprocessed images, bounding boxes, etc.
Raises:
ValueError: (a) If the functions passed to Preprocess
are not in func_arg_map.
(b) If the arguments that a function needs
do not exist in tensor_dict.
(c) If image in tensor_dict is not rank 4
"""
if func_arg_map is None:
func_arg_map = get_default_func_arg_map()
# changes the images to image (rank 4 to rank 3) since the functions
# receive rank 3 tensor for image
if fields.InputDataFields.image in tensor_dict:
image = tensor_dict[fields.InputDataFields.image]
# if len(images.get_shape()) != 4:
# raise ValueError('images in tensor_dict should be rank 4')
# image = tf.squeeze(images, squeeze_dims=[0])
if len(image.get_shape()) != 3:
raise ValueError('images in tensor_dict should be rank 3')
tensor_dict[fields.InputDataFields.image] = image
# Preprocess inputs based on preprocess_options
for option in preprocess_options:
func, params = option
if func not in func_arg_map:
raise ValueError('The function %s does not exist in func_arg_map' %
(func.__name__))
arg_names = func_arg_map[func]
for a in arg_names:
if a is not None and a not in tensor_dict:
raise ValueError('The function %s requires argument %s' %
(func.__name__, a))
def get_arg(key):
return tensor_dict[key] if key is not None else None
args = [get_arg(a) for a in arg_names]
results = func(*args, **params)
if not isinstance(results, (list, tuple)):
results = (results,)
# Removes None args since the return values will not contain those.
arg_names = [arg_name for arg_name in arg_names if arg_name is not None]
for res, arg_name in zip(results, arg_names):
tensor_dict[arg_name] = res
# # changes the image to images (rank 3 to rank 4) to be compatible to what
# # we received in the first place
# if fields.InputDataFields.image in tensor_dict:
# image = tensor_dict[fields.InputDataFields.image]
# images = tf.expand_dims(image, 0)
# tensor_dict[fields.InputDataFields.image] = images
return tensor_dict
| 5,339,066
|
def update_comment(id):
"""修改单条评论"""
comment = Comment.query.get_or_404(id)
if g.current_user != comment.author and not g.current_user.can(Permission.COMMENT):
return error_response(403)
data = request.get_json()
if not data:
return bad_request('You must put JSON data.')
comment.from_dict(data)
db.session.commit()
return jsonify(comment.to_dict())
| 5,339,067
|
def resnet18(num_classes, pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
encoder = ResNetEncoder(BasicBlock, [2, 2, 2, 2])
if pretrained:
encoder.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='models'), strict=False)
model = RetinaNet(encoder=encoder, num_classes=num_classes)
return model
| 5,339,068
|
def display_page(pathname):
"""displays dash page"""
if pathname == '/':
return main.layout
elif pathname == '/explore':
return explore.layout
elif pathname == '/eval':
return eval.layout
elif pathname == '/train':
return train.layout
else:
return html.Div(dbc.Col(dbc.Jumbotron(
[
html.H1("404: Not found", className="text-danger"),
html.Hr(),
html.P(f"The pathname {pathname} was not recognized..."),
]
), width = 9), style = CONTENT_STYLE
)
| 5,339,069
|
def getFilterDict(args):
"""
Function: An entire function just to notify the user of the arguments they've passed to the script? Seems reasonable.
Called from: main
"""
## Set variables for organization; this can be probably be removed later
outText = {}
outAction = ""
userString = ""
ipString = ""
ctryString = ""
domainString = ""
evntString = ""
## Split the modifierData variable if user passed multiple values in a comma-separated list
if args.modifierData:
modData = args.modifierData.split(",")
## Set analysis type as one of the three main functions available
if args.lIPs:
outAction = "Analysis Type: IP dump"
elif args.topNum:
outAction = "Analysis Type: Log summary"
else:
outAction = "Analysis Type: Detailed Analysis"
## Determine if results will be filtered or excluded by user & create output string. Note
## that usernames passed in DOMAIN\USERNAME format will need to be converted back to a
## single backslash (\) where the user escaped command input with a double backslash (\\)
try:
if args.filterType.lower() == "user" or args.excludeType.lower() == "user":
for i in range(0,(len(modData))):
if userString == "":
userString = modData[i].replace("\\\\","\\")
else:
userString = userString + ", " + modData[i].replace("\\\\","\\")
if args.filterType:
userString = " Users - Only " + userString
else:
userString = " Users - All except " + userString
except:
pass
## Determine if results will be filtered or excluded by IP address & create output string
try:
if args.filterType.lower() == "ip" or args.excludeType.lower() == "ip":
for i in range(0,(len(modData))):
if ipString == "":
ipString = modData[i]
else:
ipString = ipString + ", " + modData[i]
if args.filterType:
ipString = " IPs - Only " + ipString
else:
ipString = " IPs - All except " + ipString
except:
pass
## If the user passed the -P argument to omit private IP addresses, add it to IP line
if args.privIP:
if ipString == "":
ipString = " IPs - All except internal addresses"
else:
ipString += ", and internal addresses"
## Determine if results will be filtered or excluded by country & create output string
try:
if args.filterType.lower() == "country" or args.excludeType.lower() == "country":
for i in range(0,(len(modData))):
if ctryString == "":
ctryString = modData[i]
else:
ctryString = ctryString + ", " + modData[i]
if args.filterType:
ctryString = " Countries - Only " + ctryString
else:
ctryString = " Countries - All except " + ctryString
except:
pass
## Determine if results will be filtered or excluded by domain & create output string
try:
if args.filterType.lower() == "domain" or args.excludeType.lower() == "domain":
for i in range(0,(len(modData))):
if domainString == "":
domainString = modData[i]
else:
domainString = domainString + ", " + modData[i]
if args.filterType:
domainString = " Domains - Only " + domainString
else:
domainString = " Domains - All except " + domainString
except:
pass
## Determine if benign 'garbage' events will be filtered out and update misc event filter string
if args.logGarbage:
evntString = "No garbage events"
## Determine if only known cities will be presented in the results and update misc event filter string
if args.kCity:
if evntString == "":
evntString = "No unknown cities"
else:
evntString = evntString + ", no unknown cities"
## Determine if events will only be filtered to IPs with foreign geolocation and update filter string
if args.warnIP:
if ipString == "":
ipString = " IPs - Only IPs foreign to current location"
else:
ipString = ipString + ", only IPs foreign to current location"
## If any filter strings are empty, replace them with notice that all events of the given type will be included in output
if userString == "":
userString = " Users - ALL"
if ipString == "":
ipString = " IPs - ALL"
if ctryString == "":
ctryString = " Countries - ALL"
if domainString == "":
domainString = " Domains - ALL"
if evntString == "":
evntString = " Events - ALL"
else:
evntString = " Events - " + evntString
## Arrange the outText dictionary to be passed back to main and ship it
outText["outAction"] = outAction
outText["userString"] = userString
outText["ipString"] = ipString
outText["ctryString"] = ctryString
outText["domainString"] = domainString
outText["evntString"] = evntString
return outText
| 5,339,070
|
def iter_object_annotations(obj) -> (str, CasterUnion, Any):
"""Iter over annotations of object."""
if isclass(obj):
yield from iter_class_annotations(obj)
elif isroutine(obj):
yield from iter_function_annotations(obj)
else:
raise InvalidSchema(obj)
| 5,339,071
|
def pointwise_multiply(A, B):
"""Pointwise multiply
Args:
-----------------------------
A: tvm.te.tensor.Tensor
shape [...]
B: tvm.te.tensor.Tensor
shape same as A
-----------------------------
Returns:
-----------------------------
tvm.te.tensor.Tensor
shape same as A
-----------------------------
"""
assert_print(len(A.shape) == len(B.shape))
for i in range(len(A.shape)):
assert_print(A.shape[i].value == B.shape[i].value)
def _mul(*args):
return A[args] * B[args]
return tvm.te.compute(A.shape, _mul)
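A usage sketch, assuming TVM is installed and that assert_print behaves like a plain assert:

import tvm

A = tvm.te.placeholder((4, 4), name="A")
B = tvm.te.placeholder((4, 4), name="B")
C = pointwise_multiply(A, B)  # element-wise product with the same 4x4 shape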
| 5,339,072
|
def generate_negative_data(data):
"""Generate negative data."""
# Big movement -> around straight line
for i in range(100):
dic = {config.DATA_NAME: [], config.LABEL_NAME: "negative"}
start_x = (random.random() - 0.5) * 2000
start_y = (random.random() - 0.5) * 2000
start_z = (random.random() - 0.5) * 2000
x_increase = (random.random() - 0.5) * 10
y_increase = (random.random() - 0.5) * 10
z_increase = (random.random() - 0.5) * 10
for j in range(config.seq_length):
dic[config.DATA_NAME].append([
start_x + j * x_increase + (random.random() - 0.5) * 6,
start_y + j * y_increase + (random.random() - 0.5) * 6,
start_z + j * z_increase + (random.random() - 0.5) * 6
])
data.append(dic)
# Random
for i in range(100):
dic = {config.DATA_NAME: [], config.LABEL_NAME: "negative"}
for _ in range(config.seq_length):
dic[config.DATA_NAME].append([(random.random() - 0.5) * 1000,
(random.random() - 0.5) * 1000,
(random.random() - 0.5) * 1000])
data.append(dic)
# Stay still
for i in range(100):
dic = {config.DATA_NAME: [], config.LABEL_NAME: "negative"}
start_x = (random.random() - 0.5) * 2000
start_y = (random.random() - 0.5) * 2000
start_z = (random.random() - 0.5) * 2000
for _ in range(config.seq_length):
dic[config.DATA_NAME].append([
start_x + (random.random() - 0.5) * 40,
start_y + (random.random() - 0.5) * 40,
start_z + (random.random() - 0.5) * 40
])
data.append(dic)
| 5,339,073
|
def create_website(self):
"""
:param self:
:return:
"""
try:
query = {}
show = {"_id": 0}
website_list = yield self.mongodb.website.find(query, show)
return website_list
except:
logger.error(traceback.format_exc())
return ""
| 5,339,074
|
def _get_sparsity(A, tolerance=0.01):
"""Returns ~% of zeros."""
positives = np.abs(A) > tolerance
non_zeros = np.count_nonzero(positives)
return (A.size - non_zeros) / float(A.size)
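For example, three of the four entries below are within the default tolerance of zero:

import numpy as np

A = np.array([[0.0, 0.5],
              [0.0, 0.0]])
print(_get_sparsity(A))  # 0.75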
| 5,339,075
|
def findPeaks(hist):
"""
Take in histogram
Go through each bin in the histogram and:
Find local maximum and:
Fit a parabola around the two neighbor bins and local max bin
Calculate the critical point that produces the max of the parabola
(critical point represents orientation, max is the peak)
Add both to list of peaks
Return sorted list of peaks
"""
peaks = []
offsets = []
binRanges = np.arange(-175, 185, 10)
    hist_max = np.max(hist)  # overall maximum of the histogram (avoid shadowing built-in max)
for i in range(len(hist)):
if i == 0:
left, right = -1, 1
elif i == len(hist) - 1:
left, right = -2, 0
else:
left, right = i-1, i+1
        if (hist[i] - hist[left]) >= (0.01*hist_max) \
        and (hist[i] - hist[right]) >= (0.01*hist_max):
a = (hist[right] - 2*hist[i] + hist[left]) / 2
b = (hist[right] - hist[left]) / 2
c = hist[i]
aDx = a*2
bDx = -1*b
            # critical point of the fitted parabola
            x = bDx/aDx
            # interpolated peak height (value of the parabola at the critical point)
            peak_val = a*(x**2) + b*x + c
offset = (x*10) + binRanges[i]
            peaks.append((peak_val, offset))
return sorted(peaks, reverse=True)
| 5,339,076
|
def tph_chart_view(request, template_name="monitor/chart.html", **kwargs):
"""Create example view.
that inserts content into the dash context passed to the dash application.
"""
logger.debug('start')
context = {
'site_title': 'TPH monitor',
'title': 'TPH chart via Plotly Dash for Django.',
'year': ts.COPYRIGHT_YEAR,
'owner': ts.OWNER,
}
# create some context to send over to Dash:
dash_context = request.session.get("django_plotly_dash", dict())
dash_context['django_to_dash_context'] = "I am Dash receiving context from Django"
request.session['django_plotly_dash'] = dash_context
logger.debug('end')
return render(request, template_name=template_name, context=context)
| 5,339,077
|
def gradient_descent(x_0, a, eta, alpha, beta, it_max, *args, **kwargs):
"""Perform simple gradient descent with back-tracking line search.
"""
# Get a copy of x_0 so we don't modify it for other project parts.
x = x_0.copy()
# Get an initial gradient.
g = gradient(x, a)
# Compute the norm.
norm = np.linalg.norm(g)
# Initialize lists to track our objective values and step sizes.
obj_list = []
t_list = []
    # Loop until the gradient norm drops below eta or we hit the iteration cap.
i = 0
while (eta <= norm) and (i < it_max):
# Perform back-tracking line search to get our step size.
t = backtrack_line_search(x=x, a=a, g=g, dx=-g, alpha=alpha, beta=beta)
t_list.append(t)
# Perform the x update.
x = x - t * g
# Compute new gradient and norm.
g = gradient(x, a)
norm = np.linalg.norm(g)
# Compute new value of objective function, append to list.
obj_list.append(objective(x, a))
if np.isnan(obj_list[-1]):
raise ValueError(
'NaN objective value encountered in gradient_descent')
# Update iteration counter.
i += 1
if i >= it_max:
raise ValueError(f'Hit {i} iterations in gradient_descent.')
return x, np.array(obj_list), t_list
| 5,339,078
|
def ExtractCalledByNatives(contents):
"""Parses all methods annotated with @CalledByNative.
Args:
contents: the contents of the java file.
Returns:
A list of dict with information about the annotated methods.
TODO(bulach): return a CalledByNative object.
Raises:
ParseError: if unable to parse.
"""
called_by_natives = []
for match in re.finditer(RE_CALLED_BY_NATIVE, contents):
called_by_natives += [CalledByNative(
system_class=False,
unchecked='Unchecked' in match.group('Unchecked'),
static='static' in match.group('prefix'),
java_class_name=match.group('annotation') or '',
return_type=match.group('return_type'),
name=match.group('name'),
params=JniParams.Parse(match.group('params')))]
# Check for any @CalledByNative occurrences that weren't matched.
unmatched_lines = re.sub(RE_CALLED_BY_NATIVE, '', contents).split('\n')
for line1, line2 in zip(unmatched_lines, unmatched_lines[1:]):
if '@CalledByNative' in line1:
raise ParseError('could not parse @CalledByNative method signature',
line1, line2)
return MangleCalledByNatives(called_by_natives)
| 5,339,079
|
def test_raw_html_642a():
"""
Test case 642a: variation on 642 with a closing tag character without a valid closing tag name
"""
# Arrange
source_markdown = """</>"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\a<\a<\a/\a>\a>\a:]",
"[end-para:::True]",
]
expected_gfm = """<p></></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| 5,339,080
|
def deckedit(ctx: EndpointContext) -> None:
"""Handles requests for the deck editor page.
Serves the deck editor template.
Args:
ctx: The request's context.
"""
# Populate symbol table
symtab = {"nickname": ctx.session["nickname"],
"theme": ctx.session["theme"],
"showLogout": "" if DeckeditController.logout_shown else None}
# Parse the template
data = Parser.get_template("./res/tpl/deckedit.html", symtab)
ctx.ok("text/html; charset=utf-8", data)
| 5,339,081
|
def knn_python(input_x, dataset, labels, k):
"""
    :param input_x: the input vector to be classified
    :param dataset: the training samples used as references for the distance computation
    :param labels: the class labels of the training samples
    :param k: the number of nearest neighbours to consider
"""
    # 1. Compute the Euclidean distance between the query sample and every reference sample
dist = np.sum((input_x - dataset) ** 2, axis=1) ** 0.5
    # 2. Collect the labels of the k nearest samples
k_labels = [labels[index] for index in dist.argsort()[0: k]]
    # 3. The most frequent label among them is the final predicted class
label = Counter(k_labels).most_common(1)[0][0]
return label
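A small self-contained sketch, assuming numpy and collections.Counter are imported as in the snippet above:

import numpy as np

train = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
train_labels = ['A', 'A', 'B', 'B']
query = np.array([0.1, 0.2])
print(knn_python(query, train, train_labels, k=3))  # -> 'B'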
| 5,339,082
|
def charReplace(contentData, modificationFlag):
"""
Attempts to convert PowerShell char data types using Hex and Int values into ASCII.
Args:
contentData: [char]101
modificationFlag: Boolean
Returns:
contentData: "e"
modificationFlag: Boolean
"""
startTime = datetime.now()
# Hex needs to go first otherwise the 0x gets gobbled by second Int loop/PCRE (0x41 -> 65 -> "A")
for value in re.findall("\[char\]0x[0-9a-z]{1,2}", contentData):
charConvert = int(value.split("]")[1], 0)
if 10 <= charConvert <= 127:
contentData = contentData.replace(value, '"%s"' % chr(charConvert))
modificationFlag = True
# Int values
for value in re.findall("\[char\][0-9]{1,3}", contentData, re.IGNORECASE):
charConvert = int(value.split("]")[1])
if 10 <= charConvert <= 127:
contentData = contentData.replace(value, '"%s"' % chr(charConvert))
modificationFlag = True
if debugFlag:
print("\t[!] Char Replace - %s: %s" % (modificationFlag, datetime.now() - startTime))
return contentData, modificationFlag
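A brief illustration, assuming debugFlag is defined at module level and datetime is imported, as the snippet implies:

text, changed = charReplace('[char]0x41 + [char]66', False)
print(text, changed)  # '"A" + "B"' True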
| 5,339,083
|
def _prepare_config(separate, resources, flavor_ref,
git_command, zip_patch,
directory, image_ref, architecture, use_arestor):
"""Prepare the Argus config file."""
conf = six.moves.configparser.SafeConfigParser()
conf.add_section("argus")
conf.add_section("openstack")
conf.set("argus", "output_directory", os.path.join(directory, "output"))
conf.set("argus", "argus_log_file", os.path.join(directory, "argus.log"))
conf.set("argus", "git_command", str(git_command))
conf.set("argus", "patch_install", str(zip_patch))
conf.set("argus", "log_each_scenario", str(separate))
conf.set("argus", "arch", str(architecture))
conf.set("argus", "use_arestor", str(use_arestor))
conf.set("openstack", "image_ref", str(image_ref))
if resources:
conf.set("argus", "resources", str(resources))
if flavor_ref:
conf.set("openstack", "flavor_ref", str(flavor_ref))
config_path = os.path.join(directory, "argus.conf")
with open(config_path, 'w') as file_handle:
conf.write(file_handle)
return config_path
| 5,339,084
|
def print_verb_conjugation(verb):
"""Print a verb conjugations in different output formats"""
if parameters["ABU output"]:
print_ABU_inflections(verb)
elif parameters["DELA output"]:
print_DELA_inflections(verb)
elif parameters["Display columns"] == 1:
print_verb_conjugation_odd_columns(verb)
else:
print_verb_conjugation_even_columns(verb)
| 5,339,085
|
def all_logit_coverage_function(coverage_batches):
"""Computes coverage based on the sum of the absolute values of the logits.
Args:
coverage_batches: Numpy arrays containing coverage information pulled from
a call to sess.run. In this case, we assume that these correspond to a
batch of logits.
Returns:
    A list with one single-element numpy array per batch element, each holding
    the sum of the absolute values of that element's logits.
"""
coverage_batch = coverage_batches[0]
coverage_list = []
for idx in range(coverage_batch.shape[0]):
elt = coverage_batch[idx]
elt = np.expand_dims(np.sum(np.abs(elt)), 0)
coverage_list.append(elt)
return coverage_list
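For instance, with a batch of two logit vectors:

import numpy as np

logits = np.array([[1.0, -2.0],
                   [0.5,  0.5]])
print(all_logit_coverage_function([logits]))  # [array([3.]), array([1.])]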
| 5,339,086
|
async def create_audio(request):
"""Process the request from the 'asterisk_ws_monitor' and creates the audio file"""
try:
message = request.rel_url.query["message"]
except KeyError:
message = None
LOGGER.error(f"No 'message' parameter passed on: '{request.rel_url}'")
raise web.HTTPClientError(
reason=GENERATE_AUDIO_ERROR, body=None, text=None, content_type=None
)
try:
msg_chk_sum = request.rel_url.query["msg_chk_sum"]
except KeyError:
msg_chk_sum = None
LOGGER.error(f"No 'msg_chk_sum' parameter passed on: '{request.rel_url}'")
raise web.HTTPClientError(
reason=GENERATE_AUDIO_ERROR, body=None, text=None, content_type=None
)
inner_loop = asyncio.get_running_loop()
executor = ThreadPoolExecutor(max_workers=NUM_OF_CPUS)
futures = inner_loop.run_in_executor(
executor, create_audio_file, message, msg_chk_sum
)
try:
await asyncio.ensure_future(futures)
status_code = 200
except Exception as e:
status_code = 500
LOGGER.error(f"Unable to generate the audio file: '{e}'")
return web.json_response({"status": status_code})
| 5,339,087
|
def checkConfigChanges(configuration, director):
"""
    A scheduled checker for configuration changes.
"""
if not configuration.manager.configCheckEnabled:
return
util.saveConfig(configuration, director)
| 5,339,088
|
def reinterpret_axis(block, axis, label, scale=None, units=None):
""" Manually reinterpret the scale and/or units on an axis """
def header_transform(hdr, axis=axis, label=label, scale=scale, units=units):
tensor = hdr['_tensor']
if isinstance(axis, basestring):
axis = tensor['labels'].index(axis)
if label is not None:
tensor['labels'][axis] = label
if scale is not None:
tensor['scales'][axis] = scale
if units is not None:
tensor['units'][axis] = units
return hdr
return block_view(block, header_transform)
| 5,339,089
|
def duel(board_size, player_map):
"""
:param board_size: the board size (i.e. a 2-tuple)
:param player_map: a dict, where the key is an int, 0 or 1, representing the player, and the value is the policy
:return: the resulting game outcomes
"""
board_state = init_board_state(board_size)
results = {p: {"won": 0, "lost": 0, "tied": 0} for p in player_map}
for player in player_map:
for edge_index in range(len(board_state)):
players = [player, (1 - player)]
if edge_index % 2 == 0:
players = [x for x in reversed(players)]
game = Game(board_size, players)
current_player = game.get_current_player()
# select the first edge for the first player
current_player, _ = game.select_edge(edge_index, current_player)
while not game.is_finished():
state = game.get_board_state()
edge = player_map[current_player].select_edge(
state, game.get_score(current_player), game.get_score(1 - current_player))
current_player, _ = game.select_edge(edge, current_player)
p0_score = game.get_score(0)
p1_score = game.get_score(1)
if p0_score > p1_score:
results[0]["won"] += 1
results[1]["lost"] += 1
if p1_score > p0_score:
results[1]["won"] += 1
results[0]["lost"] += 1
if p0_score == p1_score:
results[0]["tied"] += 1
results[1]["tied"] += 1
return results
| 5,339,090
|
def set_processor_type(*args):
"""
set_processor_type(procname, level) -> bool
Set target processor type. Once a processor module is loaded, it
cannot be replaced until we close the idb.
@param procname: name of processor type (one of names present in
\ph{psnames}) (C++: const char *)
@param level: SETPROC_ (C++: setproc_level_t)
@return: success
"""
return _ida_idp.set_processor_type(*args)
| 5,339,091
|
def convert_repeat(node, **kwargs):
"""Map MXNet's repeat operator attributes to onnx's Tile operator.
"""
from onnx.helper import make_node
from onnx import TensorProto
name, input_nodes, attrs = get_inputs(node, kwargs)
opset_version = kwargs['opset_version']
if opset_version < 11:
raise AttributeError('ONNX opset 11 or greater is required to export this operator')
repeats = int(attrs.get('repeats', 1))
axis = attrs.get('axis', 'None')
if repeats <= 0:
raise NotImplementedError('repeat operator does not support parameter repeats==0')
nodes = []
if axis == 'None':
create_tensor([-1], name+'_-1', kwargs['initializer'])
create_tensor([repeats], name+'_rep', kwargs['initializer'])
create_tensor([1, repeats], name+'_repeats', kwargs['initializer'])
nodes += [
make_node('Shape', [input_nodes[0]], [name+'_shape']),
make_node('ReduceProd', [name+'_shape'], [name+'_size']),
make_node('Reshape', [input_nodes[0], name+'_size'], [name+'_flat']),
make_node('Unsqueeze', [name+'_flat', name+'_-1'], [name+'_unsqueeze']),
make_node('Tile', [name+'_unsqueeze', name+'_repeats'], [name+'_tile']),
make_node('Mul', [name+'_size', name+'_rep'], [name+'_new_size']),
make_node('Reshape', [name+'_tile', name+'_new_size'], [name], name=name)
]
else:
axis = int(axis)
repeats -= 1
create_tensor([repeats], name+'_repeats', kwargs['initializer'])
create_tensor([1], name+'_1', kwargs['initializer'])
create_tensor([0], name+'_0', kwargs['initializer'])
create_tensor([axis], name+'_axis', kwargs['initializer'])
create_const_scalar_node(name+"_0_s", np.int64(0), kwargs)
create_const_scalar_node(name+"_1_s", np.int64(1), kwargs)
nodes += [
make_node('Shape', [input_nodes[0]], [name+'_shape']),
make_node('Shape', [name+'_shape'], [name+'_dim']),
make_node('Squeeze', [name+'_dim', name+'_0'], [name+'_dim_s']),
make_node('Range', [name+'_0_s', name+'_dim_s', name+'_1_s'], [name+'_range'])
]
if axis < 0:
nodes += [
make_node('Add', [name+'_axis', name+'_dim'], [name+'_true_axis']),
make_node('Equal', [name+'_range', name+'_true_axis'], [name+'_one_hot'])
]
else:
nodes += [
make_node('Equal', [name+'_range', name+'_axis'], [name+'_one_hot'])
]
nodes += [
make_node('Cast', [name+'_one_hot'], [name+'_one_hot_int'], to=int(TensorProto.INT64)),
make_node('Mul', [name+'_repeats', name+'_one_hot_int'], [name+'_mul']),
make_node('Add', [name+'_mul', name+'_1'], [name+'_add']),
make_node('Concat', [name+'_1', name+'_add'], [name+'_repeats_tensor'], axis=0)
]
if axis == -1:
nodes += [
make_node('Concat', [name+'_shape', name+'_1'], [name+'_unsqueeze_shape'], axis=0),
make_node('Reshape', [input_nodes[0], name+'_unsqueeze_shape'],
[name+'_unsqueeze'])
]
else:
create_tensor([axis+1], name+'_axis+1', kwargs['initializer'])
nodes += [
make_node('Unsqueeze', [input_nodes[0], name+'_axis+1'], [name+'_unsqueeze'])
]
nodes += [
make_node('Tile', [name+'_unsqueeze', name+'_repeats_tensor'], [name+'_tile']),
make_node('Mul', [name+'_shape', name+'_add'], [name+'_new_shape']),
make_node('Reshape', [name+'_tile', name+'_new_shape'], [name], name=name)
]
return nodes
| 5,339,092
|
def on_message(client, _u, msg):
"""
defines callback for message handling, inits db table from first data row received
"""
data = json.loads(msg.payload.decode("utf-8"))[0]
if not client.db_con.table_created:
client.data_is_dict = init_table(data, client.db_cols, client, client.env_args)
if "messtellen" in data:
df = create_energy_box_df(data, client.db_con.cols)
else:
if client.data_is_dict:
df = pandas.DataFrame(list(data.values()), columns=list(data.keys()))
else:
df = pandas.DataFrame(list(data.values()))
client.db_con.append_table(df, client.env_args.timescaledb_table_name)
logging.info(f"wrote {data} to table {client.env_args.timescaledb_table_name}")
| 5,339,093
|
def my_quote(s, safe = '/'):
"""quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted.
RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
the following reserved characters.
reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
"$" | ","
Each of these characters is reserved in some component of a URL,
but not necessarily in all of them.
By default, the quote function is intended for quoting the path
section of a URL. Thus, it will not encode '/'. This character
is reserved, but in typical usage the quote function is being
called on a path where the existing slash characters are used as
reserved characters.
"""
cachekey = (safe, always_safe)
try:
safe_map = _safemaps[cachekey]
except KeyError:
safe += always_safe
safe_map = {}
for i in range(256):
c = chr(i)
safe_map[c] = (c in safe) and c or ('%%%02x' % i)
_safemaps[cachekey] = safe_map
res = map(safe_map.__getitem__, s)
return ''.join(res)
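A usage sketch, assuming the module-level always_safe string and _safemaps cache exist as in urllib's quote implementation:

print(my_quote('abc def'))         # 'abc%20def' -- '/' is left untouched by default
print(my_quote('a/b c', safe=''))  # 'a%2fb%20c' -- '/' is quoted once removed from safe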
| 5,339,094
|
def test_onfail_requisite_no_state_module(state, state_tree):
"""
Tests a simple state using the onfail requisite
"""
sls_contents = """
failing_state:
cmd.run:
- name: asdf
non_failing_state:
cmd.run:
- name: echo "Non-failing state"
test_failing_state:
cmd.run:
- name: echo "Success!"
- onfail:
- failing_state
test_non_failing_state:
cmd.run:
- name: echo "Should not run"
- onfail:
- non_failing_state
"""
with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
ret = state.sls("requisite")
assert (
ret['cmd_|-test_failing_state_|-echo "Success!"_|-run'].comment
== 'Command "echo "Success!"" run'
)
assert (
ret['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run'].comment
== "State was not run because onfail req did not change"
)
| 5,339,095
|
def GetExperimentStatus(experiment, knobs, exp_data, track='stable'):
"""Determine the status and source of a given experiment.
Take into account all ways that a given experiment may be enabled and allow
the client to determine why a given experiment has a particular status.
Experiments at 100% are always on.
If the machine is set to ignore experiments, it will ignore any experiments
not at 100%.
If the machine is set to always apply experiments, the experiment will be on.
If the machine is in an explicitly enabled track, the experiment will be on.
If the machine is manually opted in or out, that option is applied.
Otherwise the bucket algorithm is applied.
Args:
experiment: a string identifier for a given experiment.
knobs: knobs for a host (in dict form)
exp_data: a dict containing experiment data (yaml.load(...))
track: a string of the machine's release track
Returns:
an object with three attributes, status, source, and rollout_percent
"""
ReturnEarly = lambda ret: ret.source is not None # pylint: disable=g-bad-name
ret = type('obj', (object,), {})
ret.status = DISABLED
ret.source = None
ret.rollout_percent = float(exp_data.get(experiment, {}).get(PERCENT_KEY, -1))
if ret.rollout_percent == 100:
logging.debug('Experiment %s is at 100%%, enabling', experiment)
ret.status = ENABLED
ret.source = ALWAYS
return ret
auto_knob = knobs.get(EXPERIMENTS_KNOB, 'recommended')
if auto_knob == ALWAYS:
ret.status = ENABLED
ret.source = ALWAYS
elif auto_knob == NEVER:
ret.status = DISABLED
ret.source = ALWAYS
if ReturnEarly(ret): return ret
manual_on_knob = knobs.get(MANUAL_ON_KNOB, [])
manual_off_knob = knobs.get(MANUAL_OFF_KNOB, [])
if experiment in manual_on_knob:
ret.status = ENABLED
ret.source = MANUAL
elif experiment in manual_off_knob:
ret.status = DISABLED
ret.source = MANUAL
if ReturnEarly(ret): return ret
enable_unstable = exp_data.get(experiment, {}).get(ENABLE_UNSTABLE, False)
enable_testing = exp_data.get(experiment, {}).get(ENABLE_TESTING, False)
if ((track == 'testing' and enable_testing) or
(track == 'unstable' and (enable_unstable or enable_testing))):
ret.status = ENABLED
ret.source = ALWAYS
if ReturnEarly(ret): return ret
try:
mach_uuid = FetchUUID()
  except ExperimentsError as e:
raise MissingUUID(e)
logging.debug('Found uuid %s', mach_uuid)
return ExperimentIsBucket(experiment, exp_data, mach_uuid)
| 5,339,096
|
def test_plot_crystal_field_calculation():
"""
Test of the plot illustrating the potential and charge density going into the calculation
"""
from masci_tools.tools.cf_calculation import CFCalculation, plot_crystal_field_calculation
cf = CFCalculation()
cf.readPot('files/cf_calculation/CFdata.hdf')
cf.readCDN('files/cf_calculation/CFdata.hdf')
plt.gcf().clear()
plot_crystal_field_calculation(cf, show=False)
return plt.gcf()
| 5,339,097
|
async def submit_changesheet(
uploaded_file: UploadFile = File(...),
mdb: MongoDatabase = Depends(get_mongo_db),
user: User = Depends(get_current_active_user),
):
"""
Example changesheet [here](https://github.com/microbiomedata/nmdc-runtime/blob/main/metadata-translation/notebooks/data/changesheet-without-separator3.tsv).
"""
allowed_to_submit = ("dehays", "dwinston")
if user.username not in allowed_to_submit:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail=(
f"Only users {allowed_to_submit} "
"are allowed to apply changesheets at this time."
),
)
sheet_in = await raw_changesheet_from_uploaded_file(uploaded_file)
df_change = df_from_sheet_in(sheet_in, mdb)
_ = _validate_changesheet(df_change, mdb)
# create object (backed by gridfs). use "gfs0" id shoulder for drs_object access_id.
sheet_id = generate_one_id(mdb, ns="changesheets", shoulder="gfs0")
mdb_fs = GridFS(mdb)
filename = re.sub(r"[^A-Za-z0-9\.\_\-]", "_", sheet_in.name)
PortableFilename(filename) # validates
sheet_text = sheet_in.text
drs_id = local_part(sheet_id)
DrsId(drs_id) # validates
mdb_fs.put(
sheet_text,
_id=drs_id,
filename=filename,
content_type=sheet_in.content_type,
encoding="utf-8",
)
with tempfile.TemporaryDirectory() as save_dir:
filepath = str(Path(save_dir).joinpath(filename))
with open(filepath, "w") as f:
f.write(sheet_text)
object_in = DrsObjectIn(
**drs_metadata_for(
filepath,
base={
"description": f"changesheet submitted by {user.username}",
"access_methods": [{"access_id": drs_id}],
},
)
)
self_uri = f"drs://{HOSTNAME_EXTERNAL}/{drs_id}"
drs_obj_doc = _create_object(
mdb, object_in, mgr_site="nmdc-runtime", drs_id=drs_id, self_uri=self_uri
)
doc_after = mdb.objects.find_one_and_update(
{"id": drs_obj_doc["id"]},
{"$set": {"types": ["metadata-changesheet"]}},
return_document=ReturnDocument.AFTER,
)
return doc_after
| 5,339,098
|
def get_notes(request, course, page=DEFAULT_PAGE, page_size=DEFAULT_PAGE_SIZE, text=None):
"""
Returns paginated list of notes for the user.
Arguments:
request: HTTP request object
course: Course descriptor
page: requested or default page number
page_size: requested or default page size
text: text to search. If None then return all results for the current logged in user.
Returns:
Paginated dictionary with these key:
start: start of the current page
current_page: current page number
next: url for next page
previous: url for previous page
count: total number of notes available for the sent query
num_pages: number of pages available
results: list with notes info dictionary. each item in this list will be a dict
"""
path = 'search' if text else 'annotations'
response = send_request(request.user, course.id, page, page_size, path, text)
try:
collection = json.loads(response.content.decode('utf-8'))
except ValueError:
log.error("Invalid JSON response received from notes api: response_content=%s", response.content)
raise EdxNotesParseError(_("Invalid JSON response received from notes api.")) # lint-amnesty, pylint: disable=raise-missing-from
# Verify response dict structure
expected_keys = ['total', 'rows', 'num_pages', 'start', 'next', 'previous', 'current_page']
keys = list(collection.keys())
if not keys or not all(key in expected_keys for key in keys):
log.error("Incorrect data received from notes api: collection_data=%s", str(collection))
raise EdxNotesParseError(_("Incorrect data received from notes api."))
filtered_results = preprocess_collection(request.user, course, collection['rows'])
# Notes API is called from:
# 1. The annotatorjs in courseware. It expects these attributes to be named "total" and "rows".
# 2. The Notes tab Javascript proxied through LMS. It expects these attributes to be called "count" and "results".
collection['count'] = collection['total']
del collection['total']
collection['results'] = filtered_results
del collection['rows']
collection['next'], collection['previous'] = construct_pagination_urls(
request,
course.id,
collection['next'],
collection['previous']
)
return collection
| 5,339,099
|