content (string, lengths 35 to 762k)
| sha1 (string, length 40)
| id (int64, 0 to 3.66M)
|
|---|---|---|
def group_node_intro_times(filt, groups, n_sents):
"""
Returns lists of addition times of nodes into particular groups
"""
devs = [[] for _ in range(len(set(groups)))]
for i in range(len(groups)):
intro = int(filt[i, i])
devs[groups[i]].append(intro/n_sents) # still normalize addition time
return devs
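A minimal usage sketch, assuming `filt` is a square array whose diagonal holds per-node introduction times (the values below are made up for illustration):
import numpy as np
filt = np.diag([2, 4, 6])
group_node_intro_times(filt, groups=[0, 1, 0], n_sents=10)
# -> [[0.2, 0.6], [0.4]]  (nodes 0 and 2 fall into group 0, node 1 into group 1)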
|
b5da0e97c76683201a9b81fce1b1f1c7f25e4d6d
| 3,640,500
|
def svn_client_version():
"""svn_client_version() -> svn_version_t const *"""
return _client.svn_client_version()
|
2ffab063bce4e32010eb1f3aa57306e60a0f2417
| 3,640,501
|
import os
import numpy as np
def get_slice_name(data_dir, imname, delta=0):
"""Infer slice name with an offset"""
if delta == 0:
imname = imname + '.npy'
#print('imname0000',imname )
return imname
delta = int(delta)
dirname, slicename = imname.split(os.sep)
#slice_idx = int(slicename[:-4])
slice_idx = int(slicename)
#imname1 = '%s%s%03d.npy' % (dirname, os.sep, slice_idx + delta)
imname1 = '%s%s' % (dirname, os.sep) + str(slice_idx + delta) + '.npy'
#print('imname11111',imname1 )
# if the slice is not in the dataset, use its neighboring slice
while not os.path.exists(os.path.join(data_dir, imname1)):
# print('file not found:', imname1)
delta -= np.sign(delta)
#imname1 = '%s%s%03d.npy' % (dirname, os.sep, slice_idx + delta)
imname1 = '%s%s' % (dirname, os.sep) + str(slice_idx + delta) + '.npy'
if delta == 0:
break
return imname1
|
e62988a20af829a5c821b0f8e0de971d8257f3cc
| 3,640,502
|
import os
from math import ceil
from urllib.request import urlopen
from tqdm import tqdm
def download(name, destination=None, chunksize=4096, force=False):
"""
Checks whether an up-to-date version of the specified file exists on the device,
and if not, downloads it from the servers.
Files, their checksums and links to them must be specified
in the tps.downloader._content dictionary.
:param name: str
Name of the file.
:param destination: Optional[str]
See get_download_dir:data_dir
:param chunksize: int
Chunk size in bytes used while downloading the file.
:param force: bool
If True, re-download the file even if an up-to-date local copy already exists.
:return: str
Path to the file.
"""
try:
url, checksum = _content_info[name]
except KeyError:
logger.warning("There is no file named {} in content dictionary, None will be returned. Possible names: {}".
format(name, list(_content_info.keys())))
return
if not force:
filepath = find(name, destination, False, checksum)
if filepath is not None:
return filepath
destination = get_download_dir(destination)
if destination is None:
logger.warning("Can not download file due to access permissions.")
return
filepath = os.path.join(destination, name)
if os.path.exists(filepath) and checksum == calc_checksum(filepath):
logger.info("The actual version of the file is already downloaded and can be found along the path: {}"
.format(filepath))
return filepath
try:
infile = urlopen(url)
length = infile.length
chunk_n = ceil(length / chunksize)
with open(filepath, "wb") as outfile:
for _ in tqdm(range(chunk_n)):
chunk = infile.read(chunksize)
outfile.write(chunk)
if not chunk:
break
infile.close()
except IOError as e:
logger.error("Error downloading {} from {}:\n{}".format(name, url, e))
return
return filepath
|
62781bba6908f2e103115a01580c6df284f3aeb3
| 3,640,503
|
import socket
def getportnum(port):
"""
Accepts a port name or number and returns the port number as an int.
Returns -1 in case of invalid port name.
"""
try:
portnum = int(port)
if portnum < 0 or portnum > 65535:
logger.error("invalid port number: %s" % port)
portnum = -1
except (ValueError, TypeError):
try:
p = socket.getservbyname(port)
portnum = int(p)
except socket.error as e:
logger.error("%s: %s" % (e, port))
portnum = -1
return portnum
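A brief illustration, assuming `socket` and a module-level `logger` are available; the service-name lookup depends on the local services database:
getportnum("8080")   # -> 8080
getportnum("http")   # -> 80 on systems that map the "http" service name
getportnum("70000")  # -> -1 (out of range, logged as an error)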
|
7a5e287a0014afc1fa2933fcaae389a3c8aa50e8
| 3,640,504
|
def getParafromMinibatchModel(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
num_epochs = 1500, minibatch_size = 32, print_cost = True):
"""
Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.
Arguments:
X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
X_test -- test set, of shape (input size = 12288, number of test examples = 120)
Y_test -- test labels, of shape (output size = 6, number of test examples = 120)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
(n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)
n_y = Y_train.shape[0] # n_y : output size
costs = [] # To keep track of the cost
# Create Placeholders of shape (n_x, n_y)
X, Y = create_placeholders(n_x, n_y)
# Initialize parameters
parameters = initialize_parameters()
# Forward propagation: Build the forward propagation in the tensorflow graph
z3 = forward_propagation(X, parameters)
# Cost function: Add cost function to tensorflow graph
cost = compute_cost(z3, Y)
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# Initialize all the variables
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
minibatch_cost = 0.
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
minibatches = random_mini_batches(X_train, Y_train, minibatch_size)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the optimizer and the cost, the feedict should contain a minibatch for (X,Y).
_ , temp_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
minibatch_cost += temp_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 100 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
if print_cost == True and epoch % 5 == 0:
costs.append(minibatch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# lets save the parameters in a variable
parameters = sess.run(parameters)
print ("Parameters have been trained!")
# Calculate the correct predictions
correct_prediction = tf.equal(tf.argmax(z3), tf.argmax(Y))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
return parameters
|
2716d4a02b0ce5a0a7a640f80a51f180d280eb0d
| 3,640,505
|
def add_land(
ax=None, scale="10m", edgecolor=None, facecolor=None, linewidth=None, **kwargs
):
"""Add land to an existing map
Parameters
----------
ax : matplotlib axes object, optional
scale : str, optional
Resolution of NaturalEarth data to use ('10m', '50m', or '110m').
edgecolor : str or tuple, optional
Color to use for the landmass edges.
facecolor : str or tuple, optional
Color to use for the landmass faces.
linewidth : float, optional
Width of land edge in points
Other Parameters
----------------
Keyword args are passed on to NaturalEarthFeature.
Returns
-------
FeatureArtist
"""
if ax is None:
ax = plt.gca()
edgecolor = edgecolor or plt.rcParams.get(
"pyseas.border.color", props.dark.border.color
)
facecolor = facecolor or plt.rcParams.get(
"pyseas.land.color", props.dark.land.color
)
linewidth = linewidth or plt.rcParams.get("pyseas.border.linewidth", 0.4)
land = cfeature.NaturalEarthFeature(
"physical",
"land",
scale,
edgecolor=edgecolor,
facecolor=facecolor,
linewidth=linewidth,
**kwargs,
)
return ax.add_feature(land)
|
c2a5e97a7e6cb76ffe4a70b754fe86c59b71eb05
| 3,640,506
|
import CLSIDToClass
def EnsureDispatch(prog_id, bForDemand = 1): # New fn, so we default the new demand feature to on!
"""Given a COM prog_id, return an object that is using makepy support, building if necessary"""
disp = win32com.client.Dispatch(prog_id)
if not disp.__dict__.get("CLSID"): # Eeek - no makepy support - try and build it.
try:
ti = disp._oleobj_.GetTypeInfo()
disp_clsid = ti.GetTypeAttr()[0]
tlb, index = ti.GetContainingTypeLib()
tla = tlb.GetLibAttr()
mod = EnsureModule(tla[0], tla[1], tla[3], tla[4], bForDemand=bForDemand)
GetModuleForCLSID(disp_clsid)
# Get the class from the module.
disp_class = CLSIDToClass.GetClass(str(disp_clsid))
disp = disp_class(disp._oleobj_)
except pythoncom.com_error:
raise TypeError("This COM object can not automate the makepy process - please run makepy manually for this object")
return disp
|
9f9ed2d87ab5c0329ce729a4fca3078daf6e8d17
| 3,640,507
|
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute log probabilities within a job."""
n_samples = X.shape[0]
log_proba = np.empty((n_samples, n_classes))
log_proba.fill(-np.inf)
all_classes = np.arange(n_classes, dtype=int)
for estimator, features in zip(estimators, estimators_features):
log_proba_estimator = estimator.predict_log_proba(X[:, features])
if n_classes == len(estimator.classes_):
log_proba = np.logaddexp(log_proba, log_proba_estimator)
else:
log_proba[:, estimator.classes_] = np.logaddexp(
log_proba[:, estimator.classes_],
log_proba_estimator[:, range(len(estimator.classes_))])
missing = np.setdiff1d(all_classes, estimator.classes_)
log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
-np.inf)
return log_proba
|
a42510017d8b14ddf8f97de5902f6e1fb223da0d
| 3,640,508
|
import numpy
import cv2
def _get_circles(img, board, pattern):
"""
Get circle centers for a symmetric or asymmetric grid
"""
h = img.shape[0]
w = img.shape[1]
if len(img.shape) == 3 and img.shape[2] == 3:
mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
mono = img
flag = cv2.CALIB_CB_SYMMETRIC_GRID
if pattern == Patterns.ACircles:
flag = cv2.CALIB_CB_ASYMMETRIC_GRID
mono_arr = numpy.array(mono)
(ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows), flags=flag)
# In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try
# again with dimensions swapped - not so efficient.
# TODO Better to add as second board? Corner ordering will change.
if not ok and pattern == Patterns.Circles:
(ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols), flags=flag)
return (ok, corners)
|
c2a3d207d73a600250c65480b5cba7a4d9e62a04
| 3,640,509
|
import numpy as np
from numpy import arctan2, clip, concatenate, dot
from numpy.linalg import det
def sort_coords(coords: np.ndarray) -> np.ndarray:
"""Sort coordinates based on the angle with first coord from the center.
Args:
coords (np.ndarray):
Coordinates to be sorted. The format of coords is as follows.
np.array([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]])
Returns:
np.ndarray for sorted coordinates.
"""
if len(coords[0]) != 3:
raise ValueError("Only valid for 3D vector")
center = np.average(coords, axis=0)
relative_coords = coords - center
external_prod = np.cross(relative_coords[0], relative_coords[1])
if abs(np.linalg.norm(external_prod)) < 1e-8: # Skip parallel vectors.
external_prod = np.cross(relative_coords[0], relative_coords[2])
normal_to_12_plane = external_prod / np.linalg.norm(external_prod)
v0 = relative_coords[0] / np.linalg.norm(relative_coords[0])
def angle_between_v0(index: int) -> float:
"""
Args:
index (int): index of coords.
Returns (float):
Angle between rays from the center to rel_coords[0] and
rel_coords[index].
"""
v = relative_coords[index] / np.linalg.norm(relative_coords[index])
matrix = concatenate(([v0], [v], [normal_to_12_plane]), axis=0)
determinant = det(matrix)
angle = arctan2(clip(dot(v0, v), -1.0, 1.0), determinant)
return angle
indices = [i for i in range(len(coords))]
indices.sort(key=angle_between_v0)
return coords[indices]
|
a05390e56e57e66e6f288096fd4583bee26da88f
| 3,640,510
|
import warnings
from fractions import Fraction
def to_fraction(value, den_limit=65536):
"""
Converts *value*, which can be any numeric type, an MMAL_RATIONAL_T, or a
(numerator, denominator) tuple to a :class:`~fractions.Fraction` limiting
the denominator to the range 0 < n <= *den_limit* (which defaults to
65536).
"""
try:
# int, long, or fraction
n, d = value.numerator, value.denominator
except AttributeError:
try:
# float
n, d = value.as_integer_ratio()
except AttributeError:
try:
n, d = value.num, value.den
except AttributeError:
try:
# tuple
n, d = value
warnings.warn(
PiCameraDeprecated(
"Setting framerate or gains as a tuple is "
"deprecated; please use one of Python's many "
"numeric classes like int, float, Decimal, or "
"Fraction instead"))
except (TypeError, ValueError):
# try and convert anything else to a Fraction directly
value = Fraction(value)
n, d = value.numerator, value.denominator
# Ensure denominator is reasonable
if d == 0:
raise PiCameraValueError("Denominator cannot be 0")
elif d > den_limit:
return Fraction(n, d).limit_denominator(den_limit)
else:
return Fraction(n, d)
|
6ee8c13ab17e08480f13012a834d5d928a7d4f51
| 3,640,511
|
from bisect import bisect_left
def find_closest(myList, myNumber):
"""
Returns closest value to myNumber.
If two numbers are equally close, return the smallest number.
# adapted from
# https://stackoverflow.com/questions/12141150/from-list-of-integers-get-number-closest-to-a-given-value
"""
sortList = sorted(myList)
# print(sortList)
pos = bisect_left(sortList, myNumber)
if pos == 0:
return sortList[0]
if pos == len(sortList):
return sortList[-1]
before = sortList[pos - 1]
after = sortList[pos]
if after - myNumber < myNumber - before:
return after
else:
return before
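A quick doctest-style check (assumes `bisect_left` from the standard `bisect` module is in scope, as added above):
>>> find_closest([10, 4, 7, 1], 5)
4
>>> find_closest([10, 4, 7, 1], 9)
10
>>> find_closest([1, 3], 2)   # tie -> the smaller value
1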
|
0e4b6e2932aa4bb1886627e831d90d3a339b73b6
| 3,640,512
|
from pathlib import Path
def remove_uuid_file(file_path, dry=False):
"""
Renames a file without the UUID and returns the new pathlib.Path object
"""
file_path = Path(file_path)
name_parts = file_path.name.split('.')
if not is_uuid_string(name_parts[-2]):
return file_path
name_parts.pop(-2)
new_path = file_path.parent.joinpath('.'.join(name_parts))
if not dry and file_path.exists():
file_path.replace(new_path)
return new_path
|
f2c8aa77595081ff968596340b45f61c490d16ec
| 3,640,513
|
def get_9x9x9_scramble(n=120):
""" Gets a random scramble (SiGN notation) of length `n` for a 9x9x9 cube. """
return _MEGA_SCRAMBLER.call("megaScrambler.get999scramble", n)
|
7f5d11ad8cec05de5165fa0f90da40a7d9f17d97
| 3,640,514
|
import re
def youku(link):
"""Find youku player URL."""
pattern = r'http:\/\/v\.youku\.com\/v_show/id_([\w]+)\.html'
match = re.match(pattern, link)
if not match:
return None
return 'http://player.youku.com/embed/%s' % match.group(1)
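An example with a made-up video id (hypothetical, for illustration only):
>>> youku('http://v.youku.com/v_show/id_XMTIzNDU2Nzg5MA.html')
'http://player.youku.com/embed/XMTIzNDU2Nzg5MA'
>>> youku('https://example.com/not-youku') is None
True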
|
efcf1394cc02503a1ae18d91abee34777958e545
| 3,640,515
|
from typing import Optional
import pulumi
def get_default_service_account(project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDefaultServiceAccountResult:
"""
Use this data source to retrieve default service account for this project
:param str project: The project ID. If it is not provided, the provider project is used.
"""
__args__ = dict()
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:compute/getDefaultServiceAccount:getDefaultServiceAccount', __args__, opts=opts, typ=GetDefaultServiceAccountResult).value
return AwaitableGetDefaultServiceAccountResult(
display_name=__ret__.display_name,
email=__ret__.email,
id=__ret__.id,
name=__ret__.name,
project=__ret__.project,
unique_id=__ret__.unique_id)
|
0d0c859771a11fe9a0772b9dd4aa2597a9081bd3
| 3,640,516
|
def combinationSum(candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
result = []
candidates = sorted(candidates)
def dfs(remain, stack):
if remain == 0:
result.append(stack)
return
for item in candidates:
if item > remain:
break
if stack and item < stack[-1]:
continue
else:
dfs(remain - item, stack + [item])
dfs(target, [])
return result
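A worked example (the classic combination-sum input):
>>> combinationSum([2, 3, 6, 7], 7)
[[2, 2, 3], [7]]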
|
e8739c196c84aa7d15712ba1007e602a330fd625
| 3,640,517
|
from django.core.files.base import ContentFile
from django_webdav_storage.compat import PY3, TEXT_TYPE
import uuid
import os
def create_file(webdav_storage):
"""
Creates a file with a unique prefix in the WebDAV storage
"""
def inner(filename, content=b'', prefix=''):
if all((PY3, isinstance(content, TEXT_TYPE))):
content = content.encode('UTF-8')
col = str(uuid.uuid4())
key = os.path.join(prefix.lstrip('/') or col, filename)
webdav_storage.save(key, ContentFile(content, key))
return key
return inner
|
6e36df5f901380878dd321b2bf3f797fb1a6ca78
| 3,640,518
|
import re
import numpy as np
def get_Xy(sentence):
"""将 sentence 处理成 [word1, w2, ..wn], [tag1, t2, ...tn]"""
words_tags = re.findall('(.)/(.)', sentence)
if words_tags:
words_tags = np.asarray(words_tags)
words = words_tags[:, 0]
tags = words_tags[:, 1]
return words, tags # all characters and tags are stored separately as data / label
return None
|
9d850f74af6417c0172cb944b0e1ce4e3d931a96
| 3,640,519
|
def create_gap_token(rowidx=None):
"""returns a gap Token
Parameters
----------
rowidx: int (Optional)
row id
Returns
-------
Token
"""
return TT.Token(token_type=SupportedDataTypes.GAP, value='', rowidx=rowidx)
|
08f18bfcbf54e8861c684943111e22c33af2c69f
| 3,640,520
|
def get_local_bricks(volume: str) -> Result:
"""
Return all bricks that are being served locally in the volume
volume: Name of the volume to get local bricks for
"""
vol_info = volume_info(volume)
if vol_info.is_err():
return Err(vol_info.value)
local_ip = get_local_ip()
local_brick_list = []
for volume in vol_info.value:
for brick in volume.bricks:
if brick.peer.hostname == local_ip:
local_brick_list.append(brick)
return Ok(local_brick_list)
|
d49db6aac12d976a1cfbd72540862be6406f85c9
| 3,640,521
|
def unionWCT(m=6, n=6):
""" @ worst-case family union where
@m>=2 and n>=2 and k=3
:arg m: number of states
:arg n: number of states
:type m: integer
:type n: integer
:returns: two dfas
:rtype: (DFA,DFA)"""
if n < 2 or m < 2:
raise TestsError("numbers of states must both be greater than 1")
d1, d2 = DFA(), DFA()
d1.setSigma(["a", "b", "c"])
d1.States = list(range(m))
d1.setInitial(0)
d1.addFinal(m - 1)
d1.addTransition(0, "a", 1)
d1.addTransition(0, "c", 0)
for i in range(1, m):
d1.addTransition(i, "a", (i + 1) % m)
d1.addTransition(i, "b", 0)
d1.addTransition(i, "c", i)
d2.setSigma(["a", "b", "c"])
d2.States = list(range(n))
d2.setInitial(0)
d2.addFinal(n - 1)
d2.addTransition(0, "a", 0)
d2.addTransition(0, "b", 1)
for i in range(1, n):
d2.addTransition(i, "b", (i + 1) % n)
d2.addTransition(i, "a", i)
d2.addTransition(i, "c", 1)
return d1, d2
|
2b93c22e380c0ed52db2c9dfb9042785541b5885
| 3,640,522
|
def week_changes (after, before, str_dates, offset = 0, limit = 3) :
"""Yield all elements of `str_dates` closest to week changes."""
return unit_changes (after, before, str_dates, "week", offset, limit)
|
0b744db3f2cc581ea1fd2c59bbdc569339b88737
| 3,640,523
|
def getHiddenStatus(data):
"""
Model the data with a Gaussian HMM and return the predicted hidden states.
"""
cols = ["r_5", "r_20", "a_5", "a_20"]
model = GaussianHMM(n_components=3, covariance_type="full", n_iter=1000,
random_state=2010)
model.fit(data[cols])
hiddenStatus = model.predict(data[cols])
return hiddenStatus
|
4a613e426b8a4f16e02f535aebc2752d4a99ae25
| 3,640,524
|
def format_time(data, year):
"""Format any time variables in US.
Parameters
----------
data : pd.DataFrame
Data without time formatting.
year : int
The `year` of the wave being processed.
Returns
-------
data : pd.DataFrame
Data with time formatting.
"""
# See to do messages at the top of the file.
# There's some weird overlap in the pidp data. There's essentially a gap in September 2008 with no one in it from
# BHPS which makes transition models fail.
# Following 2 lines are a stupid work around.
# if self.year <= 2008:
# self.year += 1
data["time"] = year
return data
|
858d7e48143a16e644d4f1241cd8918385dc7c5f
| 3,640,525
|
def get_connection(sid):
"""
Attempts to connect to the given server and
returns a connection.
"""
server = get_server(sid)
try:
shell = spur.SshShell(
hostname=server["host"],
username=server["username"],
password=server["password"],
port=server["port"],
missing_host_key=spur.ssh.MissingHostKey.accept,
connect_timeout=10)
shell.run(["echo", "connected"])
except spur.ssh.ConnectionError as e:
raise WebException(
"Cannot connect to {}@{}:{} with the specified password".format(
server["username"], server["host"], server["port"]))
return shell
|
933aa768640455ed21b914c4cb432436a7225e4e
| 3,640,526
|
import sys
import math
import numpy as np
from astropy.table import Table
def compare_apertures(reference_aperture, comparison_aperture, absolute_tolerance=None, attribute_list=None, print_file=sys.stdout, fractional_tolerance=1e-6, verbose=False, ignore_attributes=None):
"""Compare the attributes of two apertures.
Parameters
----------
reference_aperture
comparison_aperture
absolute_tolerance
attribute_list
print_file
fractional_tolerance
verbose
ignore_attributes
Returns
-------
comparison_table : Table
Table listing the attributes whose values differ beyond the given tolerances.
"""
if attribute_list is None:
attribute_list = PRD_REQUIRED_ATTRIBUTES_ORDERED
comparison_table = Table(names=('aperture', 'attribute', 'reference', 'comparison', 'difference', 'percent'), dtype=['S50']*6)
add_blank_line = False
for attribute in attribute_list:
if (ignore_attributes is not None) and (attribute in list(ignore_attributes)):
continue
show = False
reference_attr = getattr(reference_aperture, attribute)
comparison_attr = getattr(comparison_aperture, attribute)
if verbose:
print('Comparing {} {}: {}{} {}{}'.format(reference_aperture, attribute, type(reference_attr), reference_attr, type(comparison_attr), comparison_attr))
if reference_attr != comparison_attr:
show = True
# if isinstance(reference_attr, float) and isinstance(comparison_attr, float):
if (type(reference_attr) in [int, float, np.float64]) and (type(comparison_attr) in [int, float, np.float64]):
difference = np.abs(comparison_attr - reference_attr)
fractional_difference = difference / np.max(
[np.abs(reference_attr), np.abs(comparison_attr)])
if verbose:
print('difference={}, fractional_difference={}'.format(difference, fractional_difference))
if (absolute_tolerance is not None) and math.isclose(reference_attr, comparison_attr, abs_tol=absolute_tolerance):
show = False
elif fractional_difference <= fractional_tolerance:
show = False
else:
fractional_difference_percent_string = '{:.4f}'.format(fractional_difference*100.)
difference_string = '{:.6f}'.format(difference)
else:
difference_string = 'N/A'
fractional_difference_percent_string = 'N/A'
if show:
add_blank_line = True
print('{:25} {:>15} {:>21} {:>21} {:>15} {:>10}'.format(reference_aperture.AperName, attribute, str(reference_attr), str(comparison_attr), difference_string, fractional_difference_percent_string), file=print_file)
# add comparison data to table
comparison_table.add_row([reference_aperture.AperName, attribute, str(reference_attr), str(comparison_attr), difference_string, fractional_difference_percent_string])
if add_blank_line:
print('', file=print_file)
return comparison_table
|
2e167a2e70a26fac23d17c0f464d9cb1cdc14509
| 3,640,527
|
from typing import List
def tail(filename: str, nlines: int = 20, bsz: int = 4096) -> List[str]:
"""
Pure python equivalent of the UNIX ``tail`` command. Simply pass a filename and the number of lines you want to load
from the end of the file, and a ``List[str]`` of lines (in forward order) will be returned.
This function is simply a wrapper for the highly efficient :func:`.io_tail`, designed for usage with a small (<10,000) amount
of lines to be tailed. To allow for the lines to be returned in the correct order, it must load all ``nlines`` lines into memory
before it can return the data.
If you need to ``tail`` a large amount of data, e.g. 10,000+ lines of a logfile, you should consider using the lower level
function :func:`.io_tail` - which acts as a generator, only loading a certain amount of bytes into memory per iteration.
Example file ``/tmp/testing``::
this is an example 1
this is an example 2
this is an example 3
this is an example 4
this is an example 5
this is an example 6
Example usage::
>>> from privex.helpers import tail
>>> lines = tail('/tmp/testing', nlines=3)
>>> print("\\n".join(lines))
this is an example 4
this is an example 5
this is an example 6
:param str filename: Path to file to tail. Relative or absolute path. Absolute path is recommended for safety.
:param int nlines: Total number of lines to retrieve from the end of the file
:param int bsz: Block size (in bytes) to load with each iteration (default: 4096 bytes). DON'T CHANGE UNLESS YOU
UNDERSTAND WHAT THIS MEANS.
:return List[str] lines: The last 'nlines' lines of the file 'filename' - in forward order.
"""
res = []
with open(filename, 'rb') as fp:
for chunk in io_tail(f=fp, nlines=nlines, bsz=bsz):
res = chunk + res
return res
|
e5f94cdc349610189c85c82c66589c243063f5a6
| 3,640,528
|
def initialized(name, secret_shares=5, secret_threshold=3, pgp_keys=None,
keybase_users=None, unseal=True):
"""
Ensure that the vault instance has been initialized and run the
initialization if it has not.
:param name: The id used for the state definition
:param secret_shares: The number of secret shares to use for the
initialization key
:param secret_threshold: The number of keys required to unseal the vault
:param pgp_keys: List of PGP public key strings to use for encrypting
the sealing keys
:param keybase_users: List of Keybase users to retrieve public PGP keys
for to use in encrypting the sealing keys
:param unseal: Whether to unseal the vault during initialization
:returns: Result of the execution
:rtype: dict
"""
ret = {'name': name,
'comment': '',
'result': '',
'changes': {}}
initialized = __salt__['vault.is_initialized']()
if initialized:
ret['result'] = True
ret['comment'] = 'Vault is already initialized'
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Vault will be initialized.'
else:
success, sealing_keys, root_token = __salt__['vault.initialize'](
secret_shares, secret_threshold, pgp_keys, keybase_users, unseal
) if not initialized else (True, {}, '')
ret['result'] = success
ret['changes'] = {
'root_credentials': {
'new': {
'sealing_keys': sealing_keys,
'root_token': root_token
},
'old': {}
}
}
ret['comment'] = 'Vault has {}initialized'.format(
'' if success else 'failed to be ')
return ret
|
c2b88bb8875ded7c7274b0695a9de9fb287b0b57
| 3,640,529
|
def plot(plot, x, y, **kwargs):
"""
Adds series to plot. By default this is displayed as continuous line.
Refer to matplotlib.pyplot.plot() help for more info. X and y coordinates
are expected to be in user's data units.
Args:
plot: matplotlib.pyplot
Plot to which series should be added.
x: (float,)
Collection of x-coordinates in user units.
y: (float,)
Collection of y-coordinates in user units.
title: str
Series legend.
"""
# add series
return plot.plot(x, y, **kwargs)
|
1e861243a87b61461fb49dcadf19ec9099fa5a1f
| 3,640,530
|
def glyph_by_hershey_code(hershey_code):
"""
Returns the Hershey glyph corresponding to `hershey_code`.
"""
glyph = glyphs_by_hershey_code.get(hershey_code)
if glyph is None:
raise ValueError("No glyph for hershey code %d" % hershey_code)
return glyph
|
54a8c9657466f2348e93667e8a638c3e44681adb
| 3,640,531
|
def _get_prefab_from_address(address):
"""
Parses an address of the format ip[:port] and returns a prefab object connected to the remote node
"""
try:
if ':' in address:
ip, port = address.split(':')
port = int(port)
else:
ip, port = address, 22
except Exception:
raise ValueError("Invalid node address")
return j.tools.prefab.getFromSSH(addr=ip, port=port)
|
3520dcca249073433ece88a4d9b31e8c2d73eb86
| 3,640,532
|
def interval_to_errors(value, low_bound, hi_bound):
"""
Convert error intervals to errors
:param value: central value
:param low_bound: interval low bound
:param hi_bound: interval high bound
:return: (error minus, error plus)
"""
error_plus = hi_bound - value
error_minus = value - low_bound
return error_minus, error_plus
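For example, a central value of 10.0 with a confidence interval [9.5, 11.0] yields asymmetric errors:
>>> interval_to_errors(10.0, 9.5, 11.0)
(0.5, 1.0)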
|
ffee403968ddf5fd976df79a90bdbb62474ede11
| 3,640,533
|
from typing import Any
from typing import cast
def log_enabled_arg(request: Any) -> bool:
"""Using different log messages.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(bool, request.param)
|
9ff97ab8f5cc8e3a0c548e613b75b5da050eb53d
| 3,640,534
|
def expsign(sign, exp):
"""
optimization of sign ** exp
"""
if sign == 1:
return 1
assert sign == -1
return -1 if exp % 2 else 1
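Equivalent to `(-1) ** exp` for sign == -1, but without the power computation:
>>> expsign(-1, 3)
-1
>>> expsign(-1, 4)
1
>>> expsign(1, 100)
1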
|
d770aaa2a4d20c9530a213631047d1d0f9cca3f7
| 3,640,535
|
def convert_format(tensors, kind, target_kind):
"""Converts data from format 'kind' to one of the formats specified in 'target_kind'
This allows us to convert data to/from dataframe representations for operators that
only support certain representations
"""
# this is all much more difficult because of multihot columns, which don't have
# great representations in dicts of cpu/gpu arrays. we're representing multihots
# as tuples of (values, offsets) tensors in this case - but have to do work at
# each step in terms of converting.
if kind & target_kind:
return tensors, kind
elif target_kind & Supports.GPU_DICT_ARRAY:
if kind == Supports.CPU_DICT_ARRAY:
return _convert_array(tensors, cp.array), Supports.GPU_DICT_ARRAY
elif kind == Supports.CPU_DATAFRAME:
return _pandas_to_array(tensors, False), Supports.GPU_DICT_ARRAY
elif kind == Supports.GPU_DATAFRAME:
return _cudf_to_array(tensors, False), Supports.GPU_DICT_ARRAY
elif target_kind & Supports.CPU_DICT_ARRAY:
if kind == Supports.GPU_DICT_ARRAY:
return _convert_array(tensors, cp.asnumpy), Supports.CPU_DICT_ARRAY
elif kind == Supports.CPU_DATAFRAME:
return _pandas_to_array(tensors, True), Supports.CPU_DICT_ARRAY
elif kind == Supports.GPU_DATAFRAME:
return _cudf_to_array(tensors, True), Supports.CPU_DICT_ARRAY
elif target_kind & Supports.GPU_DATAFRAME:
if kind == Supports.CPU_DATAFRAME:
return cudf.DataFrame(tensors), Supports.GPU_DATAFRAME
return _array_to_cudf(tensors), Supports.GPU_DATAFRAME
elif target_kind & Supports.CPU_DATAFRAME:
if kind == Supports.GPU_DATAFRAME:
return tensors.to_pandas(), Supports.CPU_DATAFRAME
elif kind == Supports.CPU_DICT_ARRAY:
return _array_to_pandas(tensors), Supports.CPU_DATAFRAME
elif kind == Supports.GPU_DICT_ARRAY:
return _array_to_pandas(_convert_array(tensors, cp.asnumpy)), Supports.CPU_DATAFRAME
raise ValueError("unsupported target for converting tensors", target_kind)
|
8925d002395da05c6b5a7374a7288cc0511df1cb
| 3,640,536
|
import urllib
import re
def template2path(template, params, ranges=None):
"""Converts a template and a dict of parameters to a path fragment.
Converts a template, such as /{name}/ and a dictionary of parameter
values to a URL path (string).
Parameter values that are used for building the path are converted to
strings using `str()` and URI-escaped, then validated against their
range. Unused parameters are ignored.
Any optional ([]) blocks in the template are skipped unless they contain at
least one parameter and all parameters needed to fill the block (including
nested blocks) are present in `params`.
Example:
>>> import rhino.mapper
>>> rhino.mapper.template2path("/{name}", {'name': 'fred'})
'/fred'
"""
if len(template) and -1 < template.find('|') < len(template) - 1:
raise InvalidTemplateError("'|' may only appear at the end, found at position %d in %s" % (template.find('|'), template))
if ranges is None:
ranges = DEFAULT_RANGES
# Stack for path components. A new list is added for each '[]' block
# encountered. When the closing ']' is reached, the last element is
# removed and either merged into the previous one (we keep the
# block) or discarded (we skip the block). At the end, this should
# contain a flat list of strings as its single element.
stack = [[]]
pattern = "[^/]+" # default range
name = "" # name of the current parameter
bracketdepth = 0 # current level of nested brackets
skip_to_depth = 0 # if > 1, skip until we're back at this bracket level
state = S_PATH
rangename = None # range name for the current parameter
seen_name = [False] # have we seen a named param in bracket level (index)?
for c in template_splitter.split(template):
if state == S_PATH:
if len(c) > 1:
stack[-1].append(c)
elif c == '[':
bracketdepth += 1
stack.append([])
seen_name.append(False)
elif c == ']':
bracketdepth -= 1
if bracketdepth < 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
last_elem = stack.pop()
if seen_name.pop():
stack[-1].extend(last_elem)
seen_name[-1] = True
elif c == '{':
name = ""
state = S_TEMPLATE
elif c == '}':
raise InvalidTemplateError("Mismatched braces in %s" % template)
elif c == '|':
pass
else:
stack[-1].append(c)
elif state == S_SKIP:
if c == '[':
bracketdepth += 1
seen_name.append(False)
elif c == ']':
if bracketdepth == skip_to_depth:
stack.pop()
skip_to_depth = 0
state = S_PATH
bracketdepth -= 1
seen_name.pop()
else: # state == S_TEMPLATE
if c == '}':
if name not in params:
if bracketdepth:
# We're missing a parameter, but it's ok since
# we're inside a '[]' block. Skip everything
# until we reach the end of the current block.
skip_to_depth = bracketdepth
state = S_SKIP
else:
raise InvalidArgumentError("Missing parameter '%s' in %s" % (name, template))
else:
if rangename and rangename in ranges:
regex = ranges[rangename]
else:
regex = pattern
value_bytes = unicode(params[name]).encode('utf-8')
value = urllib.quote(value_bytes, safe='/:;')
if not re.match('^' + regex + '$', value):
raise InvalidArgumentError("Value '%s' for parameter '%s' does not match '^%s$' in %s" % (value, name, regex, template))
stack[-1].append(value)
state = S_PATH
rangename = None
else:
name = c
if name.find(":") > -1:
name, rangename = name.split(":")
seen_name[bracketdepth] = True
if bracketdepth != 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
if state == S_TEMPLATE:
raise InvalidTemplateError("Mismatched braces in %s" % template)
# None of these Should Ever Happen [TM]
if state == S_SKIP: # pragma: no cover
raise MapperException("Internal error: end state is S_SKIP")
if len(stack) > 1: # pragma: no cover
raise MapperException("Internal error: stack not empty")
if len(seen_name) != 1: # pragma: no cover
raise MapperException("Internal error: seen_name not empty")
return "".join(stack[0])
|
daf628ab6ef1a6fddb612c0f4c817085ac23ce2c
| 3,640,537
|
from typing import Union
from typing import Dict
from typing import Any
def calculate_total_matched(
market_book: Union[Dict[str, Any], MarketBook]
) -> Union[int, float]:
"""
Calculate the total matched on this market from the amounts matched on each runner at each price point. This is useful for historic data, where this field is not populated.
:param market_book: A market book either as a dictionary or betfairlightweight MarketBook object
:return: The total matched on this market
"""
if type(market_book) is MarketBook:
market_book = market_book._data
return sum(
ps["size"]
for r in market_book.get("runners", [])
for ps in r.get("ex", {}).get("tradedVolume", [])
)
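A minimal sketch with a hand-built market book dictionary (the keys mirror the exchange API format used above; the numbers are made up):
mb = {"runners": [{"ex": {"tradedVolume": [{"price": 2.0, "size": 10.5}]}}, {"ex": {"tradedVolume": [{"price": 3.5, "size": 4.5}]}}]}
calculate_total_matched(mb)  # -> 15.0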
|
7bc3d4680e5507d1400e94ab30213c0cc6d817bb
| 3,640,538
|
import argparse
def parse_args(args):
"""Build parser object with options for sample.
Returns:
Python argparse parsed object.
"""
parser = argparse.ArgumentParser(
description="A VCF editing utility which adds ref and all sequences to a SURVIVOR fasta file.")
parser.add_argument("--reference-fasta", "-r", required=True, type=str,
help="Reference fasta file.")
parser.add_argument("--survivor-insertions-fasta", "-i", required=True, type=str,
help="Insertions fasta file from SURVIVOR.")
parser.add_argument("--survivor-vcf-file", "-v", required=True, type=str,
help="VCF file from SURVIVOR.")
parser.add_argument("--output-vcf", "-o", required=True, type=str,
help="Output path of edited VCF.")
parser.add_argument("--debug", action="store_true",
help="Verbose logging")
args = parser.parse_args(args)
truvari.setup_logging(args.debug)
return args
|
36f9e13a65f13659e32dcfda9fbbca6d52ffd0e6
| 3,640,539
|
import re
def _newline_to_ret_token(instring):
"""Replaces newlines with the !RET token.
"""
return re.sub(r'\n', '!RET', instring)
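For instance:
>>> _newline_to_ret_token('line one\nline two')
'line one!RETline two'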
|
4fcf60025f79811e99151019a479da04f25ba47c
| 3,640,540
|
def _ComputeLineCounts(old_lines, chunks):
"""Compute the length of the old and new sides of a diff.
Args:
old_lines: List of lines representing the original file.
chunks: List of chunks as returned by patching.ParsePatchToChunks().
Returns:
A tuple (old_len, new_len) representing len(old_lines) and
len(new_lines), where new_lines is the list representing the
result of applying the patch chunks to old_lines, however, without
actually computing new_lines.
"""
old_len = len(old_lines)
new_len = old_len
if chunks:
(_, old_b), (_, new_b), old_lines, _ = chunks[-1]
new_len += new_b - old_b
return old_len, new_len
|
ba99714016b69d87f260c8e7b8793468a2f7b04d
| 3,640,541
|
def _read_int(file_handle, data_size):
"""
Read a signed integer of defined data_size from file.
:param file_handle: The file handle to read from at current position
:param data_size: The data size in bytes of the integer to read
:returns: The integer read and decoded
"""
return int.from_bytes(file_handle.read(data_size), byteorder="little", signed=True)
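A small self-contained check using an in-memory stream (hypothetical, not part of the original module):
import io
buf = io.BytesIO((-2).to_bytes(4, byteorder="little", signed=True))
_read_int(buf, 4)  # -> -2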
|
4d2a7e82e9daa828c0e5b180250834f2fa9977d5
| 3,640,542
|
import numpy
def quaternion_to_matrix(quat):
"""OI
"""
qw = quat[0][0]
qx = quat[1][0]
qy = quat[2][0]
qz = quat[3][0]
rot = numpy.array([[1 - 2*qy*qy - 2*qz*qz, 2*qx*qy - 2*qz*qw, 2*qx*qz + 2*qy*qw],
[2*qx*qy + 2*qz*qw, 1 - 2*qx*qx - 2*qz*qz, 2*qy*qz - 2*qx*qw],
[2*qx*qz - 2*qy*qw, 2*qy*qz + 2*qx*qw, 1 - 2*qx*qx - 2*qy*qy]])
return rot
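Sanity check: the identity quaternion (w=1, x=y=z=0), passed as a 4x1 column vector to match the indexing above, maps to the identity matrix:
import numpy
quaternion_to_matrix(numpy.array([[1.0], [0.0], [0.0], [0.0]]))
# -> 3x3 identity matrix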
|
67f02ea97db1af4a763c3a97957f36de29da0157
| 3,640,543
|
def get_cart_from_request(request, create=False):
"""Returns Cart object for current user. If create option is True,
new cart will be saved to db"""
cookie_token = request.get_signed_cookie(
Cart.COOKIE_NAME, default=None)
if request.user.is_authenticated():
user = request.user
queryset = user.carts
token = get_user_open_cart_token(request.user)
else:
user = None
queryset = Cart.objects.anonymous()
token = cookie_token
try:
cart = queryset.open().get(token=token)
except Cart.DoesNotExist:
if create:
cart = Cart.objects.create(
user=user,
token=cookie_token)
else:
cart = Cart()
cart.discounts = request.discounts
return cart
|
d22c2587a20c12bac1fe713d40ddf069bfc5f40e
| 3,640,544
|
def make_concrete_rule(rule_no, zone_map, direction, zone, rule, concrete_port):
"""Take a rule and create a corresponding concrete rule."""
def make_rule(target_zone, port):
return ConcreteRule(source_rules=[rule], rule_no=rule_no, target_zone=target_zone,
direction=direction, port=port, action="allow")
target_zone = zone_map[rule.target_zone]
# Rule level ephemerality overrides zone level
if '+ephemeral_strict' in rule.tags:
ephem_start = 32768
elif '+ephemeral_loose' in rule.tags:
ephem_start = 1024
elif rule.direction == '>' and '+ephemeral_strict' in zone.tags and direction == 'ingress':
# An internal network with systems that use a tight ephemeral port range
ephem_start = 32768
else:
ephem_start = 1024
if concrete_port.proto == 'all':
# ISSUE: We should *maybe* prevent rules with the "all" protocol from being
# concretized. Because of the nature of "all" rules you can't restrict the
# return traffic at all. Really, this should be a policy level error?
return_port = ConcretePort(proto=concrete_port.proto, from_port=0, to_port=0)
else:
return_port = ConcretePort(proto=concrete_port.proto, from_port=ephem_start, to_port=65535)
if direction == 'ingress':
if rule.direction == '>':
if rule.zone == zone.name or rule.zone == 'all': # a > b (return traffic)
return make_rule(target_zone=rule.target_zone, port=return_port)
elif rule.target_zone == zone.name: # b > a (forward traffic)
return make_rule(target_zone=rule.zone, port=concrete_port)
else: # '<'
if rule.zone == zone.name: # a < b (forward traffic)
return make_rule(target_zone=rule.target_zone, port=concrete_port)
elif rule.target_zone == zone.name: # b < a
raise NotImplementedError("Receiving traffic from internal zone?")
else: # egress
if rule.direction == '>':
if rule.zone == zone.name or rule.zone == 'all': # a > b (forward traffic)
return make_rule(target_zone=rule.target_zone, port=concrete_port)
elif rule.target_zone == zone.name: # b > a (return traffic)
return make_rule(target_zone=rule.zone, port=return_port)
else: # '<'
if rule.zone == zone.name: # a < b (return traffic)
return make_rule(target_zone=rule.target_zone, port=return_port)
elif rule.target_zone == zone.name: # b < a
raise NotImplementedError("Receiving traffic from internal zone?")
raise AssertionError("should not reach here")
|
b7b1babc32c2d81193e62e90b5fd751ad8575ff1
| 3,640,545
|
from typing import List
import numpy as np
import pandas as pd
def downcast(df: pd.DataFrame, signed_columns: List[str] = None) -> pd.DataFrame:
"""
Automatically check for signed/unsigned columns and downcast.
However, if a column can be signed while all the data in that column is unsigned, you don't want to downcast to
an unsigned column. You can explicitly pass these columns.
:arg df: Data as Pandas DataFrame
:arg signed_columns: List of signed columns (signed = positive and negative values, unsigned = only positive values).
"""
logger.info(f'Size before downcasting: {df.memory_size} KB')
for column in df.columns:
if df[column].dtype in [np.int8, np.int16, np.int32, np.int64]:
if (df[column] < 0).any() or (signed_columns is not None and df[column].name in signed_columns):
df[column] = pd.to_numeric(df[column], downcast='signed')
else:
df[column] = pd.to_numeric(df[column], downcast='unsigned')
elif df[column].dtype in [np.float16, np.float32, np.float64]:
df[column] = pd.to_numeric(df[column], downcast='float')
logger.info(f'Size after downcasting: {df.memory_size} KB')
return df
|
2eb2494e5a59630c4e20d114aac076c971f287a6
| 3,640,546
|
import sys
def get_setting(name):
"""
Hook for getting Django settings and using properties of this file as the
default.
"""
me = sys.modules[__name__]
return getattr(settings, name, getattr(me, name, None))
|
8bb594469a81f66c8e490b73a53db88cef4ca537
| 3,640,547
|
from typing import Optional
def entity_type(entity: dict) -> Optional[str]:
"""
Safely get the NGSI type of the given entity.
The type, if present, is expected to be a string, so we convert it if it
isn't.
:param entity: the entity.
:return: the type string if there's a type, `None` otherwise.
"""
return maybe_map(str, safe_get_value(entity, NGSI_TYPE))
|
e4d27b7499710951959cfef5c1191c6744bd02ce
| 3,640,548
|
def read_private_key_data(bio):
"""
Read enough data from bio to fully read a private key.
(The data read is thrown away, though.)
This is required since the format does not contain the actual length
of the privately-serialized private key data. The knowledge of what
to read for each key type is known by OpenSSH itself; see
https://github.com/openssh/openssh-portable/blob/c7670b091a7174760d619ef6738b4f26b2093301/sshkey.c#L2767
for the details.
:param bio: Seekable binary IO object to read from
:return: Tuple of (key format, private key data).
"""
key_format = read_openssh_string(bio)
start_idx = bio.tell()
reader = _readers.get(key_format.decode())
if not reader:
raise NotImplementedError('Unknown key format %r' % key_format)
reader(bio)
end_idx = bio.tell()
bytes_read = end_idx - start_idx
bio.seek(start_idx)
private_key_bytes = bio.read(bytes_read)
return (key_format, private_key_bytes)
|
de1e38c49fe81449b90b14ccab0b2aaf7de121bc
| 3,640,549
|
def list_check(lst):
"""Are all items in lst a list?
>>> list_check([[1], [2, 3]])
True
>>> list_check([[1], "nope"])
False
"""
t = [1 if isinstance(x, list) else 0 for x in lst]
return len(lst) == sum(t)
|
9e2c55cb6e15f89ff2b73a78d5f15310d3cac672
| 3,640,550
|
def check_for_peaks_in_residual(vel, data, errors, best_fit_list, dct,
fitted_residual_peaks, signal_ranges=None,
signal_mask=None, force_accept=False,
params_min=None, params_max=None, noise_spike_mask=None):
"""Try fit by adding new components, whose initial parameters were determined from residual peaks.
Parameters
----------
vel : numpy.ndarray
Velocity channels (unitless).
data : numpy.ndarray
Original data of spectrum.
errors : numpy.ndarray
Root-mean-square noise values.
best_fit_list : list
List containing parameters of the current best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
dct : dict
Dictionary containing parameter settings for the improved fitting.
fitted_residual_peaks : list
List of initial mean position guesses for new fit components determined from residual peaks that were already tried in previous iterations.
signal_ranges : list
Nested list containing info about ranges of the spectrum that were estimated to contain signal. The goodness-of-fit calculations are only performed for the spectral channels within these ranges.
signal_mask : numpy.ndarray
Boolean array containing the information of signal_ranges.
force_accept : bool
Experimental feature. Default is 'False'. If set to 'True', the new fit will be forced to become the best fit.
params_min : list
List of minimum limits for parameters: [min_amp1, ..., min_ampN, min_fwhm1, ..., min_fwhmN, min_mean1, ..., min_meanN]
params_max : list
List of maximum limits for parameters: [max_amp1, ..., max_ampN, max_fwhm1, ..., max_fwhmN, max_mean1, ..., max_meanN]
Returns
-------
best_fit_list : list
List containing parameters of the chosen best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
fitted_residual_peaks : list
Updated list of initial mean position guesses for new fit components determined from residual peaks.
"""
# TODO: remove params_min and params_max keywords
params_fit = best_fit_list[0]
ncomps_fit = best_fit_list[2]
residual = best_fit_list[4]
amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
amp_guesses, fwhm_guesses, offset_guesses = get_initial_guesses(
residual, errors[0], dct['snr'], dct['significance'],
peak='positive')
if amp_guesses.size == 0:
best_fit_list[7] = False
return best_fit_list, fitted_residual_peaks
if list(offset_guesses) in fitted_residual_peaks:
best_fit_list[7] = False
return best_fit_list, fitted_residual_peaks
fitted_residual_peaks.append(list(offset_guesses))
amps_fit = list(amps_fit) + list(amp_guesses)
fwhms_fit = list(fwhms_fit) + list(fwhm_guesses)
offsets_fit = list(offsets_fit) + list(offset_guesses)
params_fit = amps_fit + fwhms_fit + offsets_fit
best_fit_list = get_best_fit(
vel, data, errors, params_fit, dct, first=False,
best_fit_list=best_fit_list, signal_ranges=signal_ranges,
signal_mask=signal_mask, force_accept=force_accept,
params_min=params_min, params_max=params_max,
noise_spike_mask=noise_spike_mask)
return best_fit_list, fitted_residual_peaks
|
0cc000f140514dd7c9c52df4636f287b80c66b9e
| 3,640,551
|
from typing import Dict
def build_encoded_manifest_from_nested_directory(
data_directory_path: str,
) -> Dict[str, EncodedVideoInfo]:
"""
Creates a dictionary from video_id to EncodedVideoInfo for
encoded videos in the given directory.
Args:
data_directory_path (str): The folder to ls to find encoded
video files.
Returns:
Dict[str, EncodedVideoInfo] mapping video_id to EncodedVideoInfo
for each file in 'data_directory_path'
"""
encoded_video_infos = {}
for participant_id in g_pathmgr.ls(data_directory_path):
participant_folder_path = f"{data_directory_path}/{participant_id}"
for video_file_name in g_pathmgr.ls(participant_folder_path):
video_id = video_file_name[:6]
video_full_path = f"{participant_folder_path}/{video_file_name}"
encoded_video_infos[video_id] = EncodedVideoInfo(video_id, video_full_path)
return encoded_video_infos
|
2a908eb33b140e73d27bca02da449d09e4ac4c5d
| 3,640,552
|
def derive_question(doc):
"""
Return a string that rephrases an action in the
doc in the form of a question.
'doc' is expected to be a spaCy doc.
"""
verb_chunk = find_verb_chunk(doc)
if not verb_chunk:
return None
subj = verb_chunk['subject'].text
obj = verb_chunk['object'].text
if verb_chunk['verb'].tag_ != 'VB':
# If the verb is not in its base form ("to ____" form),
# use the spaCy lemmatizer to convert it to such
verb = verb_chunk['verb'].lemma_
else:
verb = verb_chunk['verb'].text
question = "Why did {} {} {}?".format(subj, verb, obj)
return question
|
876e6733f8cf3d9accf3af1af89241ded4a02481
| 3,640,553
|
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
"""
input:
pred_variable (batch_size, sent_len): pred tag result
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
"""
pred_variable = pred_variable[word_recover]
# print("reordered labels: {}".format(pred_variable))
gold_variable = gold_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = gold_variable.size(0)
if sentence_classification:
pred_tag = pred_variable.cpu().data.numpy().tolist()
gold_tag = gold_variable.cpu().data.numpy().tolist()
pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]
gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]
else:
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
assert(len(pred)==len(gold))
pred_label.append(pred)
gold_label.append(gold)
return pred_label, gold_label
|
7f3efef4a0e9041e329c8d1c0c5641bf0c79ff58
| 3,640,554
|
def RegenerateOverview(*args, **kwargs):
"""
RegenerateOverview(Band srcBand, Band overviewBand, char const * resampling="average", GDALProgressFunc callback=0,
void * callback_data=None) -> int
"""
return _gdal.RegenerateOverview(*args, **kwargs)
|
8f05fcb7a12bf09d432b65b9cf049d2ff5cf23b1
| 3,640,555
|
import imp
def import_code(code, name):
""" code can be any object containing code -- string, file object, or
compiled code object. Returns a new module object initialized
by dynamically importing the given code. If the module has already
been imported - then it is returned and not imported a second time.
"""
# Check if 'code' has already been loaded
if (name in config.g_utils_import_dictionary):
return config.g_utils_import_dictionary[name]
# Load the 'code' into the memory
try:
module = imp.new_module(name)
config.g_utils_import_dictionary[name] = module
exec(code, module.__dict__)
return module
except Exception as e:
print("Error={}".format( str(e) ))
return None
|
309fb1e214225dcdf742bc5ea7d21cb502b05ae9
| 3,640,556
|
import numpy as np
def two(data: np.ndarray) -> int:
"""
Use the binary numbers in your diagnostic report to calculate the oxygen generator rating and CO2 scrubber rating,
then multiply them together. What is the life support rating of the submarine? (Be sure to represent your answer in
decimal, not binary.)
"""
def loop(most_common: bool) -> int:
"""
Loop through each bit for both the Oxygen generator rating (True) and CO2 scrubber rating (False).
"""
n_bits = len(data[0])
rating_list = np.copy(data)
for pos in range(n_bits):
if len(rating_list) <= 1:
break
pos_data = rating_list[:, pos]
n_0, n_1 = (pos_data == 0).sum(), (pos_data == 1).sum()
if most_common:
bit = 1 if n_1 >= n_0 else 0
else:
bit = 0 if n_1 >= n_0 else 1
rating_list = rating_list[rating_list[:, pos] == bit]
return binary_to_int(rating_list[0])
return loop(most_common=True) * loop(most_common=False)
|
723984bf673ab23697ccff69e0c7e2529cce2e81
| 3,640,557
|
import six
import numpy as np
import tensorflow as tf
def get_lr_fit(sess, model, x_train, y_train, x_test, num_steps=100):
"""Fit a multi-class logistic regression classifier.
Args:
x_train: [N, D]. Training data.
y_train: [N]. Training label, integer classes.
x_test: [M, D]. Test data.
Returns:
y_pred: [M]. Integer class prediction of test data.
"""
nbatches = x_train.shape[0]
y_pred = np.zeros([x_test.shape[0], x_test.shape[1]])
for ii in six.moves.xrange(nbatches):
x_train_ = x_train[ii].reshape([x_train[ii].shape[0], -1])
x_test_ = x_test[ii].reshape([x_test[ii].shape[0], -1])
y_train_ = y_train[ii]
# Reinitialize variables for a new episode.
var_to_init = list(
filter(lambda x: 'LRModel' in x.name, tf.global_variables()))
sess.run(tf.variables_initializer(var_to_init))
# Run LR training.
for step in six.moves.xrange(num_steps):
cost, acc, _ = sess.run(
[model.cost, model.acc, model.train_op],
feed_dict={
model.inputs: x_train_,
model.labels: y_train_
})
y_pred[ii] = np.argmax(
sess.run(model.prediction, feed_dict={
model.inputs: x_test_
}), axis=-1)
return y_pred
|
a60654d15e8f0f1c5e7ab11bc9c3e17f3440d286
| 3,640,558
|
import random
def make_block_trials(ntrials_block):
"""Creates a matrix of pseudo-random balanced trial parameters for a block of trials.
Parameters
----------
ntrials_block : int
Number of trials in the block.
Returns
-------
block : 2d array
Matrix of trial parameters (this is NOT random).
order : 1d array
Randomized order to run the trials in.
"""
## CREATE VECTORS OF TRIAL PARAMETER SETTINGS FOR A BLOCK OF TRIALS
# FOR EXAMPLE: COND_VEC = NP.APPEND(NP.ZEROS(NTRIAL_BLOCK/2), NP.ONES(NTRIAL_BLOCK/2))
# ^ CREATES A VECTOR TO HAVE 50% OF EACH OF TWO TRIAL CONDITIONS
# Collect run details into block object
block = Block()
# ADD BLOCK RUN
# EXAMPLE: block.CONDITION = COND_VEC
# Set up array for run order
order = list(range(ntrials_block))
random.shuffle(order)
return block, order
|
ed504af676a660befd3b548e9148e4a6cbc93183
| 3,640,559
|
def view_user(user_id: int):
"""Return the given user's history."""
return render_user(manager.get_user_by_id(user_id))
|
70b88f25b63697682650ae60591e4eee16253433
| 3,640,560
|
def first(c) -> col:
"""
In contrast to pyspark.sql.functions.first this function uses column name as alias
without prefixing it with the aggregation function name.
"""
if isinstance(c, str):
return F.first(c).alias(c)
columnName = c._jc.toString()
return F.first(c).alias(columnName)
|
0b7b0bb0d3e2f56c400f3a026f39cb2459b0e54f
| 3,640,561
|
def translate(root_list, use_bag_semantics=False):
"""
Translate a list of relational algebra trees into SQL statements.
:param root_list: a list of tree roots
:param use_bag_semantics: flag for using relational algebra bag semantics
:return: a list of SQL statements
"""
translator = (Translator() if use_bag_semantics else SetTranslator())
return [translator.translate(root).to_sql() for root in root_list]
|
b7a25d8af2e47ba134a6dbf490a0255391b330c1
| 3,640,562
|
import jsonschema
def replace_aliases(record):
"""
Replace all aliases associated with this DID / GUID
"""
# we set force=True so that if MIME type of request is not application/JSON,
# get_json will still throw a UserError.
aliases_json = flask.request.get_json(force=True)
try:
jsonschema.validate(aliases_json, RECORD_ALIAS_SCHEMA)
except jsonschema.ValidationError as err:
logger.warning(f"Bad request body:\n{err}")
raise UserError(err)
aliases = [record["value"] for record in aliases_json["aliases"]]
# authorization and error handling done in driver
blueprint.index_driver.replace_aliases_for_did(aliases, record)
aliases_payload = {"aliases": [{"value": alias} for alias in aliases]}
return flask.jsonify(aliases_payload), 200
|
a19335af1836f1899565b874640cdd0858247bcc
| 3,640,563
|
def pos_tag(docs, language=None, tagger_instance=None, doc_meta_key=None):
"""
Apply Part-of-Speech (POS) tagging to list of documents `docs`. Either load a tagger based on supplied `language`
or use the tagger instance `tagger` which must have a method ``tag()``. A tagger can be loaded via
:func:`~tmtoolkit.preprocess.load_pos_tagger_for_language`.
POS tagging so far only works for English and German. The English tagger uses the Penn Treebank tagset
(https://ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html), the
German tagger uses STTS (http://www.ims.uni-stuttgart.de/forschung/ressourcen/lexika/TagSets/stts-table.html).
:param docs: list of tokenized documents
:param language: the language for the POS tagger (currently only "english" and "german" are supported) if no
`tagger` is given
:param tagger_instance: a tagger instance to use for tagging if no `language` is given
:param doc_meta_key: if this is not None, it must be a string that specifies the key that is used for the
resulting dicts
:return: if `doc_meta_key` is None, return a list of N lists, where N is the number of documents; each of these
lists contains the POS tags for the respective tokens from `docs`, hence each POS list has the same length
as the respective token list of the corresponding document; if `doc_meta_key` is not None, the result list
contains dicts with the only key `doc_meta_key` that maps to the list of POS tags for the corresponding
document
"""
require_listlike(docs)
if tagger_instance is None:
tagger_instance, _ = load_pos_tagger_for_language(language or defaults.language)
docs_meta = []
for dtok in docs:
if len(dtok) > 0:
tokens_and_tags = tagger_instance.tag(dtok)
tags = list(list(zip(*tokens_and_tags))[1])
else:
tags = []
if doc_meta_key:
docs_meta.append({doc_meta_key: tags})
else:
docs_meta.append(tags)
return docs_meta
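
# Hedged usage sketch, not part of the original snippet. It sidesteps the
# language-based loading by passing an NLTK PerceptronTagger directly, which
# satisfies the "must have a tag() method" contract.
if __name__ == "__main__":
    from nltk.tag.perceptron import PerceptronTagger
    docs = [["This", "is", "a", "test"], []]
    print(pos_tag(docs, tagger_instance=PerceptronTagger()))
    # -> e.g. [['DT', 'VBZ', 'DT', 'NN'], []]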
|
a990acc4caa33c7615c961593557b43ef6d5a6d0
| 3,640,564
|
def NOBE_GA_SH(G,K,topk):
"""detect SH spanners via NOBE-GA[1].
Parameters
----------
G : easygraph.Graph
An unweighted and undirected graph.
K : int
Embedding dimension k
topk : int
top - k structural hole spanners
Returns
-------
SHS : list
The top-k structural hole spanners.
Examples
--------
>>> NOBE_GA_SH(G,K=8,topk=5)
References
----------
.. [1] https://www.researchgate.net/publication/325004496_On_Spectral_Graph_Embedding_A_Non-Backtracking_Perspective_and_Graph_Approximation
"""
    Y = eg.NOBE_GA(G, K)
    if isinstance(Y[0, 0], complex):
        Y = abs(Y)
    kmeans = KMeans(n_clusters=K, random_state=0).fit(Y)
    com = {}
    cluster = {}
    for a, i in enumerate(G.nodes):
        com[i] = kmeans.labels_[a]
    for i in com:
        cluster.setdefault(com[i], []).append(i)
    vector = {}
    for a, i in enumerate(G.nodes):
        vector[i] = Y[a]
    rds = RDS(com, cluster, vector, K)
    rds_sort = sorted(rds.items(), key=lambda d: d[1], reverse=True)
    # Take the top-k vertices by RDS score.
    SHS = [i[0] for i in rds_sort[:topk]]
    return SHS
|
a1f3f8f041e4a89b9d09037479574c27505dd7fa
| 3,640,565
|
import torch
def calculate_correct_answers(model, dataloader, epoch):
"""Calculate correct over total answers"""
forward_backward_func = get_forward_backward_func()
for m in model:
m.eval()
def loss_func(labels, output_tensor):
logits = output_tensor
loss_dict = {}
# Compute the correct answers.
predicted = torch.argmax(logits, dim=-1)
corrects = (predicted == labels).float()
# Add to the counters.
loss_dict['total'] = labels.size(0)
loss_dict['correct'] = corrects.sum().item()
return 0, loss_dict
#defined inside to capture output_predictions
def correct_answers_forward_step(batch, model):
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
images, labels = process_batch(batch_)
# Forward model.
output_tensor = model(images)
return output_tensor, partial(loss_func, labels)
with torch.no_grad():
# For all the batches in the dataset.
total = 0
correct = 0
for _, batch in enumerate(dataloader):
loss_dicts = forward_backward_func(correct_answers_forward_step, batch, model,
optimizer=None, timers=None, forward_only=True)
for loss_dict in loss_dicts:
total += loss_dict['total']
correct += loss_dict['correct']
for m in model:
m.train()
# Reduce.
if mpu.is_pipeline_last_stage():
unreduced = torch.cuda.LongTensor([correct, total])
torch.distributed.all_reduce(unreduced,
group=mpu.get_data_parallel_group())
# Print on screen.
correct_ans = unreduced[0].item()
total_count = unreduced[1].item()
return correct_ans, total_count
|
24e3196cd172719d16524b0bbd6c0848fec3c44e
| 3,640,566
|
from typing import Dict
from typing import Tuple
from typing import Any
import re
def set_template_parameters(
template: Template, template_metadata: TemplateMetadata, input_parameters: Dict[str, str], interactive=False
):
"""Set and verify template parameters' values in the template_metadata."""
if interactive and not communication.has_prompt():
raise errors.ParameterError("Cannot use interactive mode with no prompt")
def validate(var: TemplateParameter, val) -> Tuple[bool, Any]:
try:
return True, var.convert(val)
except ValueError as e:
communication.info(str(e))
return False, val
def read_valid_value(var: TemplateParameter, default_value=None):
"""Prompt the user for a template variable and return a valid value."""
while True:
variable_type = f", type: {var.type}" if var.type else ""
enum_values = f", options: {var.possible_values}" if var.possible_values else ""
default_value = default_value or to_string(var.default)
val = communication.prompt(
f"Enter a value for '{var.name}' ({var.description}{variable_type}{enum_values})",
default=default_value,
show_default=var.has_default,
)
valid, val = validate(var, val)
if valid:
return val
missing_values = []
for parameter in sorted(template.parameters, key=lambda v: v.name):
name = parameter.name
is_valid = True
if name in input_parameters: # NOTE: Inputs override other values. No prompt for them in interactive mode
is_valid, value = validate(parameter, input_parameters[name])
elif interactive:
value = read_valid_value(parameter, default_value=template_metadata.metadata.get(name))
elif name in template_metadata.metadata:
is_valid, value = validate(parameter, template_metadata.metadata[name])
elif parameter.has_default: # Use default value if no value is available in the metadata
value = parameter.default
elif communication.has_prompt():
value = read_valid_value(parameter)
else:
missing_values.append(name)
continue
if not is_valid:
if not communication.has_prompt():
raise errors.TemplateUpdateError(f"Invalid value '{value}' for variable '{name}'")
template_metadata.metadata[name] = read_valid_value(parameter)
else:
template_metadata.metadata[name] = value
if missing_values:
missing_values_str = ", ".join(missing_values)
raise errors.TemplateUpdateError(f"Can't update template, it now requires variable(s): {missing_values_str}")
# NOTE: Ignore internal variables, i.e. __\w__
internal_keys = re.compile(r"^__\w+__$")
metadata_variables = {v for v in template_metadata.metadata if not internal_keys.match(v)} | set(
input_parameters.keys()
)
template_variables = {v.name for v in template.parameters}
unused_metadata_variables = metadata_variables - template_variables
if len(unused_metadata_variables) > 0:
unused_str = "\n\t".join(unused_metadata_variables)
communication.info(f"These parameters are not used by the template and were ignored:\n\t{unused_str}\n")
|
fb14c28f754305e6907cff40086b2ffe55a55526
| 3,640,567
|
def calc_roll_pitch_yaw(yag, zag, yag_obs, zag_obs, sigma=None):
"""Calc S/C delta roll, pitch, and yaw for observed star positions relative to reference.
This function computes a S/C delta roll/pitch/yaw that transforms the
reference star positions yag/zag into the observed positions
yag_obs/zag_obs. The units for these values must be in arcsec.
The ``yag`` and ``zag`` values correspond to the reference star catalog
positions. These must be a 1-d list or array of length M (number of
stars).
The ``yag_obs`` and ``zag_obs`` values must be either a 1-d or 2-d array
with shape M (single readout of M stars) or shape N x M (N rows of M
stars).
The ``sigma`` parameter can be None or a 1-d array of length M.
The algorithm is a simple but fast linear least-squared solution which uses
a small angle assumption to linearize the rotation matrix from
    [[cos(th), -sin(th)], [sin(th), cos(th)]] to [[1, -th], [th, 1]].
In practice anything below 1.0 degree is fine.
:param yag: reference yag (list or array, arcsec)
:param zag: reference zag (list or array, arcsec)
:param yag_obs: observed yag (list or array, arcsec)
:param zag_obs: observed zag (list or array, arcsec)
:param sigma: centroid uncertainties (None or list or array, arcsec)
:returns: roll, pitch, yaw (degrees)
"""
yag = np.array(yag)
zag = np.array(zag)
yag_obs = np.array(yag_obs)
zag_obs = np.array(zag_obs)
if yag.ndim != 1 or zag.ndim != 1 or yag.shape != zag.shape:
raise ValueError('yag and zag must be 1-d and equal length')
    if (yag_obs.ndim not in (1, 2) or zag_obs.ndim not in (1, 2) or
            yag_obs.shape != zag_obs.shape):
raise ValueError('yag_obs and zag_obs must be 1-d or 2-d and equal shape')
n_stars = len(yag)
    if yag_obs.shape[-1] != n_stars or zag_obs.shape[-1] != n_stars:
raise ValueError('inconsistent number of stars in yag_obs or zag_obs')
one_d = yag_obs.ndim == 1
if one_d:
yag_obs.shape = 1, n_stars
zag_obs.shape = 1, n_stars
outs = []
for yo, zo in zip(yag_obs, zag_obs):
out = _calc_roll_pitch_yaw(yag, zag, yo, zo, sigma=sigma)
outs.append(out)
if one_d:
roll, pitch, yaw = outs[0]
else:
vals = np.array(outs)
roll, pitch, yaw = vals[:, 0], vals[:, 1], vals[:, 2]
return roll, pitch, yaw
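
# Hedged usage sketch, not part of the original snippet: two reference stars are
# rotated by a small angle in the yag/zag plane, so the recovered roll should be
# close to the applied rotation (sign convention depends on _calc_roll_pitch_yaw).
if __name__ == "__main__":
    import numpy as np
    yag = [1000.0, -1000.0]
    zag = [500.0, -500.0]
    theta = np.deg2rad(0.01)  # small angle, consistent with the linearization
    yag_obs = [y * np.cos(theta) - z * np.sin(theta) for y, z in zip(yag, zag)]
    zag_obs = [y * np.sin(theta) + z * np.cos(theta) for y, z in zip(yag, zag)]
    print(calc_roll_pitch_yaw(yag, zag, yag_obs, zag_obs))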
|
e1cf3c1377a3613b9ea1fc76e7c9eecac1a6e175
| 3,640,568
|
def make_query_abs(db, table, start_dt, end_dt, dscfg, mode, no_part=False, cols=None):
"""절대 시간으로 질의를 만듦.
Args:
db (str): DB명
table (str): table명
start_dt (date): 시작일
end_dt (date): 종료일
dscfg (ConfigParser): 데이터 스크립트 설정
mode: 쿼리 모드 ('count' - 행 수 구하기, 'preview' - 프리뷰)
no_part: 테이블에 파티션이 없음. 기본 False
cols: 명시적 선택 컬럼
"""
assert type(start_dt) is date and type(end_dt) is date
start_dt = start_dt.strftime('%Y%m%d')
end_dt = end_dt.strftime('%Y%m%d')
return _make_query(db, table, start_dt, end_dt, dscfg, mode, no_part, cols)
|
113049d37ceaf1cbf9b9149b1d3a4278dad96aa6
| 3,640,569
|
import warnings
def tgl_forward_backward(
emp_cov,
alpha=0.01,
beta=1.0,
max_iter=100,
n_samples=None,
verbose=False,
tol=1e-4,
delta=1e-4,
gamma=1.0,
lamda=1.0,
eps=0.5,
debug=False,
return_history=False,
return_n_iter=True,
choose="gamma",
lamda_criterion="b",
time_norm=1,
compute_objective=True,
return_n_linesearch=False,
vareps=1e-5,
stop_at=None,
stop_when=1e-4,
laplacian_penalty=False,
init="empirical",
):
"""Time-varying graphical lasso solver with forward-backward splitting.
Solves the following problem via FBS:
min sum_{i=1}^T -n_i log_likelihood(S_i, K_i) + alpha*||K_i||_{od,1}
+ beta sum_{i=2}^T Psi(K_i - K_{i-1})
where S_i = (1/n_i) X_i^T \times X_i is the empirical covariance of data
matrix X (training observations by features).
Parameters
----------
emp_cov : ndarray, shape (n_times, n_features, n_features)
Empirical covariance of data.
alpha, beta : float, optional
Regularisation parameters.
max_iter : int, optional
Maximum number of iterations.
n_samples : ndarray
Number of samples available for each time point.
verbose : bool, default False
Print info at each iteration.
tol : float, optional
Absolute tolerance for convergence.
delta, gamma, lamda, eps : float, optional
FBS parameters.
debug : bool, default False
Run in debug mode.
return_history : bool, optional
Return the history of computed values.
return_n_iter : bool, optional
Return the number of iteration before convergence.
    choose : ('gamma', 'lamda', 'fixed', 'both')
        Search iteratively for gamma / lamda / neither ('fixed') / both.
lamda_criterion : ('a', 'b', 'c')
Criterion to choose lamda. See ref for details.
time_norm : float, optional
Choose the temporal norm between points.
compute_objective : bool, default True
Choose to compute the objective value.
return_n_linesearch : bool, optional
Return the number of line-search iterations before convergence.
vareps : float, optional
Jitter for the loss.
stop_at, stop_when : float, optional
Other convergence criteria, as used in the paper.
laplacian_penalty : bool, default False
Use Laplacian penalty.
init : {'empirical', 'zero', ndarray}
Choose how to initialize the precision matrix, with the inverse
empirical covariance, zero matrix or precomputed.
Returns
-------
K, covariance : numpy.array, 3-dimensional (T x d x d)
Solution to the problem for each time t=1...T .
history : list
If return_history, then also a structure that contains the
objective value, the primal and dual residual norms, and tolerances
for the primal and dual residual norms at each iteration.
"""
available_choose = ("gamma", "lamda", "fixed", "both")
if choose not in available_choose:
raise ValueError("`choose` parameter must be one of %s." % available_choose)
n_times, _, n_features = emp_cov.shape
K = init_precision(emp_cov, mode=init)
if laplacian_penalty:
obj_partial = partial(
objective_laplacian, n_samples=n_samples, emp_cov=emp_cov, alpha=alpha, beta=beta, vareps=vareps
)
function_f = partial(loss_laplacian, beta=beta, n_samples=n_samples, S=emp_cov, vareps=vareps)
gradient_f = partial(grad_loss_laplacian, emp_cov=emp_cov, beta=beta, n_samples=n_samples, vareps=vareps)
function_g = partial(penalty_laplacian, alpha=alpha)
else:
psi = partial(vector_p_norm, p=time_norm)
obj_partial = partial(
objective, n_samples=n_samples, emp_cov=emp_cov, alpha=alpha, beta=beta, psi=psi, vareps=vareps
)
function_f = partial(loss, n_samples=n_samples, S=emp_cov, vareps=vareps)
gradient_f = partial(grad_loss, emp_cov=emp_cov, n_samples=n_samples, vareps=vareps)
function_g = partial(penalty, alpha=alpha, beta=beta, psi=psi)
max_residual = -np.inf
n_linesearch = 0
checks = [convergence(obj=obj_partial(precision=K))]
for iteration_ in range(max_iter):
k_previous = K.copy()
x_inv = np.array([linalg.pinvh(x) for x in K])
grad = gradient_f(K, x_inv=x_inv)
if choose in ["gamma", "both"]:
gamma, y = choose_gamma(
gamma / eps if iteration_ > 0 else gamma,
K,
function_f=function_f,
beta=beta,
alpha=alpha,
lamda=lamda,
grad=grad,
delta=delta,
eps=eps,
max_iter=200,
p=time_norm,
x_inv=x_inv,
choose=choose,
laplacian_penalty=laplacian_penalty,
)
x_hat = K - gamma * grad
if choose not in ["gamma", "both"]:
if laplacian_penalty:
y = soft_thresholding_od(x_hat, alpha * gamma)
else:
y = prox_FL(x_hat, beta * gamma, alpha * gamma, p=time_norm, symmetric=True)
if choose in ("lamda", "both"):
lamda, n_ls = choose_lamda(
min(lamda / eps if iteration_ > 0 else lamda, 1),
K,
function_f=function_f,
objective_f=obj_partial,
gradient_f=gradient_f,
function_g=function_g,
gamma=gamma,
delta=delta,
eps=eps,
criterion=lamda_criterion,
max_iter=200,
p=time_norm,
grad=grad,
prox=y,
vareps=vareps,
)
n_linesearch += n_ls
K = K + min(max(lamda, 0), 1) * (y - K)
# K, t = fista_step(Y, Y - Y_old, t)
check = convergence(
obj=obj_partial(precision=K),
rnorm=np.linalg.norm(upper_diag_3d(K) - upper_diag_3d(k_previous)),
snorm=np.linalg.norm(obj_partial(precision=K) - obj_partial(precision=k_previous)),
e_pri=np.sqrt(upper_diag_3d(K).size) * tol
+ tol * max(np.linalg.norm(upper_diag_3d(K)), np.linalg.norm(upper_diag_3d(k_previous))),
e_dual=tol,
)
if verbose and iteration_ % (50 if verbose < 2 else 1) == 0:
print("obj: %.4f, rnorm: %.7f, snorm: %.4f," "eps_pri: %.4f, eps_dual: %.4f" % check[:5])
if return_history:
checks.append(check)
if np.isnan(check.rnorm) or np.isnan(check.snorm):
warnings.warn("precision is not positive definite.")
if stop_at is not None:
if abs(check.obj - stop_at) / abs(stop_at) < stop_when:
break
else:
# use this convergence criterion
subgrad = (x_hat - K) / gamma
if 0:
if laplacian_penalty:
grad = grad_loss_laplacian(K, emp_cov, n_samples, vareps=vareps)
else:
grad = grad_loss(K, emp_cov, n_samples, vareps=vareps)
res_norm = np.linalg.norm(grad + subgrad)
if iteration_ == 0:
normalizer = res_norm + 1e-6
max_residual = max(np.linalg.norm(grad), np.linalg.norm(subgrad)) + 1e-6
else:
res_norm = np.linalg.norm(K - k_previous) / gamma
max_residual = max(max_residual, res_norm)
normalizer = max(np.linalg.norm(grad), np.linalg.norm(subgrad)) + 1e-6
r_rel = res_norm / max_residual
r_norm = res_norm / normalizer
if not debug and (r_rel <= tol or r_norm <= tol) and iteration_ > 0: # or (
# check.rnorm <= check.e_pri and iteration_ > 0):
break
else:
warnings.warn("Objective did not converge.")
covariance_ = np.array([linalg.pinvh(k) for k in K])
return_list = [K, covariance_]
if return_history:
return_list.append(checks)
if return_n_iter:
return_list.append(iteration_ + 1)
if return_n_linesearch:
return_list.append(n_linesearch)
return return_list
|
d49e9882070e8fa28395fe47afac54e83cfc7021
| 3,640,570
|
def validate_task_rel_proposal(header, propose, rel_address, state):
"""Validates that the User exists, the Task exists, and the User is not
in the Task's relationship specified by rel_address.
Args:
header (TransactionHeader): The transaction header.
propose (ProposeAddTask_____): The Task relationship proposal.
rel_address (str): The Task relationship address produced by the Task
and the User.
state (sawtooth_sdk.Context): The way to communicate to the validator
the state gets and sets.
Returns:
(dict of addresses)
"""
user_address = addresser.make_user_address(propose.user_id)
task_address = addresser.make_task_attributes_address(propose.task_id)
proposal_address = addresser.make_proposal_address(
object_id=propose.task_id,
related_id=propose.user_id)
state_entries = get_state(state, [user_address,
task_address,
proposal_address,
rel_address])
validate_identifier_is_user(state_entries,
identifier=propose.user_id,
address=user_address)
user_entry = get_state_entry(state_entries, user_address)
user = get_user_from_container(
return_user_container(user_entry),
propose.user_id)
if header.signer_public_key not in [user.user_id, user.manager_id]:
raise InvalidTransaction(
"Txn signer {} is not the user or the user's "
"manager {}".format(header.signer_public_key,
[user.user_id, user.manager_id]))
validate_identifier_is_task(state_entries,
identifier=propose.task_id,
address=task_address)
try:
task_admins_entry = get_state_entry(state_entries, rel_address)
task_rel_container = return_task_rel_container(task_admins_entry)
if is_in_task_rel_container(
task_rel_container,
propose.task_id,
propose.user_id):
raise InvalidTransaction(
"User {} is already in the Role {} "
"relationship".format(propose.user_id,
propose.task_id))
except KeyError:
# The task rel container doesn't exist so no task relationship exists
pass
return state_entries
|
d9511f0cad43cbb7a2bc9c08b9f1d112d2d4bf7b
| 3,640,571
|
import json
def all_cells_run(event_str: str, expected_count: int) -> bool:
"""Wait for an event signalling all cells have run.
`execution_count` should equal number of nonempty cells.
"""
try:
event = json.loads(event_str)
msg_type = event["msg_type"]
content = event["content"]
execution_count = content["execution_count"]
status = content["status"]
except (TypeError, KeyError):
return False
return all(
(
msg_type == "execute_reply",
execution_count == expected_count,
status == "ok",
)
)
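
# Hedged usage sketch, not part of the original snippet: a minimal Jupyter-style
# execute_reply event for a notebook whose last nonempty cell is number 3.
if __name__ == "__main__":
    done = json.dumps({"msg_type": "execute_reply",
                       "content": {"execution_count": 3, "status": "ok"}})
    pending = json.dumps({"msg_type": "execute_reply",
                          "content": {"execution_count": 2, "status": "ok"}})
    print(all_cells_run(done, expected_count=3))     # True
    print(all_cells_run(pending, expected_count=3))  # False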
|
c3e1bb23f38ffdd09d4cc2ea3326d40b7cf54034
| 3,640,572
|
from typing import Union
def to_forecasting(
timeseries: np.ndarray,
forecast: int = 1,
    axis: int = 0,
    test_size: Union[int, float] = None,
):
"""Split a timeseries for forecasting tasks.
Transform a timeseries :math:`X` into a series of
input values :math:`X_t` and a series of output values
:math:`X_{t+\\mathrm{forecast}}`.
It is also possible to split the timeseries between training
timesteps and testing timesteps.
Parameters
----------
timeseries : np.ndarray
Timeseries to split.
forecast : int, optional
Number of time lag steps between
the timeseries :math:`X_t` and the timeseries
:math:`X_{t+\\mathrm{forecast}}`, by default 1,
i.e. returns two timeseries with a time difference
of 1 timesteps.
axis : int, optional
Time axis of the timeseries, by default 0
test_size : int or float, optional
If set, will also split the timeseries
into a training phase and a testing phase of
``test_size`` timesteps. Can also be specified
as a float ratio, by default None
Returns
-------
tuple of numpy.ndarray
:math:`X_t` and :math:`X_{t+\\mathrm{forecast}}`.
If ``test_size`` is specified, will return:
:math:`X_t`, :math:`X_t^{test}`,
:math:`X_{t+\\mathrm{forecast}}`, :math:`X_{t+\\mathrm{forecast}}^{test}`.
The size of the returned timeseries is therefore the size of
:math:`X` minus the forecasting length ``forecast``.
Raises
------
ValueError
If ``test_size`` is a float, it must be in [0, 1[.
"""
series_ = np.moveaxis(timeseries.view(), axis, 0)
time_len = series_.shape[0]
if test_size is not None:
if isinstance(test_size, float) and test_size < 1 and test_size >= 0:
test_len = round(time_len * test_size)
elif isinstance(test_size, int):
test_len = test_size
else:
raise ValueError(
"invalid test_size argument: "
"test_size can be an integer or a float "
f"in [0, 1[, but is {test_size}."
)
else:
test_len = 0
X = series_[:-forecast]
y = series_[forecast:]
if test_len > 0:
X_t = X[-test_len:]
y_t = y[-test_len:]
X = X[:-test_len]
y = y[:-test_len]
X = np.moveaxis(X, 0, axis)
X_t = np.moveaxis(X_t, 0, axis)
y = np.moveaxis(y, 0, axis)
y_t = np.moveaxis(y_t, 0, axis)
return X, X_t, y, y_t
return np.moveaxis(X, 0, axis), np.moveaxis(y, 0, axis)
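
# Hedged usage sketch, not part of the original snippet: forecast a 1-D series
# 5 steps ahead and hold out the last 20 steps for testing.
if __name__ == "__main__":
    import numpy as np
    ts = np.sin(np.linspace(0, 10 * np.pi, 200)).reshape(-1, 1)
    X_train, X_test, y_train, y_test = to_forecasting(ts, forecast=5, test_size=20)
    print(X_train.shape, X_test.shape)  # (175, 1) (20, 1)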
|
7d77df1f52ee5a499b635dd9575ab08afaa7dda2
| 3,640,573
|
def build_task_environment() -> dm_env.Environment:
"""Returns the environment."""
# We first build the base task that contains the simulation model as well
# as all the initialization logic, the sensors and the effectors.
task, components = task_builder.build_task()
del components
env_builder = subtask_env_builder.SubtaskEnvBuilder()
env_builder.set_task(task)
# Build a composer environment.
task_env = env_builder.build_base_env()
# Define the action space. This defines what action spec is exposed to the
# agent along with how to project the action received by the agent to the one
# exposed by the composer environment. Here the action space is a collection
# of actions spaces, one for the arm and one for the gripper.
parent_action_spec = task.effectors_action_spec(physics=task_env.physics)
robot_action_spaces = []
for rbt in task.robots:
# Joint space control of each individual robot.
joint_action_space = action_spaces.ArmJointActionSpace(
af.prefix_slicer(parent_action_spec, rbt.arm_effector.prefix))
gripper_action_space = action_spaces.GripperActionSpace(
af.prefix_slicer(parent_action_spec, rbt.gripper_effector.prefix))
# Gripper isn't controlled by the agent for this task.
gripper_action_space = af.FixedActionSpace(
gripper_action_space,
gripper_action_space.spec().minimum)
robot_action_spaces.extend([joint_action_space, gripper_action_space])
env_builder.set_action_space(
af.CompositeActionSpace(robot_action_spaces))
# We add a preprocessor that casts all the observations to float32
env_builder.add_preprocessor(observation_transforms.CastPreprocessor())
env_builder.add_preprocessor(
rewards.L2Reward(obs0='robot0_tcp_pos', obs1='robot1_tcp_pos'))
# End episodes after 100 steps.
env_builder.add_preprocessor(subtask_termination.MaxStepsTermination(100))
return env_builder.build()
|
91618e066ef92a396ea2dc8f6ff36c9a98356e29
| 3,640,574
|
def searchInsert(nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
try:
return nums.index(target)
except ValueError:
nums.append(target)
nums.sort()
return nums.index(target)
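
# Hedged usage sketch, not part of the original snippet. Note that the miss case
# mutates the input list (it appends and sorts in place).
if __name__ == "__main__":
    print(searchInsert([1, 3, 5, 6], 5))  # 2, target already present
    print(searchInsert([1, 3, 5, 6], 2))  # 1, position where 2 would be inserted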
|
56a719b1595502a773c108d26c597fb5ac0201bb
| 3,640,575
|
def resource(author, tag) -> Resource:
"""Resource fixture"""
return Resource(
name="Sentiment Algorithm",
url="https://raw.githubusercontent.com/MarcSkovMadsen/awesome-streamlit/master/src/pages/gallery/contributions/marc_skov_madsen/sentiment_analyzer/sentiment_analyzer.py",
is_awesome=True,
tags=[tag],
author=author,
)
|
b4eb6bd4c8409e83d0ebb75f0dc390ce7d669512
| 3,640,576
|
def del_list(request, list_id: int, list_slug: str) -> HttpResponse:
"""Delete an entire list. Danger Will Robinson! Only staff members should be allowed to access this view.
"""
task_list = get_object_or_404(TaskList, slug=list_slug)
# Ensure user has permission to delete list. Admins can delete all lists.
# Get the group this list belongs to, and check whether current user is a member of that group.
# FIXME: This means any group member can delete lists, which is probably too permissive.
if task_list.group not in request.user.groups.all() and not request.user.is_staff:
raise PermissionDenied
if request.method == 'POST':
TaskList.objects.get(id=task_list.id).delete()
messages.success(request, "{list_name} is gone.".format(list_name=task_list.name))
return redirect('todo:lists')
else:
task_count_done = Task.objects.filter(task_list=task_list.id, completed=True).count()
task_count_undone = Task.objects.filter(task_list=task_list.id, completed=False).count()
task_count_total = Task.objects.filter(task_list=task_list.id).count()
context = {
"task_list": task_list,
"task_count_done": task_count_done,
"task_count_undone": task_count_undone,
"task_count_total": task_count_total,
}
return render(request, 'todo/del_list.html', context)
|
5183bc65bbb3025ec84511fa0fab32abab8da761
| 3,640,577
|
def model_softmax(input_data=None,
output_targets=None,
num_words=3000,
num_units=128,
num_layers=2,
num_tags=5,
batchsize=1,
train=True
):
"""
    :param input_data: input word-id tensor
    :param output_targets: target tag-id tensor
    :param num_words: vocabulary size
    :param num_units: number of LSTM units per layer
    :param num_layers: number of stacked LSTM layers
    :param num_tags: number of output tags
    :param batchsize: batch size
    :param train: True for training, False for prediction
    :return: dict of tensors for the requested mode
"""
tensors = {}
with tf.name_scope('embedding'):
w = tf.Variable(tf.random_uniform([num_words, num_units], -1.0, 1.0), name="W")
        # word embeddings, shape [?, ?, num_units]
inputs = tf.nn.embedding_lookup(w, input_data)
with tf.name_scope('lstm'):
lstmcell = tf.nn.rnn_cell.BasicLSTMCell
cell_list = [lstmcell(num_units, state_is_tuple=True) for i in range(num_layers)]
cell_mul = tf.nn.rnn_cell.MultiRNNCell(cell_list, state_is_tuple=True)
initial_state = cell_mul.zero_state(batch_size=batchsize, dtype=tf.float32)
        # sequence outputs, shape [?, ?, num_units]
outputs, last_state = tf.nn.dynamic_rnn(cell_mul, inputs, initial_state=initial_state)
with tf.name_scope('softmax'):
output = tf.reshape(outputs, [-1, num_units])
weights = tf.Variable(tf.truncated_normal([num_units, num_tags]))
bias = tf.Variable(tf.zeros(shape=[num_tags]))
logits = tf.nn.bias_add(tf.matmul(output, weights), bias=bias)
prediction = tf.reshape(tf.argmax(logits, axis=1, output_type=tf.int32),
shape=[batchsize, -1])
    # At training time compute the loss; at prediction time only the logits are needed.
if train:
with tf.name_scope('loss'):
labels = tf.reshape(output_targets, [-1])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
total_loss = tf.reduce_mean(loss)
accu = tf.reduce_mean(tf.cast(tf.equal(output_targets, prediction),
dtype=tf.float32))
            train_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(total_loss)
tensors['initial_state'] = initial_state
tensors['output'] = output
tensors['last_state'] = last_state
tensors['train_op'] = train_op
tensors['prediction'] = prediction
tensors['loss'] = total_loss
tensors['accu'] = accu
else:
        # Keep the output format consistent with the CRF variant.
tensors['prediction'] = prediction
return tensors
|
a3991206b0cdae621e1095a1d1dc4493d600bc26
| 3,640,578
|
from typing import AnyStr
from typing import List
from typing import Dict
def get_metric_monthly_rating(metric: AnyStr,
tenant_id: AnyStr,
namespaces: List[AnyStr]) -> List[Dict]:
"""
Get the monthly price for a metric.
:metric (AnyStr) A string representing the metric.
:tenant_id (AnyStr) A string representing the tenant, only used by decorators.
:namespaces (List[AnyStr]) A list of namespaces accessible by the tenant.
Return the results of the query as a list of dictionary.
"""
qry = sa.text("""
SELECT max(frame_price) * 24 *
(SELECT extract(days FROM
date_trunc('month', now()) + interval '1 month - 1 day'))
AS frame_price
FROM frames
WHERE metric = :metric
AND frame_begin >= date_trunc('month', now())
AND namespace IN :namespaces
""").bindparams(bindparam('namespaces', expanding=True))
params = {
'metric': metric,
'tenant_id': tenant_id,
'namespaces': namespaces
}
return process_query(qry, params)
|
e73c56015a9320e9ce08b0f1375a7cea70dcc0f0
| 3,640,579
|
def masked_softmax_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= tf.transpose(mask)
return tf.reduce_mean(tf.transpose(loss))
|
f95f917ff4dd5835c84167f7bf3ea76a4cf6536b
| 3,640,580
|
def u0(x):
"""
Initial Condition
Parameters
----------
x : array or float;
Real space
Returns
-------
array or float : Initial condition evaluated in the real space
"""
return sin(pi * x)
|
bd55cc7226a4d2ca941b8718d62025f6f2e157b6
| 3,640,581
|
import json
def jsonify(value):
"""
Convert a value into a JSON string that can be used for JSONB queries in
Postgres.
If a string happens to contain the character U+0000, which cannot be
represented in a PostgreSQL value, remove the escape sequence representing
that character, effectively stripping out that character from all strings.
"""
return json.dumps(value, ensure_ascii=False).replace("\\u0000", "")
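
# Hedged usage sketch, not part of the original snippet: the escaped NUL character
# is stripped, leaving a string that PostgreSQL JSONB will accept.
if __name__ == "__main__":
    print(jsonify({"name": "abc\u0000def"}))  # {"name": "abcdef"}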
|
7fff497b302822f8f79f0e68b2576c26458df99c
| 3,640,582
|
import os
import json
import urllib3
import certifi
def add_generated_report_header(report_header):
"""
Upload report history and return the id of the header that was generated
on the server.
Parameters
----------
report_header:
        Required Parameters:
        A dictionary of parameters that will be used to describe the report, consisting of:
        - report: Name of the report
        - executionTimeMS: The number of milliseconds it took to generate the report
        - scheduled: True if the report was scheduled, false if it was not
        - note: Any notes to be added to the report
        - user: An Entity Header (dictionary of id and text) of the user (which could be a system user) that requested the report.
        - contentType: Mime type of the report, generally this is application/pdf
        - fileName: name of the file (not including the path) of the report
        - reportTitle: Title of the report as it was generated
        Optional Parameters
        - reportSummary: Report summary as returned from the generated report
        - reportDate: Date for the report
- device: An Entity Header (dictionary of id and text) of the device that this report is for, if this is provided reports for specific devices will be available in the dashboard
Returns
-------
out: string
Returns the id of the generated report that can be used to upload a report.
"""
job_server = os.environ.get('JOB_SERVER_URL')
if(job_server is None):
raise Exception("Missing environment variable [JOB_SERVER_URL]")
headers={'Content-Type':'application/json'}
generated_report_json = json.dumps(report_header)
url = "%s/api/generatedreport/header" % (job_server)
encoded_data = generated_report_json.encode('utf-8')
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
r = http.request('POST', url,
headers=headers,
preload_content=False,
body=encoded_data)
responseText = ''
responseStatus = r.status
for chunk in r.stream(32):
responseText += chunk.decode("utf-8")
responseJSON = json.loads(responseText)
r.release_conn()
if responseStatus > 299:
print('Failed http call, response code: ' + str(responseStatus))
print('Url: ' + url)
print(responseJSON)
print('--------------------------------------------------------------------------------')
print()
raise Exception("Could not upload report header to %s" % url)
if(responseJSON["successful"]):
return responseJSON["result"]
else:
raise Exception(responseJSON["errors"][0]["message"])
|
d1c1923e4a61ae82f4f9319b471a18ed2bcbf562
| 3,640,583
|
def generate_dataset(type='nlp', test=1):
    """
    Generates a dataset for the model; `type` selects between the 'nlp' and
    'non-nlp' generators.
    """
if type == 'nlp':
return generate_nlp_dataset(test=test)
elif type == 'non-nlp':
return generate_non_nlp_dataset()
|
5e8998a6c9e10775367be3d6d4a722f3e24c6be1
| 3,640,584
|
def search(isamAppliance, comment, check_mode=False, force=False):
"""
Retrieve snapshots with given comment contained
"""
ret_obj = isamAppliance.create_return_object()
ret_obj_all = get(isamAppliance)
for obj in ret_obj_all['data']:
if comment in obj['comment']:
logger.debug("Snapshot comment \"{0}\" has this string \"{1}\" in it.".format(obj['comment'], comment))
if ret_obj['data'] == {}:
ret_obj['data'] = [obj['id']]
else:
ret_obj['data'].append(obj['id'])
return ret_obj
|
174c12af4eb26fc2cbeef263564f58c8638daf16
| 3,640,585
|
def getAsciiFileExtension(proxyType):
"""
The file extension used for ASCII (non-compiled) proxy source files
for the proxies of specified type.
"""
return '.proxy' if proxyType == 'Proxymeshes' else '.mhclo'
|
cb2b27956b3066d58c7b39efb511b6335b7f2ad6
| 3,640,586
|
def dist(s1, s2):
"""Given two strings, return the Hamming distance (int)"""
return abs(len(s1) - len(s2)) + sum(
map(lambda p: 0 if p[0] == p[1] else 1, zip(s1.lower(), s2.lower())))
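
# Hedged usage sketch, not part of the original snippet: mismatches are counted
# case-insensitively over the overlap, plus the difference in length.
if __name__ == "__main__":
    print(dist("karolin", "kathrin"))  # 3
    print(dist("Hello", "hello!"))     # 1 (no mismatches, one extra character)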
|
ef7b3bf24e24a2e49f0c7acfd7bcb8f23fa9af2e
| 3,640,587
|
import pickle
def read_bunch(path):
""" read bunch.
:param path:
:return:
"""
    with open(path, 'rb') as file:
        bunch = pickle.load(file)
    return bunch
|
aec87c93e20e44ddeeda6a8dfaf37a61e837c714
| 3,640,588
|
def cluster_analysis(L, cluster_alg, args, kwds):
"""Given an input graph (G), and whether the graph
Laplacian is to be normalized (True) or not (False) runs spectral clustering
as implemented in scikit-learn (empirically found to be less effective)
Returns Partitions (list of sets of ints)
"""
labels = cluster_alg(*args, **kwds).fit_predict(L)
num_clusters = np.max(labels) + 1
partitions = [set() for _ in range(num_clusters)]
    outliers = set()  # mechanism only used by DBSCAN-like algorithms (i.e. where a vertex gets no label)
for i, guess in enumerate(labels):
if guess == -1:
outliers.add(i)
else:
partitions[guess].add(i)
return partitions, outliers
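
# Hedged usage sketch, not part of the original snippet: DBSCAN on a toy embedding;
# the isolated point gets label -1 and lands in the outlier set.
if __name__ == "__main__":
    import numpy as np
    from sklearn.cluster import DBSCAN
    X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0], [20.0, 20.0]])
    partitions, outliers = cluster_analysis(X, DBSCAN, (), {"eps": 0.5, "min_samples": 2})
    print(partitions, outliers)  # [{0, 1}, {2, 3}] and {4}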
|
83114156a0b5517d31e2b2c2ffb7fc0837098db8
| 3,640,589
|
def col_index_list(info, key, value):
"""Given a list of dicts 'info', return a list of indices corresponding to
columns in which info[key] == value. Use to build lists of default columns,
non-exportable columns, etc."""
index_list = list()
    if info is not None:
        for i, entry in enumerate(info):
            if entry.get(key) == value:
                index_list.append(i)
return index_list
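
# Hedged usage sketch, not part of the original snippet: collect the indices of
# columns flagged as non-exportable.
if __name__ == "__main__":
    cols = [{"name": "id", "export": False},
            {"name": "email", "export": True},
            {"name": "notes", "export": False}]
    print(col_index_list(cols, "export", False))  # [0, 2]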
|
af46b03c2fe5bce2ceb7305fd670ce1f0f52ae38
| 3,640,590
|
def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
Args:
logits: [batch_size, num_classes] logits outputs of the network .
labels: [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64`
in the range `[0, num_classes)`.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size] or [batch_size, 1].
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shapes of `logits`, `labels`, and `weights` are
incompatible, or if `weights` is None.
"""
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
[logits, labels, weights]) as scope:
labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
|
dcae4206bdcb147d5bdd4611170f12ba4e371d70
| 3,640,591
|
import os, tempfile
import sys
def curl(url, headers={}, data=None, verbose=0):
"""Use curl to make a request; return the entire reply as a string."""
    fd, tempname = tempfile.mkstemp(prefix='scrape')
    os.close(fd)  # only the path is needed; curl writes to it via the shell
command = 'curl --include --insecure --silent --max-redirs 0'
if data:
if not isinstance(data, str): # Unicode not allowed here
data = urlencode(data)
command += ' --data ' + shellquote(data)
for name, value in headers.iteritems():
command += ' --header ' + shellquote('%s: %s' % (name, value))
command += ' ' + shellquote(url)
if verbose >= 3:
print >>sys.stderr, 'execute:', command
os.system(command + ' > ' + tempname)
reply = open(tempname).read()
os.remove(tempname)
return reply
|
66d8e1d9291bc6d153eaac43961c940521417136
| 3,640,592
|
def retr_radihill(smax, masscomp, massstar):
"""
Return the Hill radius of a companion
Arguments
        smax: semi-major axis [AU]
        masscomp: mass of the companion
        massstar: mass of the host star (same units as masscomp)
"""
radihill = smax * (masscomp / 3. / massstar)**(1. / 3.) # [AU]
return radihill
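
# Hedged usage sketch, not part of the original snippet: an Earth-mass companion
# (~3e-6 solar masses) at 1 AU around a solar-mass star.
if __name__ == "__main__":
    print(retr_radihill(smax=1.0, masscomp=3.0e-6, massstar=1.0))  # ~0.01 AU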
|
5010f66026db7e2544b85f70fd1449f732c024b4
| 3,640,593
|
def load_feature_file(in_feature):
"""Load the feature file into a pandas dataframe."""
f = pd.read_csv(feature_path + in_feature, index_col=0)
return f
|
95bb40cc381dab3c29cf81c40d308104e9e4035b
| 3,640,594
|
def add_observation_noise(obs, noises, stds, only_object_noise=False):
"""Add noise to observations
`noises`: Standard normal noise of same shape as `obs`
`stds`: Standard deviation per dimension of `obs` to scale noise with
"""
assert obs.shape == noises.shape
idxs_object_pos = SENSOR_INFO_PNP["object_pos"]
agent_vel = obs[..., SENSOR_INFO_PNP["grip_velp"]]
obs = obs.copy()
if only_object_noise:
obs[..., idxs_object_pos] += (
noises[..., idxs_object_pos] * stds[..., idxs_object_pos]
)
else:
obs += noises * stds
# Recompute relative position
obs[..., SENSOR_INFO_PNP["object_rel_pos"]] = (
obs[..., SENSOR_INFO_PNP["object_pos"]] - obs[..., SENSOR_INFO_PNP["grip_pos"]]
)
# Recompute relative speed: first add old agent velocity to get noisy
# object velocity, then subtract noisy agent velocity to get correct
# relative speed between noisy measurements
obs[..., SENSOR_INFO_PNP["object_velp"]] = (
obs[..., SENSOR_INFO_PNP["object_velp"]]
+ agent_vel
- obs[..., SENSOR_INFO_PNP["grip_velp"]]
)
return obs
|
926de82261b6cbd702e3f19f201f82c1a94ca72b
| 3,640,595
|
import json
def test_domains(file_path="../../domains.json"):
"""
Reads a list of domains and see if they respond
"""
# Read file
with open(file_path, 'r') as domain_file:
domains_json = domain_file.read()
# Parse file
domains = json.loads(domains_json)
results = {}
for domain in domains:
status = check_status_code(domain)
results[domain] = status
return results
|
69c6792ee86e90dfdf08a866d2d8e04022dde8c7
| 3,640,596
|
from typing import Dict
from typing import Any
def mix_dirichlet_noise(distribution: Dict[Any, float],
epsilon: float,
alpha: float) -> Dict[Any, float]:
"""Combine values in dictionary with Dirichlet noise. Samples
dirichlet_noise according to dirichlet_alpha in each component. Then
updates the value v for key k with (1-epsilon) * v + epsilon * noise_k.
Parameters
----------
distribution
Dictionary with floats as values.
epsilon
Mixes the prior probabilities for starting_node with Dirichlet
noise. Uses (1 - dirichlet_epsilon) * prior_prob +
dirichlet_epsilon * dirichlet_noise, where dirichlet_noise is
sampled from the Dirichlet distribution with parameter dirichlet_alpha.
Set to 0.0 if no Dirichlet perturbation.
alpha
The parameter to sample the Dirichlet distribution with.
Returns
-------
dict
The dictionary with perturbed values.
"""
noise = np.random.dirichlet([alpha] * len(distribution))
    return {k: (1 - epsilon) * v + epsilon * n
            for ((k, v), n) in zip(distribution.items(), noise)}
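
# Hedged usage sketch, not part of the original snippet: perturb a prior over three
# moves; the mixture of two distributions that each sum to 1 still sums to 1.
if __name__ == "__main__":
    import numpy as np
    np.random.seed(0)
    prior = {"a": 0.5, "b": 0.3, "c": 0.2}
    mixed = mix_dirichlet_noise(prior, epsilon=0.25, alpha=0.3)
    print(mixed, sum(mixed.values()))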
|
f779566b27107f86a92952470c949c69edb623be
| 3,640,597
|
def get_video_ID(video_url: str) -> str:
"""Returns the video ID of a youtube video from a URL"""
try:
return parse_qs(urlparse(video_url).query)['v'][0]
except KeyError:
# The 'v' key isn't there, this could be a youtu.be link
return video_url.split("/")[3][:11]
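
# Hedged usage sketch, not part of the original snippet: both the long watch URL
# and the short youtu.be form resolve to the same 11-character ID.
if __name__ == "__main__":
    print(get_video_ID("https://www.youtube.com/watch?v=dQw4w9WgXcQ"))  # dQw4w9WgXcQ
    print(get_video_ID("https://youtu.be/dQw4w9WgXcQ"))                 # dQw4w9WgXcQ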
|
c185a6c5a2c8a5bb4e2d6efd57f325023b030cda
| 3,640,598
|
def profiling_csv(stage, phases, durations):
"""
Dumps the profiling information into a CSV format.
For example, with
stage: `x`
phases: ['a', 'b', 'c']
durations: [1.42, 2.0, 3.4445]
The output will be:
```
x,a,1.42
x,b,2.0
x,c,3.444
```
"""
assert all(hasattr(p, "name") for p in phases), "expected to have name attribute."
return "\n".join(
[f"{stage},{p.name},{round(t, 3)}" for (p, t) in zip(phases, durations)]
)
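
# Hedged usage sketch, not part of the original snippet: any object with a `name`
# attribute works as a phase, so a namedtuple is enough here.
if __name__ == "__main__":
    from collections import namedtuple
    Phase = namedtuple("Phase", "name")
    print(profiling_csv("x", [Phase("a"), Phase("b"), Phase("c")], [1.42, 2.0, 3.4445]))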
|
d40ee5601aa201904741870ce75c4b5bfde0f9bc
| 3,640,599
|