content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def has_conformer(molecule, check_two_dimension=False):
    """
    Check if conformer exists for molecule. Return True or False
    Parameters
    ----------
    molecule
        Molecule object exposing ``NumConfs``/``GetConfs`` (presumably an
        OpenEye OEMol -- TODO confirm against callers).
    check_two_dimension: bool, optional. Default False
        If True, will also check if conformation is a 2D conformation (all z coordinates are zero) and return False if
        conformation is 2D
    Returns
    -------
    bool
        False when no usable (3D, non-placeholder) conformer is found,
        True otherwise.
    """
    conformer_bool = True
    try:
        if molecule.NumConfs() <= 1:
            # Check if xyz coordinates are not zero
            for conf in molecule.GetConfs():
                # print(conf.GetCoords().__len__())
                # coords = molecule.GetCoords()
                # values = np.asarray(list(coords.values()))
                # print(values)
                # print(values.all())
                # if not values.all():
                # conformer_bool = False
                #for i in range(conf.GetCoords().__len__()):
                # A conformer whose every atom sits exactly at the origin is
                # treated as a placeholder, i.e. no real conformer.
                values = np.asarray([conf.GetCoords().__getitem__(i) == (0.0, 0.0, 0.0) for i in
                                conf.GetCoords()])
                if values.all():
                    conformer_bool = False
    except AttributeError:
        # Object without NumConfs/GetConfs: no conformer information at all.
        conformer_bool = False
    if conformer_bool and check_two_dimension:
        for conf in molecule.GetConfs():
            # All-zero z coordinates indicate a flat (2D) conformation.
            values = np.asarray([conf.GetCoords().__getitem__(i)[-1] == 0.0 for i in conf.GetCoords()])
            if values.all():
                conformer_bool = False
    return conformer_bool | 5,331,100 |
def pad_sequence(yseqs, batch_first=False, padding_value=0):
    """Numpy implementation of torch.pad_sequence
    Args:
        yseqs (list of np.ndarray): List of arrays, each of shape (T_i, *).
        batch_first (bool): If True output is (B, T, *); otherwise (T, B, *).
        padding_value (int, optional): Padding value. Defaults to 0.
    Returns:
        np.ndarray
    Examples:
        >>> a = np.ones((25, 300))
        >>> b = np.ones((22, 300))
        >>> c = np.ones((15, 300))
        >>> pad_sequence([a, b, c]).shape
        (25, 3, 300)
        >>> pad_sequence([a, b, c], batch_first=True).shape
        (3, 25, 300)
    """
    def _to_time_major(batch):
        # (B, T, *) -> (T, B, *), leaving any trailing dims in place.
        return batch.transpose(1, 0, *np.arange(2, len(batch.shape)))

    if len(yseqs) == 1:
        # Single sequence: nothing to pad, but batch_first must still be
        # honored (the original returned (1, T, *) unconditionally -- a bug).
        base = np.array(yseqs)
        return base if batch_first else _to_time_major(base)
    # Pad every sequence up to the longest one along axis 0.
    max_idx = np.argmax([y.shape[0] for y in yseqs])
    max_shape = yseqs[max_idx].shape
    base = np.ones((len(yseqs), *max_shape)) * padding_value
    for i, y in enumerate(yseqs):
        base[i][:y.shape[0]] = y
    return base if batch_first else _to_time_major(base)
def http(app):
    """Flask test client.

    Yields a client bound to *app*, keeping the request context open for
    the duration of the fixture.
    """
    with app.test_client() as test_client:
        yield test_client
async def fetch_sequence_id(session: _session.Session) -> int:
    """Fetch sequence ID.

    Queries the GraphQL endpoint for the thread-list sync sequence id (used
    to resume the MQTT listener).

    Raises:
        _exception.NotLoggedIn: if the response carries no sequence id.
    """
    params = {
        "limit": 0,
        "tags": ["INBOX"],
        "before": None,
        "includeDeliveryReceipts": False,
        "includeSeqID": True,
    }
    log.debug("Fetching MQTT sequence ID")
    # Same doc id as in `Client.fetch_threads`
    (j,) = await session._graphql_requests(_graphql.from_doc_id("1349387578499440", params))
    sequence_id = j["viewer"]["message_threads"]["sync_sequence_id"]
    if not sequence_id:
        raise _exception.NotLoggedIn("Failed fetching sequence id")
    return int(sequence_id) | 5,331,103 |
def test_missing_txn_request(ledger_no_genesis):
    """
    Testing LedgerManager's `_missing_txns`

    Exercises three regimes: ledger already ahead of the consistency proof,
    ledger behind but with enough catchup replies, and ledger behind with
    replies missing (where the count of missing txns is returned).
    """
    ledger = ledger_no_genesis
    for i in range(20):
        txn = random_txn(i)
        ledger.add(txn)
    # Callbacks don't matter in this test
    ledger_info = LedgerInfo(0, ledger, *[None] * 6)
    assert ledger_info.catchupReplyTimer is None
    assert LedgerManager._missing_txns(ledger_info) == (False, 0)
    ledger_info.catchupReplyTimer = time.perf_counter()
    # Ledger is already ahead
    cp = ConsistencyProof(0, 1, 10, 1, 1,
                          'GJybBTHjzMzPWsE6n9qNQWAmhJP88dTcdbgkGLhYGFYn',
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC', [])
    ledger_info.catchUpTill = cp
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(1, 15)]
    assert not LedgerManager._missing_txns(ledger_info)[0]
    # Ledger is behind but catchup replies present
    cp = ConsistencyProof(0, 1, 30, 1, 1,
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC',
                          'EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC', [])
    ledger_info.catchUpTill = cp
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(21, 31)]
    assert not LedgerManager._missing_txns(ledger_info)[0]
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(21, 35)]
    assert not LedgerManager._missing_txns(ledger_info)[0]
    # Ledger is behind
    cp = ConsistencyProof(0, 1, 30, 1, 1,
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC',
                          'EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC', [])
    ledger_info.catchUpTill = cp
    # Five replies short of the proof's end (30), so 5 txns are missing.
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(21, 26)]
    assert LedgerManager._missing_txns(ledger_info) == (True, 5)
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(26, 31)]
    assert LedgerManager._missing_txns(ledger_info) == (True, 5) | 5,331,104 |
def main(args=None):
    """
    Processes command line parameters into options and files, then checks
    or update FITS DATASUM and CHECKSUM keywords for the specified files.
    """
    fits_files = handle_options(args or sys.argv[1:])
    setup_logging()
    # Tally per-file error counts; any non-zero total means failure.
    errors = sum(process_file(filename) for filename in fits_files)
    if errors:
        log.warning(f'{errors} errors')
    return int(bool(errors))
def parse_transceiver_dom_sensor(output_lines):
    """
    @summary: Parse the list of transceiver from DB table TRANSCEIVER_DOM_SENSOR content
    @param output_lines: DB table TRANSCEIVER_DOM_SENSOR content output by 'redis' command
    @return: Return parsed transceivers in a list
    """
    key_pattern = re.compile(r"TRANSCEIVER_DOM_SENSOR\|(Ethernet\d+)")
    ports = []
    for entry in output_lines:
        matched = key_pattern.match(entry)
        # Every line is expected to be a TRANSCEIVER_DOM_SENSOR key.
        assert matched, "Unexpected line %s" % entry
        ports.append(matched.group(1))
    return ports
def patches_from_ed_script(source,
                           re_cmd=re.compile(r'^(\d+)(?:,(\d+))?([acd])$')):
    """Converts source to a stream of patches.

    Each yielded patch is a triple of line indexes:
    - number of the first line to be replaced
    - one plus the number of the last line to be replaced
    - list of line replacements
    Additions, deletions and replacements can all be expressed this way.
    """
    stream = iter(source)
    for command in stream:
        parsed = re_cmd.match(command)
        if parsed is None:
            raise ValueError("invalid patch command: %r" % command)
        start, end, op = parsed.groups()
        start = int(start)
        end = int(end) if end is not None else None
        if op == 'd':
            # Deletion: no replacement lines follow the command.
            start -= 1
            yield (start, end if end is not None else start + 1, [])
            continue
        if op == 'a':
            # Append: insert after line `start`, replacing nothing.
            if end is not None:
                raise ValueError("invalid patch argument: %r" % command)
            end = start
        else:  # op == 'c'
            start -= 1
            if end is None:
                end = start + 1
        # Collect replacement lines up to the terminating "." marker.
        replacement = []
        for text in stream:
            if text == '':
                raise ValueError("end of stream in command: %r" % command)
            if text in ('.\n', '.'):
                break
            replacement.append(text)
        yield (start, end, replacement)
def superimposition_matrix(
    v0: np.ndarray,
    v1: np.ndarray,
    scaling: bool = False,
    usesvd: bool = True
) -> np.ndarray:
    """
    Return matrix to transform given vector set into second vector set.

    Args:
    ----
    v0: shape (3, *) or (4, *) arrays of at least 3 vectors.
    v1: shape (3, *) or (4, *) arrays of at least 3 vectors.
    scaling: True if uniform scaling is desired.
    usesvd: True if SVD decomposition is used.

    If usesvd is True, the weighted sum of squared deviations (RMSD) is
    minimized according to the algorithm by W. Kabsch [8]. Otherwise the
    quaternion based algorithm by B. Horn [9] is used (slower when using
    this Python implementation).
    The returned 4x4 matrix performs rotation, translation and uniform
    scaling (if specified).
    """
    # np.array(..., copy=False) raises on NumPy >= 2.0; np.asarray keeps the
    # old "avoid copying when possible" semantics on every version.
    v0 = np.asarray(v0, dtype=np.float64)[:3]
    v1 = np.asarray(v1, dtype=np.float64)[:3]
    if v0.shape != v1.shape or v0.shape[1] < 3:
        raise ValueError('Vector sets are of wrong shape or type.')
    # move centroids to origin (the subtraction copies, so the caller's
    # arrays are never mutated, even by the in-place scaling step below)
    t0 = np.mean(v0, axis=1)
    t1 = np.mean(v1, axis=1)
    v0 = v0 - t0.reshape(3, 1)
    v1 = v1 - t1.reshape(3, 1)
    if usesvd:
        # Singular Value Decomposition of covariance matrix
        u, s, vh = np.linalg.svd(np.dot(v1, v0.T))
        # rotation matrix from SVD orthonormal bases
        R = np.dot(u, vh)
        if np.linalg.det(R) < 0.0:
            # R does not constitute right handed system: flip last axis
            R -= np.outer(u[:, 2], vh[2, :]*2.0)
            s[-1] *= -1.0
        # homogeneous transformation matrix
        M = np.identity(4)
        M[:3, :3] = R
    else:
        # compute symmetric matrix N (Horn's quaternion method)
        xx, yy, zz = np.sum(v0 * v1, axis=1)
        xy, yz, zx = np.sum(v0 * np.roll(v1, -1, axis=0), axis=1)
        xz, yx, zy = np.sum(v0 * np.roll(v1, -2, axis=0), axis=1)
        N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx),
             (yz-zy, xx-yy-zz, xy+yx, zx+xz),
             (zx-xz, xy+yx, -xx+yy-zz, yz+zy),
             (xy-yx, zx+xz, yz+zy, -xx-yy+zz))
        # quaternion: eigenvector corresponding to most positive eigenvalue
        eigenvalues, V = np.linalg.eig(N)
        q = V[:, np.argmax(eigenvalues)]
        q /= vector_norm(q)  # unit quaternion
        q = np.roll(q, -1)  # move w component to end
        # homogeneous transformation matrix
        M = quaternion_matrix(q)
    # scale: ratio of rms deviations from centroid
    if scaling:
        v0 *= v0
        v1 *= v1
        M[:3, :3] *= math.sqrt(np.sum(v1) / np.sum(v0))
    # translation: map the v0 centroid onto the v1 centroid
    M[:3, 3] = t1
    T = np.identity(4)
    T[:3, 3] = -t0
    M = np.dot(M, T)
    return M
def add_verbosity(parser, quiet=True):
    """Add the verbosity and quiet options.

    parser[in]        the parser instance
    quiet[in]         if True, include the --quiet option
                      (default is True)
    """
    parser.add_option(
        "-v", "--verbose", action="count", dest="verbosity",
        help="control how much information is displayed. "
             "e.g., -v = verbose, -vv = more verbose, -vvv = debug")
    if quiet:
        parser.add_option(
            "-q", "--quiet", action="store_true", dest="quiet", default=False,
            help="turn off all messages for quiet execution.")
def get_graph_metadata(graph_id: int):
    """Returns the metadata for a single graph. This is automatically generated
    by the datasource classes.
    Parameters
    ----------
    graph_id : int
        Graph ID.
    Returns 404 if the graph ID is not found
    Returns
    -------
    Dict
        A dictionary representing the metadata of the current graph.
    """
    # Look the graph up by primary key; first() yields None when absent.
    graph_obj = Graph.query.filter_by(id=graph_id).first()
    if not graph_obj:
        return make_response(jsonify({"message": "Graph not found"}), 404)
    response = jsonify(graph_obj.meta)
    return response | 5,331,110 |
def username_exists(username, original=""):
    """Returns true if the given username exists.

    A username equal to *original* (e.g. the user's current name when
    editing a profile) is never reported as taken.
    """
    if username == original:
        return False
    return User.objects.filter(username=username).count() > 0
def set_n_jobs(n_jobs: int, x_df: pd.DataFrame) -> int:
    """
    Sets the number of n_jobs, processes to run in parallel. If n_jobs is not specified, the max number of CPUs is
    used. If n_jobs is set to a higher amount than the number of observations in x_df, n_jobs is rebalanced to match
    the length of x_df.
    :param n_jobs: number of jobs to run in parallel
    :param x_df: x dataframe
    :return: number of jobs to run in parallel, using the above logic
    """
    requested = n_jobs if n_jobs else mp.cpu_count()
    # Never spawn more workers than there are rows to process.
    return min(requested, len(x_df))
def group_node_intro_times(filt, groups, n_sents):
    """
    Returns lists of addition times of nodes into particular groups
    """
    times_per_group = [[] for _ in set(groups)]
    for node, group in enumerate(groups):
        # The diagonal entry holds the step at which the node appeared;
        # normalize the addition time by the total number of sentences.
        times_per_group[group].append(int(filt[node, node]) / n_sents)
    return times_per_group
def svn_client_version():
    """svn_client_version() -> svn_version_t const *

    Thin SWIG wrapper returning the linked Subversion client library's
    version structure.
    """
    return _client.svn_client_version() | 5,331,114 |
def split_dataset_into_train_val_test(save: bool = False) -> None:
    """Split single-label into training/validation/testing sets.

    Shuffles the PDB ids from ``dataset_single.csv`` and partitions them
    using the module-level RATIO_TRAIN constant. The partition is only
    persisted (to ``partition_single.csv``) when *save* is True; otherwise
    the function has no lasting effect.
    """
    # Random index shuffling.
    pdb_ids = list(
        tools.read_dict(
            os.path.join(constants.DATASETS_DIR, 'dataset_single.csv')))
    indexes = np.arange(len(pdb_ids))
    np.random.shuffle(indexes)
    # Train: ratio**2, val: ratio*(1-ratio), test: 1-ratio.
    # With ratio = 0.8, train/val/test have proportion 64% / 16% / 20%.
    train_ids = [pdb_ids[i] for i in indexes[0:int(RATIO_TRAIN**2 * len(pdb_ids))]]
    val_ids = [pdb_ids[i] for i in indexes[int(RATIO_TRAIN**2 * len(pdb_ids)):int(RATIO_TRAIN * len(pdb_ids))]]
    test_ids = [pdb_ids[i] for i in indexes[int(RATIO_TRAIN * len(pdb_ids)):]]
    partition = {
        'train': train_ids,
        'validation': val_ids,
        'test': test_ids,
    }
    if save: # DONE 2017-03-18.
        tools.dict_to_csv(
            partition,
            os.path.join(constants.DATASETS_DIR, 'partition_single.csv')) | 5,331,115 |
def get_slice_name(data_dir, imname, delta=0):
    """Infer slice name with an offset.

    With delta == 0 simply appends '.npy'; otherwise builds the name of
    the slice `delta` steps away and, when that file does not exist in
    `data_dir`, falls back toward the original slice one step at a time.
    """
    if delta == 0:
        return imname + '.npy'
    delta = int(delta)
    dirname, slicename = imname.split(os.sep)
    slice_idx = int(slicename)
    candidate = '%s%s' % (dirname, os.sep) + str(slice_idx + delta) + '.npy'
    # If the slice is not in the dataset, use its neighboring slice.
    while not os.path.exists(os.path.join(data_dir, candidate)):
        delta -= np.sign(delta)
        candidate = '%s%s' % (dirname, os.sep) + str(slice_idx + delta) + '.npy'
        if delta == 0:
            break
    return candidate
async def test_platform_manually_configured(hass):
    """Test that we do not discover anything or try to set up a gateway."""
    # Setting up the cover platform with the deconz domain must succeed...
    assert (
        await async_setup_component(
            hass, cover.DOMAIN, {"cover": {"platform": deconz.DOMAIN}}
        )
        is True
    )
    # ...without creating any deconz gateway entry in hass.data.
    assert deconz.DOMAIN not in hass.data | 5,331,117 |
def download(name, destination=None, chunksize=4096, force=False):
    """
    Checks if there is an actual version of the specified file on the device,
    and if not, downloads it from servers.
    Files, theirs checksums and links to them must be specified
    in the tps.downloader._content dictionary.
    :param name: str
        Name of the file.
    :param destination: Optional[str]
        See get_download_dir:data_dir
    :param chunksize: int
        What chunksize is used while downloading the file.
    :param force: bool
        If True, skip the cached-copy lookup and always (re)download.
    :return: str
        Path to the file, or None on any failure.
    """
    try:
        url, checksum = _content_info[name]
    except KeyError:
        logger.warning("There is no file named {} in content dictionary, None will be returned. Possible names: {}".
                       format(name, list(_content_info.keys())))
        return
    if not force:
        # Reuse a previously downloaded copy when its checksum still matches.
        filepath = find(name, destination, False, checksum)
        if filepath is not None:
            return filepath
    destination = get_download_dir(destination)
    if destination is None:
        logger.warning("Can not download file due to access permissions.")
        return
    filepath = os.path.join(destination, name)
    if os.path.exists(filepath) and checksum == calc_checksum(filepath):
        logger.info("The actual version of the file is already downloaded and can be found along the path: {}"
                    .format(filepath))
        return filepath
    try:
        infile = urlopen(url)
        length = infile.length
        chunk_n = ceil(length / chunksize)
        # tqdm renders a progress bar over the expected number of chunks.
        with open(filepath, "wb") as outfile:
            for _ in tqdm(range(chunk_n)):
                chunk = infile.read(chunksize)
                outfile.write(chunk)
                if not chunk:
                    break
        infile.close()
    except IOError as e:
        logger.error("Error downloading {} from {}:\n{}".format(name, url, e))
        return
    return filepath | 5,331,118 |
def getportnum(port):
    """
    Accepts a port name or number and returns the port number as an int.
    Returns -1 in case of invalid port name.

    :param port: a port number (int or numeric string, 0-65535) or a
        service name resolvable via ``socket.getservbyname`` (e.g. "http").
    :return: the port number as an int, or -1 if invalid.
    """
    try:
        portnum = int(port)
        if portnum < 0 or portnum > 65535:
            logger.error("invalid port number: %s" % port)
            portnum = -1
    except (TypeError, ValueError):
        # Not numeric -- fall back to resolving it as a service name.
        # (The original used the Python-2-only `except socket.error, e`
        # syntax and a bare except; both fixed here.)
        try:
            portnum = socket.getservbyname(port)
        except socket.error as e:
            logger.error("%s: %s" % (e, port))
            portnum = -1
    return portnum
async def test_snips_service_intent(hass):
    """Test ServiceIntentHandler via Snips.

    Fires a Snips intent over MQTT and checks that the mapped service is
    called with the slot-derived entity id, while Snips metadata
    (confidenceScore, siteId) is kept out of the service data.
    """
    await async_mock_mqtt_component(hass)
    hass.states.async_set("light.kitchen", "off")
    calls = async_mock_service(hass, "light", "turn_on")
    result = await async_setup_component(hass, "snips", {"snips": {}})
    assert result
    payload = """
    {
        "input": "turn the light on",
        "intent": {
            "intentName": "Lights",
            "confidenceScore": 0.85
        },
        "siteId": "default",
        "slots": [
            {
                "slotName": "name",
                "value": {
                    "kind": "Custom",
                    "value": "kitchen"
                },
                "rawValue": "green"
            }
        ]
    }
    """
    # Register the handler, then simulate the incoming MQTT intent message.
    async_register(
        hass, ServiceIntentHandler("Lights", "light", "turn_on", "Turned {} on")
    )
    async_fire_mqtt_message(hass, "hermes/intent/Lights", payload)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].domain == "light"
    assert calls[0].service == "turn_on"
    assert calls[0].data["entity_id"] == "light.kitchen"
    assert "confidenceScore" not in calls[0].data
    assert "site_id" not in calls[0].data | 5,331,120 |
def getParafromMinibatchModel(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 1500, minibatch_size = 32, print_cost = True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.
    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- test set, of shape (output size = 6, number of training examples = 1080)
    X_test -- training set, of shape (input size = 12288, number of training examples = 120)
    Y_test -- test set, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs
    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    Side effects:
    shows a matplotlib cost curve and prints train/test accuracy.
    """
    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    (n_x, m) = X_train.shape                          # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]                            # n_y : output size
    costs = []                                        # To keep track of the cost
    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_placeholders(n_x, n_y)
    # Initialize parameters
    parameters = initialize_parameters()
    # Forward propagation: Build the forward propagation in the tensorflow graph
    z3 = forward_propagation(X, parameters)
    # Cost function: Add cost function to tensorflow graph
    cost = compute_cost(z3, Y)
    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
    # Initialize all the variables
    init = tf.global_variables_initializer()
    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:
        # Run the initialization
        sess.run(init)
        # Do the training loop
        for epoch in range(num_epochs):
            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size)
            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the optimizer and the cost, the feedict should contain a minibatch for (X,Y).
                _ , temp_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                minibatch_cost += temp_cost / num_minibatches
            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(minibatch_cost)
        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")
        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(z3), tf.argmax(Y))
        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
        return parameters | 5,331,121 |
def add_land(
    ax=None, scale="10m", edgecolor=None, facecolor=None, linewidth=None, **kwargs
):
    """Add land to an existing map
    Parameters
    ----------
    ax : matplotlib axes object, optional
    scale : str, optional
        Resolution of NaturalEarth data to use ('10m’, ‘50m’, or ‘110m').
    edgecolor : str or tuple, optional
        Color to use for the landmass edges.
    facecolor : str or tuple, optional
        Color to use for the landmass faces.
    linewidth : float, optional
        Width of land edge in points
    Other Parameters
    ----------------
    Keyword args are passed on to NaturalEarthFeature.
    Returns
    -------
    FeatureArtist
    """
    if ax is None:
        ax = plt.gca()
    # Styling not given explicitly falls back to the pyseas rcParams,
    # then to the dark-theme defaults.
    edgecolor = edgecolor or plt.rcParams.get(
        "pyseas.border.color", props.dark.border.color
    )
    facecolor = facecolor or plt.rcParams.get(
        "pyseas.land.color", props.dark.land.color
    )
    linewidth = linewidth or plt.rcParams.get("pyseas.border.linewidth", 0.4)
    land = cfeature.NaturalEarthFeature(
        "physical",
        "land",
        scale,
        edgecolor=edgecolor,
        facecolor=facecolor,
        linewidth=linewidth,
        **kwargs,
    )
    return ax.add_feature(land) | 5,331,122 |
def EnsureDispatch(prog_id, bForDemand = 1): # New fn, so we default the new demand feature to on!
    """Given a COM prog_id, return an object that is using makepy support, building if necessary"""
    disp = win32com.client.Dispatch(prog_id)
    if not disp.__dict__.get("CLSID"): # Eeek - no makepy support - try and build it.
        try:
            # Derive typelib information from the live object, then generate
            # (or load) the makepy module for that typelib.
            ti = disp._oleobj_.GetTypeInfo()
            disp_clsid = ti.GetTypeAttr()[0]
            tlb, index = ti.GetContainingTypeLib()
            tla = tlb.GetLibAttr()
            mod = EnsureModule(tla[0], tla[1], tla[3], tla[4], bForDemand=bForDemand)
            GetModuleForCLSID(disp_clsid)
            # Get the class from the module.
            import CLSIDToClass
            disp_class = CLSIDToClass.GetClass(str(disp_clsid))
            # Re-wrap the raw IDispatch in the generated early-bound class.
            disp = disp_class(disp._oleobj_)
        except pythoncom.com_error:
            raise TypeError("This COM object can not automate the makepy process - please run makepy manually for this object")
    return disp | 5,331,123 |
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute log probabilities within a job."""
n_samples = X.shape[0]
log_proba = np.empty((n_samples, n_classes))
log_proba.fill(-np.inf)
all_classes = np.arange(n_classes, dtype=np.int)
for estimator, features in zip(estimators, estimators_features):
log_proba_estimator = estimator.predict_log_proba(X[:, features])
if n_classes == len(estimator.classes_):
log_proba = np.logaddexp(log_proba, log_proba_estimator)
else:
log_proba[:, estimator.classes_] = np.logaddexp(
log_proba[:, estimator.classes_],
log_proba_estimator[:, range(len(estimator.classes_))])
missing = np.setdiff1d(all_classes, estimator.classes_)
log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
-np.inf)
return log_proba | 5,331,124 |
def _get_circles(img, board, pattern):
    """
    Get circle centers for a symmetric or asymmetric grid

    Converts 3-channel (BGR) input to grayscale, then runs OpenCV's
    findCirclesGrid and returns its (ok, corners) pair.
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    flag = cv2.CALIB_CB_SYMMETRIC_GRID
    if pattern == Patterns.ACircles:
        flag = cv2.CALIB_CB_ASYMMETRIC_GRID
    mono_arr = numpy.array(mono)
    (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows), flags=flag)
    # In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try
    # again with dimensions swapped - not so efficient.
    # TODO Better to add as second board? Corner ordering will change.
    if not ok and pattern == Patterns.Circles:
        (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols), flags=flag)
    return (ok, corners) | 5,331,125 |
def _default_achievement_page(asoup):
    """
    Parses the default steam achievement page
    Most pages use this

    Yields one parsed achievement per "achieveRow" element in *asoup*.
    """
    rows = asoup.find_all(class_="achieveRow")
    for row in rows:
        yield achievement_row_parser(row)
def sort_coords(coords: np.ndarray) -> np.ndarray:
    """Sort coordinates based on the angle with first coord from the center.

    Args:
        coords (np.ndarray):
            Coordinates to be sorted, formatted as
            np.array([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]]).

    Returns:
        np.ndarray of the sorted coordinates.
    """
    if len(coords[0]) != 3:
        raise ValueError("Only valid for 3D vector")
    centroid = np.average(coords, axis=0)
    rel = coords - centroid
    normal = np.cross(rel[0], rel[1])
    # Parallel first two vectors give a near-zero cross product;
    # fall back to the third vector in that case.
    if abs(np.linalg.norm(normal)) < 1e-8:
        normal = np.cross(rel[0], rel[2])
    normal = normal / np.linalg.norm(normal)
    ref = rel[0] / np.linalg.norm(rel[0])

    def _angle(idx: int) -> float:
        """Angle between rays from the centroid to rel[0] and rel[idx]."""
        unit = rel[idx] / np.linalg.norm(rel[idx])
        frame = np.concatenate(([ref], [unit], [normal]), axis=0)
        return np.arctan2(np.clip(np.dot(ref, unit), -1.0, 1.0),
                          np.linalg.det(frame))

    order = sorted(range(len(coords)), key=_angle)
    return coords[order]
def test_knn_class_with_invalid_params_fit_correctly():
    """ The function define a chain with incorrect parameters in the K-nn classification
    model. During the training of the chain, the parameter 'n_neighbors' is corrected
    """
    samples_amount = 100
    # Deliberately more neighbors than samples -- the chain must repair this.
    k_neighbors = 150
    features_options = {'informative': 1, 'redundant': 0,
                        'repeated': 0, 'clusters_per_class': 1}
    x_data, y_data = classification_dataset(samples_amount=samples_amount,
                                            features_amount=3,
                                            classes_amount=2,
                                            features_options=features_options)
    # Define regression task
    task = Task(TaskTypesEnum.classification)
    # Prepare data to train the model
    train_input = InputData(idx=np.arange(0, len(x_data)), features=x_data,
                            target=y_data, task=task, data_type=DataTypesEnum.table)
    # Prepare classification chain
    chain = get_knn_class_chain(k_neighbors)
    # Fit it
    chain.fit(train_input)
    # Reaching this point means fit() coped with the invalid parameter.
    is_chain_was_fitted = True
    assert is_chain_was_fitted | 5,331,128 |
def to_fraction(value, den_limit=65536):
    """
    Converts *value*, which can be any numeric type, an MMAL_RATIONAL_T, or a
    (numerator, denominator) tuple to a :class:`~fractions.Fraction` limiting
    the denominator to the range 0 < n <= *den_limit* (which defaults to
    65536).

    Raises:
        PiCameraValueError: if the resulting denominator is 0.
    """
    # EAFP chain: probe each supported representation in turn.
    try:
        # int, long, or fraction
        n, d = value.numerator, value.denominator
    except AttributeError:
        try:
            # float
            n, d = value.as_integer_ratio()
        except AttributeError:
            try:
                # MMAL_RATIONAL_T
                n, d = value.num, value.den
            except AttributeError:
                try:
                    # tuple
                    n, d = value
                    warnings.warn(
                        PiCameraDeprecated(
                            "Setting framerate or gains as a tuple is "
                            "deprecated; please use one of Python's many "
                            "numeric classes like int, float, Decimal, or "
                            "Fraction instead"))
                except (TypeError, ValueError):
                    # try and convert anything else to a Fraction directly
                    value = Fraction(value)
                    n, d = value.numerator, value.denominator
    # Ensure denominator is reasonable
    if d == 0:
        raise PiCameraValueError("Denominator cannot be 0")
    elif d > den_limit:
        return Fraction(n, d).limit_denominator(den_limit)
    else:
        return Fraction(n, d) | 5,331,129 |
def find_closest(myList, myNumber):
    """
    Returns closest value to myNumber.
    If two numbers are equally close, return the smallest number.
    # adapted from
    # https://stackoverflow.com/questions/12141150/from-list-of-integers-get-number-closest-to-a-given-value
    """
    ordered = sorted(myList)
    idx = bisect_left(ordered, myNumber)
    if idx == 0:
        return ordered[0]
    if idx == len(ordered):
        return ordered[-1]
    below, above = ordered[idx - 1], ordered[idx]
    # Strict comparison makes ties resolve to the smaller value.
    return above if above - myNumber < myNumber - below else below
def plot_grid_res():
    """The effect of a finer grid

    Plots the column density of the same halo at two grid resolutions
    (nslice=30 vs nslice=10), then their ratio; both figures are written
    via save_figure.
    """
    halo = myname.get_name(7, True)
    savefile = "boxhi_grid_cutoff_H2_32678.hdf5"
    ahalo = dp.PrettyBox(halo, 5, nslice=30, savefile=savefile)
    ahalo.plot_column_density(color="blue", ls="--", moment=True)
    # savefile = path.join(halo,"snapdir_003/boxhi_grid_16384.hdf5")
    ahalo2 = dp.PrettyBox(halo, 5, nslice=10)
    ahalo2.plot_column_density(color="red",moment=True, ls="-.")
    dla_data.column_density_data(moment=True)
    save_figure(path.join(outdir, "cosmo7_grid_5"))
    plt.clf()
    # Ratio of the two column density functions highlights resolution effects.
    cdf1 = ahalo.column_density_function()
    cdf2 = ahalo2.column_density_function()
    plt.semilogx(cdf1[0], cdf1[1]/cdf2[1], color="red", ls="-")
    save_figure(path.join(outdir, "cosmo7_grid_5_rel"))
    plt.clf() | 5,331,131 |
def remove_uuid_file(file_path, dry=False):
    """
    Renames a file without the UUID and returns the new pathlib.Path object

    The UUID is expected as the second-to-last dot-separated part of the
    name (e.g. ``stem.<uuid>.ext``); names without one are returned as-is.

    :param file_path: path (str or Path) whose name may embed a UUID
    :param dry: when True, compute the new path without touching the filesystem
    """
    file_path = Path(file_path)
    name_parts = file_path.name.split('.')
    # Need at least "stem.uuid"; a dot-less name would make name_parts[-2]
    # raise IndexError in the original code.
    if len(name_parts) < 2 or not is_uuid_string(name_parts[-2]):
        return file_path
    name_parts.pop(-2)
    new_path = file_path.parent.joinpath('.'.join(name_parts))
    if not dry and file_path.exists():
        file_path.replace(new_path)
    return new_path
def run_app():
    """ Run application.

    Command line: ``prog [config_file [host [port]]]``; defaults are
    config.json / localhost / 3028. Loads the configuration into the
    module-level CONFIG and then starts the Flask APP.
    """
    config_file = 'config.json'
    host = 'localhost'
    port = 3028
    # Positional overrides: argv[1]=config file, argv[2]=host, argv[3]=port.
    if len(sys.argv) > 1:
        config_file = sys.argv[1]
    if len(sys.argv) > 2:
        host = sys.argv[2]
    if len(sys.argv) > 3:
        port = int(sys.argv[3])
    print(f"Loading config from '{config_file}'")
    global CONFIG
    CONFIG = config.from_file(config_file)
    APP.run(host=host, port=port, threaded=True) | 5,331,133 |
def get_9x9x9_scramble(n=120):
    """ Gets a random scramble (SiGN notation) of length `n` for a 9x9x9 cube.

    Delegates to the embedded JS scrambler via its `get999scramble` entry point.
    """
    return _MEGA_SCRAMBLER.call("megaScrambler.get999scramble", n) | 5,331,134 |
def youku(link):
    """Find youku player URL."""
    matched = re.match(r'http:\/\/v\.youku\.com\/v_show/id_([\w]+)\.html', link)
    if matched:
        # Turn the watch-page video id into the embeddable player URL.
        return 'http://player.youku.com/embed/%s' % matched.group(1)
    return None
def get_default_service_account(project: Optional[str] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDefaultServiceAccountResult:
    """
    Use this data source to retrieve default service account for this project
    :param project: The project ID. If it is not provided, the provider project is used.
    :param opts: Options to control the invoke behavior; the provider
        version is filled in when missing.
    """
    __args__ = dict()
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the gcp provider; result fields are copied
    # into the awaitable wrapper below.
    __ret__ = pulumi.runtime.invoke('gcp:compute/getDefaultServiceAccount:getDefaultServiceAccount', __args__, opts=opts, typ=GetDefaultServiceAccountResult).value
    return AwaitableGetDefaultServiceAccountResult(
        display_name=__ret__.display_name,
        email=__ret__.email,
        id=__ret__.id,
        name=__ret__.name,
        project=__ret__.project,
        unique_id=__ret__.unique_id) | 5,331,136 |
def combinationSum(candidates, target):
    """
    :type candidates: List[int]
    :type target: int
    :rtype: List[List[int]]

    Returns every non-descending combination of candidates (with repeats
    allowed) whose values sum to target.
    """
    pool = sorted(candidates)
    combos = []

    def search(remaining, chosen):
        if remaining == 0:
            combos.append(chosen)
            return
        for value in pool:
            if value > remaining:
                # Pool is sorted, so nothing later can fit either.
                break
            if chosen and value < chosen[-1]:
                # Enforce non-descending order to avoid duplicate combos.
                continue
            search(remaining - value, chosen + [value])

    search(target, [])
    return combos
def create_file(webdav_storage):
    """
    Creates a file with a unique prefix in the WebDAV storage

    Factory fixture: returns an ``inner(filename, content, prefix)``
    callable that saves the file and returns its storage key.
    """
    from django.core.files.base import ContentFile
    from django_webdav_storage.compat import PY3, TEXT_TYPE
    def inner(filename, content=b'', prefix=''):
        # On Python 3, text content must be encoded before upload.
        if all((PY3, isinstance(content, TEXT_TYPE))):
            content = content.encode('UTF-8')
        # Fall back to a random UUID collection when no prefix is given.
        col = str(uuid.uuid4())
        key = os.path.join(prefix.lstrip('/') or col, filename)
        webdav_storage.save(key, ContentFile(content, key))
        return key
    return inner | 5,331,138 |
def get_Xy(sentence):
    """Process *sentence* into ([word1, w2, ..wn], [tag1, t2, ...tn])."""
    pairs = re.findall('(.)/(.)', sentence)
    if not pairs:
        return None
    pairs = np.asarray(pairs)
    # Column 0 holds the characters (data), column 1 the tags (labels).
    return pairs[:, 0], pairs[:, 1]
def write_json_to_file(directory_string, json_content):
    """
    Write *json_content* to the given path as pretty-printed JSON,
    creating the file or overwriting any existing content.
    (The previous docstring described read-with-fallback behavior the
    code never had.)
    :param directory_string: The relative directory string (ex: database/secrets.json)
    :type directory_string: str
    :param json_content: The content to write to the JSON file
    :type json_content: dict
    """
    # The context manager closes the file; the original's explicit
    # file.close() inside the with-block was redundant.
    with open(directory_string, "w") as file:
        json.dump(json_content, file, indent=4)
def write_zip_file(full_path, zipfile_instance, arcname=None):
    """
    Writes the directory, file or symbolic link using the zipfile instance
    works with write_to_zip()
    Args:
        full_path: full path to file, dir or symlink
        zipfile_instance: instance of a zipfile created with w or a
        arcname: Name to give the file or directory in the zip archive
    Returns:
    """
    name_in_archive = full_path if arcname is None else arcname
    if not os.path.islink(full_path):
        zipfile_instance.write(full_path, name_in_archive)
        return
    # Store the symlink itself rather than its target, based on
    # http://www.mail-archive.com/python-list@python.org/msg34223.html
    link_info = zipfile.ZipInfo(name_in_archive)
    link_info.create_system = 3
    # long type of hex val of '0xA1ED0000', which is sym link attribute value
    link_info.external_attr = 2716663808
    zipfile_instance.writestr(link_info, os.readlink(full_path))
async def test_saving_loading(hass):
    """Test saving and loading JSON.

    Round-trips auth data through a mocked file: whatever ``data.save``
    writes must load back and still validate both users.
    """
    data = hass_auth.Data(MOCK_PATH, None)
    data.add_user('test-user', 'test-pass')
    data.add_user('second-user', 'second-pass')
    # Capture what save() would have written to disk.
    with patch(JSON__OPEN_PATH, mock_open(), create=True) as mock_write:
        await hass.async_add_job(data.save)
    # Mock open calls are: open file, context enter, write, context leave
    written = mock_write.mock_calls[2][1][0]
    # Feed the captured bytes back through load_data.
    with patch('os.path.isfile', return_value=True), \
            patch(JSON__OPEN_PATH, mock_open(read_data=written), create=True):
        await hass.async_add_job(hass_auth.load_data, MOCK_PATH)
    # validate_login raises if credentials no longer match.
    data.validate_login('test-user', 'test-pass')
    data.validate_login('second-user', 'second-pass')
def create_gap_token(rowidx=None):
    """Return a fresh GAP-typed Token with an empty value.

    Parameters
    ----------
    rowidx: int (Optional)
        row id to attach to the token

    Returns
    -------
    Token
    """
    gap_kwargs = dict(token_type=SupportedDataTypes.GAP, value='', rowidx=rowidx)
    return TT.Token(**gap_kwargs)
def normalize_readthrough_dates(app_registry, schema_editor):
    """Reset any ReadThrough whose start date is after its finish date."""
    alias = schema_editor.connection.alias
    readthrough_model = app_registry.get_model("bookwyrm", "ReadThrough")
    invalid = readthrough_model.objects.using(alias).filter(
        start_date__gt=models.F("finish_date")
    )
    invalid.update(start_date=models.F("finish_date"))
def forward_ssh_ports(
        args,  # type: IntegrationConfig
        ssh_connections,  # type: t.Optional[t.List[SshConnectionDetail]]
        playbook,  # type: str
        target_state,  # type: t.Dict[str, t.Tuple[t.List[str], t.List[SshProcess]]]
        target,  # type: IntegrationTarget
        host_type,  # type: str
        contexts,  # type: t.Dict[str, t.Dict[str, ContainerAccess]]
):  # type: (...) -> None
    """Configure port forwarding using SSH and write hosts file entries.

    Records the hosts entries and spawned SSH processes for *target* in
    *target_state* so they can be cleaned up later.
    """
    if ssh_connections is None:
        return
    # Find the container-access context matching this target's cloud alias.
    test_context = None
    for context_name, context in contexts.items():
        context_alias = 'cloud/%s/' % context_name
        if context_alias in target.aliases:
            test_context = context
            break
    if not test_context:
        return
    if not ssh_connections:
        if args.explain:
            return
        raise Exception('The %s host was not pre-configured for container access and SSH forwarding is not available.' % host_type)
    redirects = []  # type: t.List[t.Tuple[int, str, int]]
    messages = []
    for container_name, container in test_context.items():
        explain = []
        for container_port, access_port in container.port_map():
            if container.forwards:
                redirects.append((container_port, container.host_ip, access_port))
                explain.append('%d -> %s:%d' % (container_port, container.host_ip, access_port))
            else:
                explain.append('%s:%d' % (container.host_ip, container_port))
        if explain:
            if container.forwards:
                message = 'Port forwards for the "%s" container have been established on the %s host' % (container_name, host_type)
            else:
                message = 'Ports for the "%s" container are available on the %s host as' % (container_name, host_type)
            messages.append('%s:\n%s' % (message, '\n'.join(explain)))
    hosts_entries = create_hosts_entries(test_context)
    inventory = generate_ssh_inventory(ssh_connections)
    # Push the hosts entries to the target via an ansible playbook.
    with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path:
        run_playbook(args, inventory_path, playbook, dict(hosts_entries=hosts_entries))
    ssh_processes = []  # type: t.List[SshProcess]
    if redirects:
        for ssh in ssh_connections:
            ssh_processes.append(create_ssh_port_redirects(args, ssh, redirects))
    # Remember what was set up so teardown can undo it for this target.
    target_state[target.name] = (hosts_entries, ssh_processes)
    for message in messages:
        display.info(message, verbosity=1)
def clear_cache():
    """Clear check_url's memoized results before every run."""
    check_url.cache_clear()
def test_ambiguous_label_pk(setup_codes, parameter_type):
    """Situation: LABEL of entity_02 is exactly equal to ID of entity_01.

    Verify that using an ambiguous identifier gives precedence to the ID interpretation.
    Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL.
    """
    entity_01, entity_02, entity_03 = setup_codes
    # Ambiguous: entity_02's label equals entity_01's ID -> resolves as ID.
    identifier = f'{entity_02.label}'
    result = parameter_type.convert(identifier, None, None)
    assert result.uuid == entity_01.uuid
    # With the ambiguity breaker appended it must resolve as a LABEL.
    identifier = f'{entity_02.label}{OrmEntityLoader.label_ambiguity_breaker}'
    result = parameter_type.convert(identifier, None, None)
    assert result.uuid == entity_02.uuid
def get_local_bricks(volume: str) -> Result:
    """
    Return all bricks that are being served locally in the volume.

    :param volume: Name of the volume to get local bricks for
    :return: Ok([brick, ...]) on success, Err(...) if volume info could
        not be retrieved.
    """
    vol_info = volume_info(volume)
    if vol_info.is_err():
        return Err(vol_info.value)
    local_ip = get_local_ip()
    local_brick_list = []
    # Loop variable renamed from `volume` to `vol`: the old name shadowed
    # the function parameter, which was confusing and error-prone.
    for vol in vol_info.value:
        for brick in vol.bricks:
            if brick.peer.hostname == local_ip:
                local_brick_list.append(brick)
    return Ok(local_brick_list)
def delegate_remote(args, exclude, require, integration_targets):
    """Run the test command on a freshly provisioned remote instance.

    :type args: EnvironmentConfig
    :type exclude: list[str]
    :type require: list[str]
    :type integration_targets: tuple[IntegrationTarget]
    """
    # args.remote has the form "platform/version".
    parts = args.remote.split('/', 1)
    platform = parts[0]
    version = parts[1]
    core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
    success = False
    if isinstance(args, ShellConfig):
        use_httptester = args.httptester
    else:
        # Only bring up httptester when some selected target needs it.
        use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
    if use_httptester and not docker_available():
        display.warning('Assuming --disable-httptester since `docker` is not available.')
        use_httptester = False
    httptester_id = None
    ssh_options = []
    try:
        core_ci.start()
        if use_httptester:
            httptester_id, ssh_options = start_httptester(args)
        core_ci.wait()
        options = {
            '--remote': 1,
        }
        cmd = generate_command(args, 'ansible/test/runner/test.py', options, exclude, require)
        if httptester_id:
            cmd += ['--inject-httptester']
        if isinstance(args, TestConfig):
            if args.coverage and not args.coverage_label:
                cmd += ['--coverage-label', 'remote-%s-%s' % (platform, version)]
        if isinstance(args, IntegrationConfig):
            if not args.allow_destructive:
                # The remote instance is disposable, so destructive tests are
                # force-enabled there even when the user did not pass the flag.
                cmd.append('--allow-destructive')
        # remote instances are only expected to have a single python version available
        if isinstance(args, UnitsConfig) and not args.python:
            cmd += ['--python', 'default']
        manage = ManagePosixCI(core_ci)
        manage.setup()
        if isinstance(args, IntegrationConfig):
            cloud_platforms = get_cloud_providers(args)
            for cloud_platform in cloud_platforms:
                ssh_options += cloud_platform.get_remote_ssh_options()
        try:
            manage.ssh(cmd, ssh_options)
            success = True
        finally:
            # Always try to retrieve test results, even on failure.
            manage.ssh('rm -rf /tmp/results && cp -a ansible/test/results /tmp/results && chmod -R a+r /tmp/results')
            manage.download('/tmp/results', 'test')
    finally:
        if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
            core_ci.stop()
        if httptester_id:
            docker_rm(args, httptester_id)
def unionWCT(m=6, n=6):
    """ @ worst-case family union where
    @m>=2 and n>=2 and k=3 (three-letter alphabet)

    :arg m: number of states
    :arg n: number of states
    :type m: integer
    :type n: integer
    :returns: two dfas
    :rtype: (DFA,DFA)"""
    if n < 2 or m < 2:
        raise TestsError("number of states must both greater than 1")
    d1, d2 = DFA(), DFA()
    # d1: m states over {a, b, c}; 'a' cycles forward, 'b' resets to 0,
    # 'c' is a self-loop (except on state 0, where it stays at 0).
    d1.setSigma(["a", "b", "c"])
    d1.States = list(range(m))
    d1.setInitial(0)
    d1.addFinal(m - 1)
    d1.addTransition(0, "a", 1)
    d1.addTransition(0, "c", 0)
    for i in range(1, m):
        d1.addTransition(i, "a", (i + 1) % m)
        d1.addTransition(i, "b", 0)
        d1.addTransition(i, "c", i)
    # d2: n states; roles of 'a' and 'b' swapped relative to d1, and
    # 'c' sends every non-initial state to state 1.
    d2.setSigma(["a", "b", "c"])
    d2.States = list(range(n))
    d2.setInitial(0)
    d2.addFinal(n - 1)
    d2.addTransition(0, "a", 0)
    d2.addTransition(0, "b", 1)
    for i in range(1, n):
        d2.addTransition(i, "b", (i + 1) % n)
        d2.addTransition(i, "a", i)
        d2.addTransition(i, "c", 1)
    return d1, d2
def set_device_parameters(request):
    """Set up the test class with driver/vendor attributes and teardown.

    Registers a finalizer that closes the device after the tests run,
    then delegates the remaining setup to the parent conftest.
    """
    def fin():
        # Close the device connection when the test class is torn down.
        request.cls.device.close()
    request.addfinalizer(fin)
    request.cls.driver = OriginalDriver
    request.cls.patched_driver = PatchedDriver
    request.cls.vendor = 'ibm'
    parent_conftest.set_device_parameters(request)
def week_changes (after, before, str_dates, offset = 0, limit = 3) :
    """Yield all elements of `str_dates` closest to week changes.

    Thin wrapper around `unit_changes` with the unit fixed to "week".
    """
    return unit_changes (after, before, str_dates, "week", offset, limit)
def getHiddenStatus(data):
    """
    Fit a 3-state Gaussian HMM to the data and return the predicted
    hidden-state sequence.
    """
    # Feature columns used to fit the model.
    cols = ["r_5", "r_20", "a_5", "a_20"]
    # Fixed random_state keeps the fit reproducible across runs.
    model = GaussianHMM(n_components=3, covariance_type="full", n_iter=1000,
                        random_state=2010)
    model.fit(data[cols])
    hiddenStatus = model.predict(data[cols])
    return hiddenStatus
def format_time(data, year):
    """Format any time variables in US.

    Parameters
    ----------
    data : pd.DataFrame
        Data without time formatting.
    year : int
        The `year` of the wave being processed.

    Returns
    -------
    data : pd.DataFrame
        Data with time formatting.
    """
    # See the TODO messages at the top of the file.
    # There is some weird overlap in the pidp data: essentially a gap in
    # September 2008 with no one in it from BHPS, which makes transition
    # models fail. A crude historical workaround (kept for reference):
    #   if self.year <= 2008:
    #       self.year += 1
    data["time"] = year
    return data
def get_connection(sid):
    """
    Attempts to connect to the given server and
    returns a connection.

    Raises WebException if the SSH connection cannot be established.
    """
    server = get_server(sid)
    try:
        shell = spur.SshShell(
            hostname=server["host"],
            username=server["username"],
            password=server["password"],
            port=server["port"],
            missing_host_key=spur.ssh.MissingHostKey.accept,
            connect_timeout=10)
        # Run a trivial command to verify the credentials actually work.
        shell.run(["echo", "connected"])
    except spur.ssh.ConnectionError as e:
        raise WebException(
            "Cannot connect to {}@{}:{} with the specified password".format(
                server["username"], server["host"], server["port"]))
    return shell
def compare_apertures(reference_aperture, comparison_aperture, absolute_tolerance=None, attribute_list=None, print_file=sys.stdout, fractional_tolerance=1e-6, verbose=False, ignore_attributes=None):
    """Compare the attributes of two apertures.

    Differences beyond the tolerances are printed to *print_file* and
    every compared attribute that differs is collected in the returned
    table.

    Parameters
    ----------
    reference_aperture : aperture object
        Aperture treated as the reference.
    comparison_aperture : aperture object
        Aperture compared against the reference.
    absolute_tolerance : float, optional
        If set, numeric differences within this absolute tolerance are
        not reported.
    attribute_list : list of str, optional
        Attributes to compare (defaults to PRD_REQUIRED_ATTRIBUTES_ORDERED).
    print_file : file-like
        Destination for the printed report (defaults to stdout).
    fractional_tolerance : float
        Numeric differences whose fractional difference is at or below
        this value are not reported.
    verbose : bool
        Print per-attribute debug output.
    ignore_attributes : iterable of str, optional
        Attributes to skip entirely.

    Returns
    -------
    comparison_table : astropy Table
        One row per differing attribute: aperture, attribute, reference,
        comparison, difference, percent.
    """
    if attribute_list is None:
        attribute_list = PRD_REQUIRED_ATTRIBUTES_ORDERED
    comparison_table = Table(names=('aperture', 'attribute', 'reference', 'comparison', 'difference', 'percent'), dtype=['S50']*6)
    add_blank_line = False
    for attribute in attribute_list:
        if (ignore_attributes is not None) and (attribute in list(ignore_attributes)):
            continue
        show = False
        reference_attr = getattr(reference_aperture, attribute)
        comparison_attr = getattr(comparison_aperture, attribute)
        if verbose:
            print('Comparing {} {}: {}{} {}{}'.format(reference_aperture, attribute, type(reference_attr), reference_attr, type(comparison_attr), comparison_attr))
        if reference_attr != comparison_attr:
            show = True
            # if isinstance(reference_attr, float) and isinstance(comparison_attr, float):
            if (type(reference_attr) in [int, float, np.float64]) and (type(comparison_attr) in [int, float, np.float64]):
                # Numeric attributes: apply absolute/fractional tolerances.
                difference = np.abs(comparison_attr - reference_attr)
                fractional_difference = difference / np.max(
                    [np.abs(reference_attr), np.abs(comparison_attr)])
                if verbose:
                    print('difference={}, fractional_difference={}'.format(difference, fractional_difference))
                if (absolute_tolerance is not None) and math.isclose(reference_attr, comparison_attr, abs_tol=absolute_tolerance):
                    show = False
                elif fractional_difference <= fractional_tolerance:
                    show = False
                else:
                    fractional_difference_percent_string = '{:.4f}'.format(fractional_difference*100.)
                    difference_string = '{:.6f}'.format(difference)
            else:
                # Non-numeric attributes: any inequality is reported.
                difference_string = 'N/A'
                fractional_difference_percent_string = 'N/A'
        if show:
            add_blank_line = True
            print('{:25} {:>15} {:>21} {:>21} {:>15} {:>10}'.format(reference_aperture.AperName, attribute, str(reference_attr), str(comparison_attr), difference_string, fractional_difference_percent_string), file=print_file)
            # add comparison data to table
            comparison_table.add_row([reference_aperture.AperName, attribute, str(reference_attr), str(comparison_attr), difference_string, fractional_difference_percent_string])
    if add_blank_line:
        print('', file=print_file)
    return comparison_table
def tail(filename: str, nlines: int = 20, bsz: int = 4096) -> List[str]:
    """
    Pure python equivalent of the UNIX ``tail`` command.

    Returns the last *nlines* lines of *filename* in forward
    (top-to-bottom) order as a ``List[str]``. This is a convenience
    wrapper around the generator :func:`.io_tail`; because the lines must
    come back in order, all of them are held in memory at once. For very
    large tails (10,000+ lines of a logfile) use :func:`.io_tail`
    directly, which only loads a bounded number of bytes per iteration.

    :param str filename: Path to file to tail. Relative or absolute path.
        Absolute path is recommended for safety.
    :param int nlines: Total number of lines to retrieve from the end of the file.
    :param int bsz: Block size (in bytes) to load with each iteration
        (default: 4096 bytes). DON'T CHANGE UNLESS YOU UNDERSTAND WHAT
        THIS MEANS.
    :return List[str] lines: The last 'nlines' lines of the file - in forward order.
    """
    collected = []
    with open(filename, 'rb') as handle:
        # io_tail yields chunks starting from the end of the file, so each
        # new chunk is prepended to keep the result in forward order.
        for chunk in io_tail(f=handle, nlines=nlines, bsz=bsz):
            collected = chunk + collected
    return collected
def initialized(name, secret_shares=5, secret_threshold=3, pgp_keys=None,
                keybase_users=None, unseal=True):
    """
    Ensure that the vault instance has been initialized and run the
    initialization if it has not.

    :param name: The id used for the state definition
    :param secret_shares: The number of secret shares to use for the
        initialization key
    :param secret_threshold: The number of keys required to unseal the vault
    :param pgp_keys: List of PGP public key strings to use for encrypting
        the sealing keys
    :param keybase_users: List of Keybase users to retrieve public PGP keys
        for to use in encrypting the sealing keys
    :param unseal: Whether to unseal the vault during initialization
    :returns: Result of the execution
    :rtype: dict
    """
    ret = {'name': name,
           'comment': '',
           'result': '',
           'changes': {}}
    initialized = __salt__['vault.is_initialized']()
    if initialized:
        ret['result'] = True
        # BUG FIX: previously wrote to ret['Comment'] (capital C), which
        # Salt ignores -- the standard state-return key is 'comment'.
        ret['comment'] = 'Vault is already initialized'
    elif __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Vault will be initialized.'
    else:
        # In this branch the vault is known to be uninitialized, so the
        # former `... if not initialized else (True, {}, '')` guard was
        # dead code and has been removed.
        success, sealing_keys, root_token = __salt__['vault.initialize'](
            secret_shares, secret_threshold, pgp_keys, keybase_users, unseal
        )
        ret['result'] = success
        ret['changes'] = {
            'root_credentials': {
                'new': {
                    'sealing_keys': sealing_keys,
                    'root_token': root_token
                },
                'old': {}
            }
        }
        ret['comment'] = 'Vault has {}initialized'.format(
            '' if success else 'failed to be ')
    return ret
def plot(plot, x, y, **kwargs):
    """
    Adds series to plot. By default this is displayed as continuous line.
    Refer to matplotlib.pyplot.plot() help for more info. X and y coordinates
    are expected to be in user's data units.

    Args:
        plot: matplotlib.pyplot
            Plot to which series should be added.
        x: (float,)
            Collection of x-coordinates in user units.
        y: (float,)
            Collection of y-coordinates in user units.
        **kwargs:
            Additional keyword arguments forwarded to plot.plot()
            (e.g. ``label`` for the series legend).

    Returns:
        Whatever plot.plot() returns for the added series.
    """
    # add series
    return plot.plot(x, y, **kwargs)
def assert_not_has_text(output, text):
    """Assert that the substring ``text`` does not occur in ``output``."""
    assert output is not None, "Checking not_has_text assertion on empty output (None)"
    assert text not in output, "Output file contains unexpected text '%s'" % text
def glyph_by_hershey_code(hershey_code):
    """
    Look up and return the Hershey glyph registered for ``hershey_code``.

    Raises ValueError when no glyph exists for the code.
    """
    glyph = glyphs_by_hershey_code.get(hershey_code)
    if glyph is not None:
        return glyph
    raise ValueError("No glyph for hershey code %d" % hershey_code)
def _get_prefab_from_address(address):
    """
    Parse an address of the form ``ip[:port]`` and return a prefab object
    connected to the remote node (port defaults to 22).
    """
    try:
        if ':' in address:
            host, port_text = address.split(':')
            port = int(port_text)
        else:
            host, port = address, 22
    except Exception:
        raise ValueError("Invalid node address")
    return j.tools.prefab.getFromSSH(addr=host, port=port)
def interval_to_errors(value, low_bound, hi_bound):
    """
    Convert an error interval to asymmetric error widths.

    :param value: central value
    :param low_bound: interval low bound
    :param hi_bound: interval high bound
    :return: (error minus, error plus)
    """
    return value - low_bound, hi_bound - value
def actionSetHelperNodeNoRecur(s, l, st, t):
    """
    Set as helper node which is not recursively processed by editor and
    renderers even if it is unknown.

    Flags are set on ``t`` itself when it has a name, otherwise on its
    first element ``t[0]``.
    """
    # Idiom fix: compare against None with `is not` rather than `!=`.
    if t.name is not None:
        t.helperNode = True
        t.helperRecursive = False
    else:
        t[0].helperNode = True
        t[0].helperRecursive = False
def log_enabled_arg(request: Any) -> bool:
    """Fixture helper yielding each log-enabled flag in turn.

    Args:
        request: special fixture that returns the fixture params

    Returns:
        The current parametrized value, one at a time.
    """
    current_param = request.param
    return cast(bool, current_param)
def test_set_and_get_property_from_clr():
    """Test setting and getting clr-accessible properties from the clr.

    The asserts encode the expectation that property Y always reflects
    X * 2, both before and after X is updated through CLR reflection.
    """
    t = ExampleClrClass()
    assert t.GetType().GetProperty("X").GetValue(t) == 3
    assert t.GetType().GetProperty("Y").GetValue(t) == 3 * 2
    # Update X via reflection and re-check both properties.
    t.GetType().GetProperty("X").SetValue(t, 4)
    assert t.GetType().GetProperty("X").GetValue(t) == 4
    assert t.GetType().GetProperty("Y").GetValue(t) == 4 * 2
def do(args):
    """ Main entry point: sync (or reset) all matching git projects in parallel. """
    reset = args.reset
    git_worktree = qisrc.parsers.get_git_worktree(args)
    sync_ok = git_worktree.sync()
    if not sync_ok:
        sys.exit(1)
    git_projects = qisrc.parsers.get_git_projects(git_worktree, args,
                                                  default_all=True,
                                                  use_build_deps=True)
    if not git_projects:
        qisrc.worktree.on_no_matching_projects(git_worktree, groups=args.groups)
        return
    git_worktree.configure_projects(git_projects)
    skipped = list()
    failed = list()
    ui.info(ui.green, ":: Syncing projects ...")
    # Widest project src, used to align the progress output.
    max_src = max(len(x.src) for x in git_projects)
    # wrap with a list to sidestep problem described in
    # https://www.python.org/dev/peps/pep-3104/
    i = [0]
    lock = threading.Lock()

    def do_sync(git_project):
        """ Sync (or reset) one project; reporting is serialized by `lock`. """
        if reset:
            (status, out) = git_project.reset()
        else:
            (status, out) = git_project.sync(rebase_devel=args.rebase_devel)
        with lock:
            ui.info_count(i[0], len(git_projects),
                          ui.blue, git_project.src.ljust(max_src))
            # status None means skipped, False means failed, True means OK.
            if status is None:
                ui.info(git_project.src, ui.brown, " [skipped]")
                skipped.append((git_project.src, out))
            if status is False:
                ui.info(git_project.src, ui.red, " [failed]")
                failed.append((git_project.src, out))
            if out:
                ui.info(ui.indent(out + "\n\n", num=2))
            i[0] += 1

    qisys.parallel.foreach(git_projects, do_sync, n_jobs=args.num_jobs)
    print_overview(len(git_projects), len(skipped), len(failed))
    # Any skipped or failed project makes the whole command fail.
    if failed or skipped:
        sys.exit(1)
def expsign(sign, exp):
    """
    Fast evaluation of ``sign ** exp`` for sign in {1, -1}.
    """
    if sign == 1:
        return 1
    assert sign == -1
    # -1 to an even power is 1, to an odd power is -1.
    return 1 if exp % 2 == 0 else -1
def convert_format(tensors, kind, target_kind):
    """Converts data from format 'kind' to one of the formats specified in 'target_kind'.

    This allows us to convert data to/from dataframe representations for
    operators that only support certain representations. Returns a tuple
    of (converted tensors, the concrete Supports flag actually produced).
    """
    # this is all much more difficult because of multihot columns, which don't have
    # great representations in dicts of cpu/gpu arrays. we're representing multihots
    # as tuples of (values, offsets) tensors in this case - but have to do work at
    # each step in terms of converting.
    if kind & target_kind:
        # Already in an acceptable format: nothing to do.
        return tensors, kind
    elif target_kind & Supports.GPU_DICT_ARRAY:
        if kind == Supports.CPU_DICT_ARRAY:
            return _convert_array(tensors, cp.array), Supports.GPU_DICT_ARRAY
        elif kind == Supports.CPU_DATAFRAME:
            return _pandas_to_array(tensors, False), Supports.GPU_DICT_ARRAY
        elif kind == Supports.GPU_DATAFRAME:
            return _cudf_to_array(tensors, False), Supports.GPU_DICT_ARRAY
    elif target_kind & Supports.CPU_DICT_ARRAY:
        if kind == Supports.GPU_DICT_ARRAY:
            return _convert_array(tensors, cp.asnumpy), Supports.CPU_DICT_ARRAY
        elif kind == Supports.CPU_DATAFRAME:
            return _pandas_to_array(tensors, True), Supports.CPU_DICT_ARRAY
        elif kind == Supports.GPU_DATAFRAME:
            return _cudf_to_array(tensors, True), Supports.CPU_DICT_ARRAY
    elif target_kind & Supports.GPU_DATAFRAME:
        if kind == Supports.CPU_DATAFRAME:
            return cudf.DataFrame(tensors), Supports.GPU_DATAFRAME
        return _array_to_cudf(tensors), Supports.GPU_DATAFRAME
    elif target_kind & Supports.CPU_DATAFRAME:
        if kind == Supports.GPU_DATAFRAME:
            return tensors.to_pandas(), Supports.CPU_DATAFRAME
        elif kind == Supports.CPU_DICT_ARRAY:
            return _array_to_pandas(tensors), Supports.CPU_DATAFRAME
        elif kind == Supports.GPU_DICT_ARRAY:
            # GPU arrays must be pulled to host memory before building pandas.
            return _array_to_pandas(_convert_array(tensors, cp.asnumpy)), Supports.CPU_DATAFRAME
    raise ValueError("unsupported target for converting tensors", target_kind)
def template2path(template, params, ranges=None):
    """Converts a template and a dict of parameters to a path fragment.

    Converts a template, such as /{name}/ and a dictionary of parameter
    values to a URL path (string).

    Parameter values that are used for buildig the path are converted to
    strings using `str()` and URI-escaped, then validated against the their
    range. Unused parameters are ignored.

    Any optional ([]) blocks in the template are skipped unless they contain at
    least one parameter and all parameters needed to fill the block (including
    nested blocks) are present in `params`.

    Example:
        >>> import rhino.mapper
        >>> rhino.mapper.template2path("/{name}", {'name': 'fred'})
        '/fred'

    NOTE(review): this code targets Python 2 (`unicode`, `urllib.quote`).
    """
    # A '|' anchor is only valid as the very last template character.
    if len(template) and -1 < template.find('|') < len(template) - 1:
        raise InvalidTemplateError("'|' may only appear at the end, found at position %d in %s" % (template.find('|'), template))
    if ranges is None:
        ranges = DEFAULT_RANGES
    # Stack for path components. A new list is added for each '[]' block
    # encountered. When the closing ']' is reached, the last element is
    # removed and either merged into the previous one (we keep the
    # block) or discarded (we skip the block). At the end, this should
    # contain a flat list of strings as its single element.
    stack = [[]]
    pattern = "[^/]+"  # default range
    name = ""          # name of the current parameter
    bracketdepth = 0   # current level of nested brackets
    skip_to_depth = 0  # if > 1, skip until we're back at this bracket level
    state = S_PATH
    rangename = None   # range name for the current parameter
    seen_name = [False]  # have we seen a named param in bracket level (index)?
    for c in template_splitter.split(template):
        if state == S_PATH:
            if len(c) > 1:
                # Literal text between special characters.
                stack[-1].append(c)
            elif c == '[':
                bracketdepth += 1
                stack.append([])
                seen_name.append(False)
            elif c == ']':
                bracketdepth -= 1
                if bracketdepth < 0:
                    raise InvalidTemplateError("Mismatched brackets in %s" % template)
                last_elem = stack.pop()
                # Keep the optional block only if it contained a parameter.
                if seen_name.pop():
                    stack[-1].extend(last_elem)
                    seen_name[-1] = True
            elif c == '{':
                name = ""
                state = S_TEMPLATE
            elif c == '}':
                raise InvalidTemplateError("Mismatched braces in %s" % template)
            elif c == '|':
                pass
            else:
                stack[-1].append(c)
        elif state == S_SKIP:
            # Inside a skipped optional block: only track bracket nesting.
            if c == '[':
                bracketdepth += 1
                seen_name.append(False)
            elif c == ']':
                if bracketdepth == skip_to_depth:
                    stack.pop()
                    skip_to_depth = 0
                    state = S_PATH
                bracketdepth -= 1
                seen_name.pop()
        else:  # state == S_TEMPLATE
            if c == '}':
                if name not in params:
                    if bracketdepth:
                        # We're missing a parameter, but it's ok since
                        # we're inside a '[]' block. Skip everything
                        # until we reach the end of the current block.
                        skip_to_depth = bracketdepth
                        state = S_SKIP
                    else:
                        raise InvalidArgumentError("Missing parameter '%s' in %s" % (name, template))
                else:
                    if rangename and rangename in ranges:
                        regex = ranges[rangename]
                    else:
                        regex = pattern
                    # Convert to string, URI-escape, then validate range.
                    value_bytes = unicode(params[name]).encode('utf-8')
                    value = urllib.quote(value_bytes, safe='/:;')
                    if not re.match('^' + regex + '$', value):
                        raise InvalidArgumentError("Value '%s' for parameter '%s' does not match '^%s$' in %s" % (value, name, regex, template))
                    stack[-1].append(value)
                    state = S_PATH
                    rangename = None
            else:
                # Parameter name, optionally suffixed with ":rangename".
                name = c
                if name.find(":") > -1:
                    name, rangename = name.split(":")
                seen_name[bracketdepth] = True
    if bracketdepth != 0:
        raise InvalidTemplateError("Mismatched brackets in %s" % template)
    if state == S_TEMPLATE:
        raise InvalidTemplateError("Mismatched braces in %s" % template)
    # None of these Should Ever Happen [TM]
    if state == S_SKIP:  # pragma: no cover
        raise MapperException("Internal error: end state is S_SKIP")
    if len(stack) > 1:  # pragma: no cover
        raise MapperException("Internal error: stack not empty")
    if len(seen_name) != 1:  # pragma: no cover
        raise MapperException("Internal error: seen_name not empty")
    return "".join(stack[0])
def calculate_total_matched(
    market_book: Union[Dict[str, Any], MarketBook]
) -> Union[int, float]:
    """
    Calculate the total matched on this market from the amounts matched on each runner at each price point. Useful for historic data where this field is not populated
    :param market_book: A market book either as a dictionary or betfairlightweight MarketBook object
    :return: The total matched on this market
    """
    # isinstance (rather than an exact `type(...) is` check) also accepts
    # MarketBook subclasses.
    if isinstance(market_book, MarketBook):
        market_book = market_book._data
    return sum(
        ps["size"]
        for r in market_book.get("runners", [])
        for ps in r.get("ex", {}).get("tradedVolume", [])
    )
def parse_args(args):
    """Build parser object with options for sample.

    Returns:
        Python argparse parsed object.
    """
    parser = argparse.ArgumentParser(
        description="A VCF editing utility which adds ref and all sequences to a SURVIVOR fasta file.")
    # The four required string arguments share the same shape, so they are
    # registered from a table.
    for long_name, short_name, help_text in [
        ("--reference-fasta", "-r", "Reference fasta file."),
        ("--survivor-insertions-fasta", "-i", "Insertions fasta file from SURVIVOR."),
        ("--survivor-vcf-file", "-v", "VCF file from SURVIVOR."),
        ("--output-vcf", "-o", "Output path of edited VCF."),
    ]:
        parser.add_argument(long_name, short_name, required=True, type=str,
                            help=help_text)
    parser.add_argument("--debug", action="store_true",
                        help="Verbose logging")
    parsed = parser.parse_args(args)
    truvari.setup_logging(parsed.debug)
    return parsed
def _newline_to_ret_token(instring):
"""Replaces newlines with the !RET token.
"""
return re.sub(r'\n', '!RET', instring) | 5,331,173 |
def _ComputeLineCounts(old_lines, chunks):
"""Compute the length of the old and new sides of a diff.
Args:
old_lines: List of lines representing the original file.
chunks: List of chunks as returned by patching.ParsePatchToChunks().
Returns:
A tuple (old_len, new_len) representing len(old_lines) and
len(new_lines), where new_lines is the list representing the
result of applying the patch chunks to old_lines, however, without
actually computing new_lines.
"""
old_len = len(old_lines)
new_len = old_len
if chunks:
(_, old_b), (_, new_b), old_lines, _ = chunks[-1]
new_len += new_b - old_b
return old_len, new_len | 5,331,174 |
def dirac_api_client(host="localhost", port=18861):
    """RPC DIRAC API client context.

    Yields the remote ``dirac_api`` object over an rpyc connection.
    NOTE(review): this is a generator intended for `with`-style usage --
    presumably decorated with contextlib.contextmanager at the definition
    site (decorator not visible in this chunk); confirm.
    """
    conn = rpyc.connect(host, port, config={"allow_public_attrs": True})
    try:
        yield conn.root.dirac_api
    finally:
        # Always close the connection, even if the caller raises.
        conn.close()
def __check_session_gap_expired_time(session_gap, expired_time, flink_window_type):
    """
    Validate the window configuration options for SESSION windows.

    :param session_gap: session gap (seconds); must be one of a fixed set
    :param expired_time: expiry time (minutes); must be one of a fixed set
    :param flink_window_type: window type; only SESSION is checked
    :raises StreamWindowConfigCheckError: when a value is unsupported
    """
    if flink_window_type == SESSION:
        # Session gap (unit: seconds)
        if session_gap not in [0, 10, 30, 60, 180, 300, 600]:
            raise StreamWindowConfigCheckError(
                _("窗口类型[%s] 属性[%s] 目前只支持 %s") % (SESSION, "session_gap", "[0, 10, 30, 60, 180, 300, 600]")
            )
        # Expiry time (unit: minutes)
        if expired_time not in [0, 1, 3, 5, 10, 30]:
            raise StreamWindowConfigCheckError(
                _("窗口类型[%s] 属性[%s] 目前只支持 %s") % (SESSION, "expired_time", "[0, 1, 3, 5, 10, 30]")
            )
def _read_int(file_handle, data_size):
"""
Read a signed integer of defined data_size from file.
:param file_handle: The file handle to read from at current position
:param data_size: The data size in bytes of the integer to read
:returns: The integer read and decoded
"""
return int.from_bytes(file_handle.read(data_size), byteorder="little", signed=True) | 5,331,177 |
def quaternion_to_matrix(quat):
    """Convert a quaternion given as a (w, x, y, z) column vector into
    the corresponding 3x3 rotation matrix.
    """
    qw, qx, qy, qz = quat[0][0], quat[1][0], quat[2][0], quat[3][0]
    # Precompute the pairwise products used by the standard formula.
    xx, yy, zz = qx * qx, qy * qy, qz * qz
    xy, xz, yz = qx * qy, qx * qz, qy * qz
    wx, wy, wz = qw * qx, qw * qy, qw * qz
    return numpy.array([
        [1 - 2 * yy - 2 * zz, 2 * xy - 2 * wz, 2 * xz + 2 * wy],
        [2 * xy + 2 * wz, 1 - 2 * xx - 2 * zz, 2 * yz - 2 * wx],
        [2 * xz - 2 * wy, 2 * yz + 2 * wx, 1 - 2 * xx - 2 * yy],
    ])
def start_metrics_process():
    """
    Start metrics process that performs periodic monitoring activities.

    Stops any previously running metrics process first, then launches
    agent.py in '-metrics' mode as a detached subprocess, logging to
    daemon.log in the current working directory.
    :return: None
    """
    stop_metrics_process()
    # start metrics watcher
    oneagent_filepath = os.path.join(os.getcwd(), 'agent.py')
    # Use the same major python version as the current interpreter.
    args = ['python{0}'.format(sys.version_info[0]), oneagent_filepath, '-metrics']
    log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
    hutil_log_info('start watcher process ' + str(args))
    # Fire-and-forget: the Popen handle is intentionally not kept.
    subprocess.Popen(args, stdout=log, stderr=log)
def get_cart_from_request(request, create=False):
    """Returns Cart object for current user. If create option is True,
    new cart will be saved to db.

    Anonymous users are matched via a signed cookie token; authenticated
    users via their open-cart token.
    """
    cookie_token = request.get_signed_cookie(
        Cart.COOKIE_NAME, default=None)
    # NOTE(review): is_authenticated is called here -- this matches older
    # Django versions where it was a method; confirm against the project's
    # Django version.
    if request.user.is_authenticated():
        user = request.user
        queryset = user.carts
        token = get_user_open_cart_token(request.user)
    else:
        user = None
        queryset = Cart.objects.anonymous()
        token = cookie_token
    try:
        cart = queryset.open().get(token=token)
    except Cart.DoesNotExist:
        if create:
            cart = Cart.objects.create(
                user=user,
                token=cookie_token)
        else:
            # Unsaved placeholder cart when creation was not requested.
            cart = Cart()
    cart.discounts = request.discounts
    return cart
def make_concrete_rule(rule_no, zone_map, direction, zone, rule, concrete_port):
    """Take a rule and create a corresponding concrete rule.

    Depending on *direction* ('ingress'/'egress') and the abstract rule's
    own direction ('>'/'<'), either forward traffic (on *concrete_port*)
    or return traffic (on an ephemeral port range) is allowed.
    """
    def make_rule(target_zone, port):
        # All concrete rules produced here are "allow" rules.
        return ConcreteRule(source_rules=[rule], rule_no=rule_no, target_zone=target_zone,
                            direction=direction, port=port, action="allow")
    # NOTE(review): this local is never used below -- the make_rule calls
    # pass rule.zone / rule.target_zone directly; confirm whether it is
    # intentional (e.g. kept for its KeyError side effect) or leftover.
    target_zone = zone_map[rule.target_zone]
    # Rule level ephemerality overrides zone level
    if '+ephemeral_strict' in rule.tags:
        ephem_start = 32768
    elif '+ephemeral_loose' in rule.tags:
        ephem_start = 1024
    elif rule.direction == '>' and '+ephemeral_strict' in zone.tags and direction == 'ingress':
        # An internal network with systems that use a tight ephemeral port range
        ephem_start = 32768
    else:
        ephem_start = 1024
    if concrete_port.proto == 'all':
        # ISSUE: We should *maybe* prevent rules with the "all" protocol from being
        # concretized. Because of the nature of "all" rules you can't restrict the
        # return traffic at all. Really, this should be a policy level error?
        return_port = ConcretePort(proto=concrete_port.proto, from_port=0, to_port=0)
    else:
        return_port = ConcretePort(proto=concrete_port.proto, from_port=ephem_start, to_port=65535)
    if direction == 'ingress':
        if rule.direction == '>':
            if rule.zone == zone.name or rule.zone == 'all':  # a > b (return traffic)
                return make_rule(target_zone=rule.target_zone, port=return_port)
            elif rule.target_zone == zone.name:  # b > a (forward traffic)
                return make_rule(target_zone=rule.zone, port=concrete_port)
        else:  # '<'
            if rule.zone == zone.name:  # a < b (forward traffic)
                return make_rule(target_zone=rule.target_zone, port=concrete_port)
            elif rule.target_zone == zone.name:  # b < a
                raise NotImplementedError("Receiving traffic from internal zone?")
    else:  # egress
        if rule.direction == '>':
            if rule.zone == zone.name or rule.zone == 'all':  # a > b (forward traffic)
                return make_rule(target_zone=rule.target_zone, port=concrete_port)
            elif rule.target_zone == zone.name:  # b > a (return traffic)
                return make_rule(target_zone=rule.zone, port=return_port)
        else:  # '<'
            if rule.zone == zone.name:  # a < b (return traffic)
                return make_rule(target_zone=rule.target_zone, port=return_port)
            elif rule.target_zone == zone.name:  # b < a
                raise NotImplementedError("Receiving traffic from internal zone?")
    raise AssertionError("should not reach here")
def downcast(df: pd.DataFrame, signed_columns: List[str] = None) -> pd.DataFrame:
    """
    Downcast numeric columns to the smallest dtype that holds their values.

    Automatically check for signed/unsigned columns and downcast.
    However, if a column can be signed while all the data in that column is
    unsigned, you don't want to downcast to an unsigned column. You can
    explicitly pass these columns.

    :arg df: Data as Pandas DataFrame (modified in place and returned).
    :arg signed_columns: List of signed columns (signed = positive and negative values, unsigned = only positive values).
    :return: The same DataFrame with downcast numeric columns.
    """
    # BUG FIX: pd.DataFrame has no `memory_size` attribute (that raised
    # AttributeError); report the real footprint via memory_usage() in KB.
    logger.info(f'Size before downcasting: {df.memory_usage(deep=True).sum() / 1024} KB')
    for column in df.columns:
        if df[column].dtype in [np.int8, np.int16, np.int32, np.int64]:
            # Keep the column signed if it currently holds negatives or is
            # explicitly pinned as signed by the caller.
            if (df[column] < 0).any() or (signed_columns is not None and column in signed_columns):
                df[column] = pd.to_numeric(df[column], downcast='signed')
            else:
                df[column] = pd.to_numeric(df[column], downcast='unsigned')
        elif df[column].dtype in [np.float16, np.float32, np.float64]:
            df[column] = pd.to_numeric(df[column], downcast='float')
    logger.info(f'Size after downcasting: {df.memory_usage(deep=True).sum() / 1024} KB')
    return df
def gen_even_BMS_tree(fanout):
    """Write the even fan-out generalization hierarchy for BMS-WebView-2.dat.

    This generalization hierarchy for BMS-WebView-2.dat is defined according
    to even fan-out (average distribution). For large dataset fanout = 5,
    for small dataset fanout = 4.

    Writes one line per leaf value to data/treefile_BMS.txt; each line lists
    the leaf followed by its ancestor ranges of increasing width up to the
    root '*', joined by ';'.
    """
    try:
        with open('data/BMS_Static_value.pickle', 'rb') as static_file:
            static_value = pickle.load(static_file)
    except (OSError, pickle.PickleError, EOFError):
        # Cache missing or unreadable: rebuild the static value list.
        static_value = pickle_static()
    height = int(math.ceil(math.log(len(static_value), fanout)))
    with open('data/treefile_BMS.txt', 'w') as BMS_tree:
        for i, temp in enumerate(static_value):
            node = []
            for h in range(height):
                if h == 0:
                    temp = '%d' % static_value[i]
                else:
                    window = fanout ** h
                    # BUG FIX: '/' is float division in Python 3, which made
                    # `bottom`/`top` floats and broke the list indexing below.
                    times = i // window
                    bottom = times * window
                    top = (times + 1) * window - 1
                    if top >= len(static_value):
                        top = len(static_value) - 1
                    temp = '%d,%d' % (static_value[bottom], static_value[top])
                node.append(temp)
            node.append('*')
            BMS_tree.write(';'.join(node) + '\n')
def get_setting(name):
    """
    Look up *name* on Django's settings, falling back to the attribute of
    the same name defined in this module (or None if neither exists).
    """
    this_module = sys.modules[__name__]
    default = getattr(this_module, name, None)
    return getattr(settings, name, default)
def test_parse_hello_world_template():
    """Extract commands and output files from the 'Hello world' template."""
    template = WorkflowTemplate.from_dict(doc=util.read_object(TEMPLATE_HELLOWORLD))
    run_args = {'names': 'names.txt', 'sleeptime': 10}
    steps, args, output_files = parser.parse_template(template=template, arguments=run_args)
    # Exactly one container step, running the helloworld script on Python 2.7.
    assert len(steps) == 1
    first_step = steps[0]
    assert first_step.image == 'python:2.7'
    assert len(first_step.commands) == 1
    expected_cmd = '${python} "${helloworld}" --inputfile "${inputfile}" --outputfile "${outputfile}" --sleeptime ${sleeptime}'  # noqa: E501
    assert first_step.commands[0] == expected_cmd
    assert output_files == ['results/greetings.txt']
    assert args == {'helloworld': 'code/helloworld.py', 'inputfile': 'names.txt', 'outputfile': 'results/greetings.txt', 'sleeptime': '10'}
def checkCrowDist(comment, dist, expectedCrowDist):
    """
    Check the consistency of the crow distributions
    @ In, comment, string, a comment
    @ In, dist, instance, the distribution to inquire
    @ In, expectedCrowDist, dict, the dictionary of the expected distribution (with all the parameters)
    @ Out, None
    """
    actual = dist.getCrowDistDict()
    if actual == expectedCrowDist:
        results["pass"] += 1
    else:
        # Record the failure and show both dictionaries for debugging.
        results["fail"] += 1
        print(comment, '\n', actual, '\n', expectedCrowDist)
def entity_type(entity: dict) -> Optional[str]:
    """
    Safely fetch the NGSI type of *entity*.

    The type, if present, is expected to be a string; non-string values are
    converted via ``str``.

    :param entity: the entity.
    :return: the type string if there's a type, `None` otherwise.
    """
    raw_type = safe_get_value(entity, NGSI_TYPE)
    return maybe_map(str, raw_type)
def read_private_key_data(bio):
    """
    Consume one privately-serialized key from *bio* and return its raw bytes.

    The OpenSSH format does not store the length of the privately-serialized
    key data, so the per-key-type reader (which knows the field layout) is
    used to walk over it once; the consumed span is then re-read as raw
    bytes. See
    https://github.com/openssh/openssh-portable/blob/c7670b091a7174760d619ef6738b4f26b2093301/sshkey.c#L2767
    for the details.

    :param bio: Seekable binary IO object to read from
    :return: Tuple of (key format, private key data).
    """
    key_format = read_openssh_string(bio)
    start = bio.tell()
    reader = _readers.get(key_format.decode())
    if not reader:
        raise NotImplementedError('Unknown key format %r' % key_format)
    # Let the format-specific reader advance past the key fields, then
    # rewind and grab the exact span it consumed.
    reader(bio)
    span = bio.tell() - start
    bio.seek(start)
    return (key_format, bio.read(span))
def list_check(lst):
    """Are all items in lst a list?

    >>> list_check([[1], [2, 3]])
    True
    >>> list_check([[1], "nope"])
    False
    >>> list_check([])
    True
    """
    # all() short-circuits on the first non-list instead of scoring every item.
    return all(isinstance(x, list) for x in lst)
def check_for_peaks_in_residual(vel, data, errors, best_fit_list, dct,
                                fitted_residual_peaks, signal_ranges=None,
                                signal_mask=None, force_accept=False,
                                params_min=None, params_max=None, noise_spike_mask=None):
    """Try fit by adding new components, whose initial parameters were determined from residual peaks.
    Parameters
    ----------
    vel : numpy.ndarray
        Velocity channels (unitless).
    data : numpy.ndarray
        Original data of spectrum.
    errors : numpy.ndarray
        Root-mean-square noise values.
    best_fit_list : list
        List containing parameters of the current best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
    dct : dict
        Dictionary containing parameter settings for the improved fitting.
    fitted_residual_peaks : list
        List of initial mean position guesses for new fit components determined from residual peaks that were already tried in previous iterations.
    signal_ranges : list
        Nested list containing info about ranges of the spectrum that were estimated to contain signal. The goodness-of-fit calculations are only performed for the spectral channels within these ranges.
    signal_mask : numpy.ndarray
        Boolean array containing the information of signal_ranges.
    force_accept : bool
        Experimental feature. Default is 'False'. If set to 'True', the new fit will be forced to become the best fit.
    params_min : list
        List of minimum limits for parameters: [min_amp1, ..., min_ampN, min_fwhm1, ..., min_fwhmN, min_mean1, ..., min_meanN]
    params_max : list
        List of maximum limits for parameters: [max_amp1, ..., max_ampN, max_fwhm1, ..., max_fwhmN, max_mean1, ..., max_meanN]
    Returns
    -------
    best_fit_list : list
        List containing parameters of the chosen best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
    fitted_residual_peaks : list
        Updated list of initial mean position guesses for new fit components determined from residual peaks.
    """
    # TODO: remove params_min and params_max keywords
    # Unpack the positional slots of best_fit_list (layout documented above).
    params_fit = best_fit_list[0]
    ncomps_fit = best_fit_list[2]
    residual = best_fit_list[4]
    amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
    # Search the residual for positive peaks that could be missing components.
    amp_guesses, fwhm_guesses, offset_guesses = get_initial_guesses(
        residual, errors[0], dct['snr'], dct['significance'],
        peak='positive')
    if amp_guesses.size == 0:
        # No candidate peaks found: flag slot {7} (new_fit) as False and bail.
        best_fit_list[7] = False
        return best_fit_list, fitted_residual_peaks
    if list(offset_guesses) in fitted_residual_peaks:
        # These exact peak positions were already tried in a previous iteration.
        best_fit_list[7] = False
        return best_fit_list, fitted_residual_peaks
    fitted_residual_peaks.append(list(offset_guesses))
    # Append the guessed components to the current parameter set
    # (flat layout: all amps, then all fwhms, then all offsets) and refit.
    amps_fit = list(amps_fit) + list(amp_guesses)
    fwhms_fit = list(fwhms_fit) + list(fwhm_guesses)
    offsets_fit = list(offsets_fit) + list(offset_guesses)
    params_fit = amps_fit + fwhms_fit + offsets_fit
    best_fit_list = get_best_fit(
        vel, data, errors, params_fit, dct, first=False,
        best_fit_list=best_fit_list, signal_ranges=signal_ranges,
        signal_mask=signal_mask, force_accept=force_accept,
        params_min=params_min, params_max=params_max,
        noise_spike_mask=noise_spike_mask)
    return best_fit_list, fitted_residual_peaks
def build_encoded_manifest_from_nested_directory(
    data_directory_path: str,
) -> Dict[str, EncodedVideoInfo]:
    """
    Build a video_id -> EncodedVideoInfo mapping for the encoded videos found
    under the given directory.

    Args:
        data_directory_path (str): The folder to ls to find encoded
            video files.
    Returns:
        Dict[str, EncodedVideoInfo] mapping video_id to EncodedVideoInfo
        for each file in 'data_directory_path'
    """
    manifest: Dict[str, EncodedVideoInfo] = {}
    for participant_id in g_pathmgr.ls(data_directory_path):
        participant_dir = f"{data_directory_path}/{participant_id}"
        for file_name in g_pathmgr.ls(participant_dir):
            # The video id is the first six characters of the file name.
            vid = file_name[:6]
            manifest[vid] = EncodedVideoInfo(vid, f"{participant_dir}/{file_name}")
    return manifest
def test_photoslibrary_hidden(photoslib):
    """Verify that PhotosLibrary.hide() flips the hidden property."""
    import time
    import photoscript
    # pytest keeps the fixture's scripting state around, so quit and build a
    # fresh PhotosLibrary object to get hide/hidden to work as they would in
    # a real script.
    photoslib.quit()
    lib = photoscript.PhotosLibrary()
    lib.activate()
    time.sleep(1)
    assert not lib.hidden
    lib.hide()
    time.sleep(1)
    assert lib.hidden
def assert_different_renderings(expected_width, expected_height, documents):
    """
    Render each HTML document to PNG and fail if any two renderings are
    pixel-identical.
    Each document is passed as a (name, html_source) tuple.
    """
    rendered = []
    for name, html in documents:
        _doc, pixels = html_to_pixels(
            name, expected_width, expected_height, html)
        rendered.append((name, pixels))
    # Compare every unordered pair exactly once.
    for idx, (first_name, first_pixels) in enumerate(rendered):
        for other_name, other_pixels in rendered[idx + 1:]:
            if first_pixels == other_pixels:  # pragma: no cover
                write_png(first_name, first_pixels, expected_width, expected_height)
                # Equivalent to "assert first_pixels != other_pixels", but a
                # plain assert would dump the gigantic pixel buffers on
                # failure; fail with a short message instead.
                assert False, '%s and %s are the same' % (first_name, other_name)
def derive_question(doc):
    """
    Rephrase an action found in the doc as a "Why did ...?" question.
    'doc' is expected to be a spaCy doc.

    Returns None when no verb chunk can be located.
    """
    chunk = find_verb_chunk(doc)
    if not chunk:
        return None
    verb_token = chunk['verb']
    if verb_token.tag_ == 'VB':
        # Verb already in its base ("to ____") form.
        verb = verb_token.text
    else:
        # Otherwise use the spaCy lemmatizer to convert it to base form.
        verb = verb_token.lemma_
    return "Why did {} {} {}?".format(chunk['subject'].text, verb, chunk['object'].text)
def _replace_symlink(link, target):
    """Repoint *link* (a pathlib.Path) at *target*, removing an existing symlink first."""
    if link.is_symlink():
        link.unlink()
    link.symlink_to(target)


def install_cover(disc, only_from_cache=False):
    """
    Installs the symbolic links in the moOde directories
    making the cover arts accessible for the web site.

    :param disc: disc identifier passed through to the path helpers.
    :param only_from_cache: forwarded to get_cover(); when True only a cached
        cover is considered.
    """
    cover = get_cover(disc, only_from_cache)
    dest = moode_cd_dir(disc)
    # Fall back to the default artwork directory when no cover was found.
    source = cache_dir(disc) if cover is not None else default_cd_dir(disc)
    _replace_symlink(dest, source)
    # Both theme links point at the first jpg visible through the dest link.
    cover_jpg = list(dest.glob("*.jpg"))[0]
    _replace_symlink(theme_jpg_file(disc), cover_jpg)
    _replace_symlink(theme_sm_jpg_file(disc), cover_jpg)
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
    """
    Convert predicted/gold label ids back to label strings in the original
    (pre-sorting) sentence order.
    input:
        pred_variable (batch_size, sent_len): pred tag result
        gold_variable (batch_size, sent_len): gold result variable
        mask_variable (batch_size, sent_len): mask variable
    """
    # Undo the length-sorting applied during batching.
    pred_variable = pred_variable[word_recover]
    gold_variable = gold_variable[word_recover]
    mask_variable = mask_variable[word_recover]
    if sentence_classification:
        # One label per sentence: convert the ids straight to strings.
        pred_label = [label_alphabet.get_instance(p)
                      for p in pred_variable.cpu().data.numpy().tolist()]
        gold_label = [label_alphabet.get_instance(g)
                      for g in gold_variable.cpu().data.numpy().tolist()]
        return pred_label, gold_label
    seq_len = gold_variable.size(1)
    mask = mask_variable.cpu().data.numpy()
    pred_tag = pred_variable.cpu().data.numpy()
    gold_tag = gold_variable.cpu().data.numpy()
    pred_label, gold_label = [], []
    for row in range(mask.shape[0]):
        # Keep only unmasked positions; pred and gold must stay aligned.
        sent_pred = [label_alphabet.get_instance(pred_tag[row][col])
                     for col in range(seq_len) if mask[row][col] != 0]
        sent_gold = [label_alphabet.get_instance(gold_tag[row][col])
                     for col in range(seq_len) if mask[row][col] != 0]
        assert(len(sent_pred) == len(sent_gold))
        pred_label.append(sent_pred)
        gold_label.append(sent_gold)
    return pred_label, gold_label
def RegenerateOverview(*args, **kwargs):
    """
    RegenerateOverview(Band srcBand, Band overviewBand, char const * resampling="average", GDALProgressFunc callback=0,
        void * callback_data=None) -> int

    SWIG-style wrapper: delegates directly to the native _gdal extension,
    which recomputes the pixels of the overview band from its source band.
    Returns the native call's integer error code.
    """
    return _gdal.RegenerateOverview(*args, **kwargs)
def compute_divergences(
    corpus,
    load_src,
    _log,
    _run,
    max_length=None,
    word_emb_path="wiki.id.vec",
    src_key_as_lang=False,
    device="cpu",
    batch_size=16,
):
    """Compute divergences of source taggers a la Heskes (1998).

    For each source tagger in `load_src`, computes tag-pair marginals on the
    train/dev samples, combines them into a log-opinion-pool (LOP), and logs
    per-split "error" (negative gold log-marginal, averaged over sources) and
    "diversity" (mean KL from the LOP to each source) to the sacred run.

    Parameters (as used below; presumably a sacred captured function — the
    `_log`/`_run` arguments are the sacred logger and run object):
        corpus: dict with at least a "lang" key.
        load_src: mapping src name -> (load_dir, params_filename).
        max_length: optional per-split max sample length, keyed by split name.
        word_emb_path: word2vec-format embedding file for extending vocab.
        src_key_as_lang: if True, drop the target language from the sources.
        device, batch_size: torch device and marginal-computation batch size.
    """
    # Read train/dev tagging samples, optionally truncated per split.
    if max_length is None:
        max_length = {}
    samples = {
        wh: list(read_tagging_samples(wh, max_length.get(wh))) for wh in ["train", "dev"]
    }
    for wh in samples:
        n_toks = sum(len(s["words"]) - 2 for s in samples[wh])  # don't count BOS/EOS tokens
        _log.info("Read %d %s samples and %d tokens", len(samples[wh]), wh, n_toks)
    kv = KeyedVectors.load_word2vec_format(word_emb_path)
    srcs = list(load_src.keys())
    if src_key_as_lang and corpus["lang"] in srcs:
        _log.info("Removing %s from src parsers because it's the tgt", corpus["lang"])
        srcs.remove(corpus["lang"])
    # All sources must share one tag vocabulary for their marginals to be
    # combinable; prev_tag_vocab enforces that across iterations.
    prev_tag_vocab = None
    for src_i, src in enumerate(srcs):
        _log.info("Processing src %s [%d/%d]", src, src_i + 1, len(srcs))
        load_from, load_params = load_src[src]
        path = Path(load_from) / "vocab.yml"
        _log.info("Loading %s vocabulary from %s", src, path)
        vocab = load(path.read_text(encoding="utf8"))
        if prev_tag_vocab is not None and vocab["tags"] != prev_tag_vocab:
            raise ValueError(f"tag vocab for src {src} isn't compatible")
        prev_tag_vocab = vocab["tags"]
        for name in vocab:
            _log.info("Found %d %s", len(vocab[name]), name)
        # Extend the source vocabulary with target-side words so the target
        # samples can be numericalized.
        _log.info("Extending %s vocabulary with target words", src)
        vocab.extend(chain(*samples.values()), ["words"])
        _log.info("Found %d words now", len(vocab["words"]))
        samples_ = {wh: list(vocab.stoi(samples[wh])) for wh in samples}
        path = Path(load_from) / "model.yml"
        _log.info("Loading %s model from metadata %s", src, path)
        model = load(path.read_text(encoding="utf8"))
        path = Path(load_from) / load_params
        _log.info("Loading %s model parameters from %s", src, path)
        model.load_state_dict(torch.load(path, "cpu"))
        # Replace the embedding layer with one covering the extended vocab,
        # filling new rows from the pretrained word vectors.
        _log.info("Creating %s extended word embedding layer", src)
        assert model.word_emb.embedding_dim == kv.vector_size
        with torch.no_grad():
            model.word_emb = torch.nn.Embedding.from_pretrained(
                extend_word_embedding(
                    model.word_emb.weight,
                    vocab["words"],
                    kv,
                    vocab["words"].index(vocab.UNK_TOKEN),
                )
            )
        model.to(device)
        for wh in ["train", "dev"]:
            # Tag each sample with its index so results computed in bucketed
            # (shuffled) batch order can be written back to the right sample.
            for i, s in enumerate(samples_[wh]):
                s["_id"] = i
            runner = Runner()
            runner.state["_ids"] = []
            # NOTE(review): "_ids" is initialized twice — the update() below
            # already resets it; the preceding line looks redundant.
            runner.state.update({"_ids": [], "log_marginals": []})
            @runner.on(Event.BATCH)
            def compute_marginals(state):
                # Closure over `model`, `vocab`, `device`: computes CRF
                # tag-pair log-marginals for one batch.
                batch = state["batch"].to_array()
                words = torch.from_numpy(batch["words"]).to(device)
                mask = words != vocab["words"].index(vocab.PAD_TOKEN)
                assert mask.all(), "must not have masking at test time"
                model.eval()
                scores = model(words)
                crf = LinearCRF(scores)
                # Small epsilon avoids log(0) on zero-probability marginals.
                lm = (crf.marginals() + 1e-9).log()
                assert not torch.isnan(lm).any()
                state["log_marginals"].extend(lm)
                state["_ids"].extend(batch["_id"].tolist())
                state["n_items"] = words.numel()
            n_toks = sum(len(s["words"]) for s in samples_[wh])
            ProgressBar(total=n_toks, unit="tok").attach_on(runner)
            _log.info("Computing marginals for %s set with source %s", wh, src)
            with torch.no_grad():
                runner.run(BucketIterator(samples_[wh], lambda s: len(s["words"]), batch_size))
            assert len(runner.state["log_marginals"]) == len(samples_[wh])
            assert len(runner.state["_ids"]) == len(samples_[wh])
            # Scatter batch-ordered marginals back onto their samples.
            for i, lms in zip(runner.state["_ids"], runner.state["log_marginals"]):
                samples_[wh][i]["log_marginals"] = lms
            assert len(samples_[wh]) == len(samples[wh])
            # Accumulate this source's log-marginals into the running sum
            # ("log_marginals") and keep the per-source list ("log_marginals_ls").
            _log.info("Combining the marginals")
            for i in tqdm(range(len(samples_[wh])), unit="sample", leave=False):
                lms = samples[wh][i].get("log_marginals", 0)
                lms = torch.tensor(lms, device=device) + samples_[wh][i]["log_marginals"]
                samples[wh][i]["log_marginals"] = lms.tolist()
                lmss = samples[wh][i].get("log_marginals_ls", [])
                lmss.append(samples_[wh][i]["log_marginals"].tolist())
                assert len(lmss) == src_i + 1
                samples[wh][i]["log_marginals_ls"] = lmss
    for wh in ["train", "dev"]:
        # Logarithmic opinion pool: average the per-source log-marginals and
        # renormalize over the (tag, prev-tag) pair dimension.
        _log.info("Computing the LOP on %s set", wh)
        for s in tqdm(samples[wh], unit="sample", leave=False):
            lms = torch.tensor(s["log_marginals"])
            lms /= len(srcs)
            assert lms.dim() == 3 and lms.size(1) == lms.size(2)
            # Renormalise the marginal probabilities
            lms = rearrange(lms, "slen nntags ntags -> slen (nntags ntags)")
            lms = lms.log_softmax(dim=1)
            lms = rearrange(
                lms, "slen (nntags ntags) -> slen nntags ntags", ntags=len(vocab["tags"])
            )
            s["lop"] = lms
            s.pop("log_marginals")
    for wh in ["train", "dev"]:
        _log.info("Computing error and diversity on %s set", wh)
        error = diversity = 0
        for src_i in range(len(srcs)):
            n_words = total_q_kl = total_kl = 0
            for s in tqdm(samples[wh], unit="sample", leave=False):
                lms = torch.tensor(s["log_marginals_ls"][src_i])
                assert lms.dim() == 3 and lms.size(1) == lms.size(2)
                # Per-token negative log-marginal of the gold tag pair,
                # skipping positions adjacent to BOS/EOS.
                for j in range(1, len(s["words"])):
                    if s["words"][j - 1] in ("<s>", "</s>"):
                        continue
                    if s["words"][j] in ("<s>", "</s>"):
                        continue
                    n_words += 1
                    total_q_kl += -lms[
                        j - 1,
                        vocab["tags"].index(s["tags"][j]),
                        vocab["tags"].index(s["tags"][j - 1]),
                    ]
                lop = s["lop"]
                assert lms.shape == lop.shape
                lms = rearrange(lms, "slen nntags ntags -> slen (nntags ntags)")
                lop = rearrange(lop, "slen nntags ntags -> slen (nntags ntags)")
                if s["words"] and s["words"][0] == "<s>":
                    lms, lop = lms[1:], lop[1:]  # remove BOS
                if s["words"] and s["words"][-1] == "</s>":
                    lms, lop = lms[:-1], lop[:-1]  # remove EOS
                # KL(LOP || source) per position, summed over the sentence.
                kl = kl_divergence(Categorical(logits=lop), Categorical(logits=lms))
                assert kl.dim() == 1
                total_kl += kl.sum()
            q_kl = total_q_kl / n_words
            src_kl = total_kl / n_words
            error += q_kl
            diversity += src_kl
        # Average the per-source quantities (Heskes' error/diversity split).
        error /= len(srcs)
        diversity /= len(srcs)
        _log.info("Error is %.4f", error)
        _log.info("Diversity is %.4f", diversity)
        _run.log_scalar(f"error_on_{wh}", float(error))
        _run.log_scalar(f"diversity_on_{wh}", float(diversity))
def assert_set_equality(set1: Set[Any], set2: Set[Any]) -> None:
    """Raise AssertionError unless both sets contain exactly the same elements."""
    only_in_first = set1 - set2
    only_in_second = set2 - set1
    if not only_in_first and not only_in_second:
        return
    # Build a message naming the keys unique to each side.
    parts = ["Expected sets to have the same keys."]
    if only_in_first:
        parts.append(f"Keys in the first set but not the second: {only_in_first}.")
    if only_in_second:
        parts.append(f"Keys in the second set but not the first: {only_in_second}.")
    raise AssertionError(" ".join(parts))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.