content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def inverse_gamma(data, alpha=0.1, beta=0.1):
    """
    Evaluate the inverse-gamma probability density function.

    :param data: Point at which to evaluate the density (must be > 0)
    :param alpha: Shape parameter (> 0)
    :param beta: Scale parameter (> 0)
    :return: Density of the inverse-gamma distribution at ``data``
    """
    # PDF: beta^alpha / Gamma(alpha) * x^(-alpha - 1) * exp(-beta / x).
    # The original code raised alpha to the power (data - 1), which is not
    # the inverse-gamma density; the correct factor is data^(-alpha - 1).
    return (pow(beta, alpha) / math.gamma(alpha)) *\
        pow(data, -alpha - 1) * math.exp(-beta / data)
def plot_metric(n_points, metric_ts, metric_name, ax):
    """
    Draw an uncertainty plot of ``metric_ts`` against ``n_points`` on ``ax``,
    then label both axes for the given metric.
    """
    incertitude_plot(n_points, metric_ts, ax)
    ax.set(
        xlabel='Number of points in the point cloud',
        ylabel=f'{metric_name} difference',
    )
def query(queryid):
    """
    Dynamic Query View.

    Must be logged in to access this view, otherwise redirected to login page.
    A unique view is generated based off a query ID.
    A page is only returned if the query ID is associated with a logged in user
    (or the logged in user is an admin). Otherwise the user is sent to a
    404 error page.
    """
    query = models.SpellChecks.query.get(queryid)
    if query is not None and ((g.user.is_admin) or (g.user.username == query.username)):
        # (Removed a stray no-op expression statement "query" that had no effect.)
        render = make_response(render_template('spellcheck/history_s_query.html', query=query))
        # Attach standard security headers to the response.
        security_headers = {
            'Content-Security-Policy': "default-src 'self'",
            'X-Content-Type-Options': 'nosniff',
            'X-Frame-Options': 'SAMEORIGIN',
            'X-XSS-Protection': '1; mode=block',
        }
        for header, value in security_headers.items():
            render.headers.set(header, value)
        return render
    else:
        abort(404)
def get_ssid() -> str:
    """Gets SSID of the network connected.

    Returns:
        str:
            Wi-Fi or Ethernet SSID, or ``None`` if the airport tool failed
            or no SSID entry was present in its output.
    """
    process = Popen(
        ['/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport', '-I'],
        stdout=PIPE)
    out, err = process.communicate()
    if error := process.returncode:
        logger.error(f"Failed to fetch SSID with exit code: {error}\n{err}")
        # Don't try to parse the output of a failed command.
        return None
    # Each airport output line looks like "      key: value"; build a dict of
    # the key/value pairs and pull out the SSID entry.
    # noinspection PyTypeChecker
    return dict(map(str.strip, info.split(': ')) for info in out.decode('utf-8').splitlines()[:-1] if
                len(info.split()) == 2).get('SSID')
def retrieval_def(ws):
    """Context manager for the RetrievalDef[Init/Close] context.

    Resets the covariance-matrix block lists on ``ws``, opens the retrieval
    definition, yields the workspace, and guarantees the definition is
    closed even if the body of the ``with`` block raises.
    """
    ws.covmat_block = []
    ws.covmat_inv_block = []
    ws.retrievalDefInit()
    try:
        yield ws
    finally:
        # Always balance the Init/Close pair, even on exceptions.
        ws.retrievalDefClose()
def predict_recording_level_label(audio_file, model1, model2=None):
    """
    Predict the recording-level class of an audio file.

    :param audio_file: the path of the audio file to test (a .wav file, or a
        pre-extracted feature file for any other extension)
    :param model1: the path of the recording level model to use
    :param model2: optional path of a second model; its class probabilities
        are averaged with the first when ``audio_features`` is neither
        "fused" nor "audio"
    :return: class_name : the predicted class of the input file
    """
    with open(model1, 'rb') as model_fh:
        model_dict = pickle.load(model_fh)
    # Global configuration needed by the feature extractors.
    basic_features_params = {
        'google_credentials': config['google_credentials'],
        'audio_models_folder': config['audio_models_folder'],
        'text_models_folder': config['text_models_folder'],
    }
    # Settings stored alongside the trained model.
    for key in ('classifier_type', 'class_mapping', 'classifier',
                'features_type', 'reference_text', 'text_segmentation_params',
                'audio_features', 'pyaudio_params', 'pyaudio_num_features',
                'raudio_num_features_discard', 'gender'):
        basic_features_params[key] = model_dict[key]
    if basic_features_params['reference_text']:
        # The transcription is expected next to the audio file, same base
        # name with a .txt suffix.
        folder = os.path.dirname(audio_file)
        file_name = os.path.splitext(os.path.basename(audio_file))[0] + '.txt'
        textfile = [os.path.join(folder, file_name)]
    else:
        textfile = []
    if audio_file.endswith('.wav'):
        # Raw audio: extract features from scratch.
        feature_extractor = RecordingLevelFeatureExtraction(basic_features_params)
        fused_features, fused_names, readys_features, readys_names, pyaudio_features, pyaudio_names, labels, filenames = \
            feature_extractor.extract_recording_level_features([audio_file], textfile, ['positive'])
    else:
        # Anything else is treated as a pre-extracted feature file.
        feature_extractor = RecordingLevelFeatureLoading(basic_features_params)
        fused_features, fused_names, readys_features, readys_names, pyaudio_features, pyaudio_names, labels, filenames = \
            feature_extractor.load_recording_level_features([audio_file], textfile, ['positive'])
    print(readys_names)
    print(readys_features)
    print(pyaudio_names)
    print(pyaudio_features)
    classifier = model_dict['classifier']
    class_mapping = model_dict['class_mapping']
    if basic_features_params['audio_features'] == "fused":
        class_id = int(classifier.predict(fused_features))
    elif basic_features_params['audio_features'] == "audio":
        class_id = int(classifier.predict(readys_features))
    else:
        # Late fusion: average class probabilities of the two models.
        proba1 = classifier.predict_proba(readys_features)
        with open(model2, 'rb') as model2_fh:
            model_dict2 = pickle.load(model2_fh)
        proba2 = model_dict2['classifier'].predict_proba(pyaudio_features)
        average = np.mean([proba1, proba2], axis=0)
        class_id = int(np.argmax(average, axis=1))
    class_name = class_mapping[class_id]
    print("class name:", class_name)
    print("class id:", class_id)
    # The original function discarded its prediction (bare "return") even
    # though the docstring promised class_name; return it so callers can use
    # the result (backward compatible for callers that ignored None).
    return class_name
def test_stovoltpl_find_list(dpm_mode_cpcs):  # noqa: F811
    # pylint: disable=redefined-outer-name
    """
    Test list(), find(), findall().
    """
    if not dpm_mode_cpcs:
        pytest.skip("No CPCs in DPM mode provided")
    for cpc in dpm_mode_cpcs:
        assert cpc.dpm_enabled
        print("Testing on CPC {} (DPM mode)".format(cpc.name))
        skipif_no_storage_mgmt_feature(cpc)
        console = cpc.manager.client.consoles.console
        session = cpc.manager.session
        # Collect every (group template, volume template) pair associated
        # with this CPC so one can be picked at random.
        templates = console.storage_group_templates.findall(
            **{'cpc-uri': cpc.uri})
        grp_vol_tuples = [
            (grp_tpl, vol_tpl)
            for grp_tpl in templates
            for vol_tpl in grp_tpl.storage_volume_templates.list()
        ]
        if not grp_vol_tuples:
            msg_txt = "No storage group templates with volumes associated to " \
                "CPC {}".format(cpc.name)
            warnings.warn(msg_txt, End2endTestWarning)
            pytest.skip(msg_txt)
        stogrptpl, stovoltpl = random.choice(grp_vol_tuples)
        runtest_find_list(
            session, stogrptpl.storage_volume_templates, stovoltpl.name, 'name',
            'size', STOVOLTPL_VOLATILE_PROPS, STOVOLTPL_MINIMAL_PROPS,
            STOVOLTPL_LIST_PROPS)
def train_predict(clf, X_train, X_test, y_train, y_test):
    """Fit ``clf`` on the training split, then return its predictions for ``X_test``."""
    print("Training a {}...".format(clf.__class__.__name__))
    # Time the fit via the IPython %time line magic.
    get_ipython().run_line_magic('time', 'clf.fit(X_train, y_train)')
    print(clf)
    print("Predicting test labels...")
    return clf.predict(X_test)
def FindFilesWithContents(string_a, string_b):
    """Returns list of paths of files that contain |string_a| or |string_b|.

    Uses --name-only to print the file paths. The default behavior of git grep
    is to OR together multiple patterns.

    Args:
        string_a: A string to search for (not a regular expression).
        string_b: As above.

    Returns:
        A list of file paths as strings.
    """
    # text=True makes check_output return str; the original code called
    # split('\n') on the bytes object that check_output returns by default,
    # which raises TypeError on Python 3.
    matching_files = subprocess.check_output(
        ['git', 'grep', '--name-only', '--fixed-strings', '-e', string_a,
         '-e', string_b],
        text=True)
    # splitlines() also discards the trailing newline, replacing the manual
    # [:-1] slice of the original.
    return matching_files.splitlines()
def get_executable_choices(versions):
    """
    Return available Maya releases.

    Keeps every version key that does not start with the configured
    defaults prefix.
    """
    choices = []
    for version in versions:
        if not version.startswith(Config.DEFAULTS):
            choices.append(version)
    return choices
def test_1T1E1A_lastN_with_limit(reporter_dataset):
    """
    See GitHub issue #249.
    """
    # Query: ask for the last 3 points with a larger page limit.
    response = requests.get(query_url(), params={
        'type': entity_type,
        'lastN': 3,
        'limit': 10
    })
    assert response.status_code == 200, response.text
    # Expect only last N values and their timestamps.
    expected = {
        'entityId': entity_id,
        'attrName': attr_name,
        'index': [
            '1970-01-28T00:00:00',
            '1970-01-29T00:00:00',
            '1970-01-30T00:00:00'
        ],
        'values': [27, 28, 29]
    }
    # Assert
    assert_1T1E1A_response(response.json(), expected)
def list_songs():
    """
    Lists all the songs in your media server
    Can do this without a login
    """
    # # Check if the user is logged in, if not: back to login.
    # if('logged_in' not in session or not session['logged_in']):
    #     return redirect(url_for('login'))
    page['title'] = 'List Songs'
    # Get a list of all songs from the database.
    # (Removed the dead "allsongs = None" store that was immediately
    # overwritten.)
    allsongs = database.get_allsongs()
    # Data integrity check: the template expects an iterable, so fall back
    # to an empty list if the database returned None ("is None" replaces the
    # unidiomatic "== None" comparison).
    if allsongs is None:
        allsongs = []
    return render_template('listitems/listsongs.html',
                           session=session,
                           page=page,
                           user=user_details,
                           allsongs=allsongs)
def linear_regression(xs, ys):
    """
    Compute simple linear regression coefficients.

    https://en.wikipedia.org/wiki/Simple_linear_regression

    Returns the (a, b) coefficients of the fitted line f(x) = a * x + b.
    """
    x_mean = statistics.mean(xs)
    y_mean = statistics.mean(ys)
    # Slope = covariance(x, y) / variance(x), computed from deviations.
    slope_num = sum((x - x_mean) * (y - y_mean) for x, y in zip(xs, ys))
    slope_den = sum((x - x_mean) * (x - x_mean) for x in xs)
    slope = slope_num / slope_den
    intercept = y_mean - slope * x_mean
    return slope, intercept
def scp_amazon(runmap_file, ec_key, dns, anl_fold):
    """ Copy relevant files and folders to EC2 instance.

    :param runmap_file: tab-separated file mapping run IDs to mapping filenames
    :param ec_key: path of the EC2 SSH private key
    :param dns: public DNS name of the EC2 instance
    :param anl_fold: remote analysis folder to create and copy into
    """
    # Map run IDs to their "_corrected" mapping filenames.
    run_map_dict = {}
    # 'r' replaces the deprecated 'rU' mode (removed in Python 3.11); the
    # with-statement also closes the handle the original code leaked.
    with open(runmap_file, 'r') as infile:
        for line in infile:
            spline = line.strip().split("\t")
            run_map_dict[spline[0]] = spline[1].strip().split(".txt")[0]+"_corrected"+".txt"
    # NOTE(review): these commands are assembled by string interpolation and
    # run through a shell; arguments containing shell metacharacters would be
    # a command-injection risk. Inputs appear to be operator-supplied here.
    mkdir = "mkdir %s/" % anl_fold
    os.system("ssh -i %s ubuntu@%s %s" % (ec_key, dns, mkdir))
    for fold_id in run_map_dict:
        folder = "Output_folder_"+fold_id
        log_output("\n#Step 6: Copying relevant files to the Amazon EC2 instance of QIIME...")
        os.system("scp -r -i %s %s ubuntu@%s:/home/ubuntu/%s" % (ec_key, folder, dns, anl_fold))
        os.system("scp -i %s Index_filtered_ordered_run_%s.fastq ubuntu@%s:/home/ubuntu/%s" % (ec_key, fold_id, dns, anl_fold))
        mapfile = run_map_dict[fold_id]
        os.system("scp -i %s %s ubuntu@%s:/home/ubuntu/%s" % (ec_key, mapfile, dns, anl_fold))
    # The run map itself is copied once, after the per-run folders.
    os.system("scp -i %s %s ubuntu@%s:/home/ubuntu/%s" % (ec_key, runmap_file, dns, anl_fold))
    log_output("All files copied to EC2 instace!\n")
    return
def calcR1(n_hat):
    """
    Calculate the rotation matrix that would rotate the
    position vector x_ae to the x-y plane.

    Parameters
    ----------
    n_hat : `~numpy.ndarray` (3)
        Unit vector normal to plane of orbit.

    Returns
    -------
    R1 : `~numpy.matrix` (3, 3)
        Rotation matrix.
    """
    normal = np.ascontiguousarray(n_hat)
    # Rotation axis: perpendicular to both the orbit normal and the z-axis.
    axis = np.cross(normal, Z_AXIS)
    # Cosine of the rotation angle, equal to the cosine of the inclination.
    cos_angle = np.dot(normal, Z_AXIS)
    # Skew-symmetric cross-product matrix of the rotation axis.
    skew = np.array([[0, -axis[2], axis[1]],
                     [axis[2], 0, -axis[0]],
                     [-axis[1], axis[0], 0]])
    # Rodrigues-style formula: I + K + K^2 / (1 + cos).
    # NOTE(review): singular when cos_angle == -1 (normal anti-parallel to z).
    return np.identity(3) + skew + np.linalg.matrix_power(skew, 2) * (1 / (1 + cos_angle))
def load():
    """
    entry point for the UI, launch an instance of the tool with this method

    Keeps a single tool window alive in the module-level ``_win`` global:
    any previously opened window is closed before a fresh one is shown.
    """
    global _win
    try:
        # Close any previously opened window so only one instance exists.
        _win.close()
    except(NameError, RuntimeError):
        # NameError: _win was never created yet; RuntimeError: the underlying
        # Qt widget was already deleted. Either way it is safe to continue.
        pass
    finally:
        # Always create and show a fresh window.
        _win = QThreadDemoWindow()
        _win.show()
def get_arb_info(info, n=1000):
    """
    Fetch up to ``n`` arbitrage-signal rows for one exchange/trading pair.

    Example: info := {'start':1556668800, 'period':300, 'trading_pair':'eth_btc', 'exchange_id':'binance'}

    Returns a fixed-up DataFrame, or None if the query produced no result.
    """
    required_keys = {'exchange_id', 'trading_pair', 'period', 'start'}
    assert required_keys.issubset(info.keys())
    info['n'] = n
    q = """with sub as (
    select * from candlesticks
    where trading_pair=%(trading_pair)s and period=%(period)s and timestamp>=%(start)s
    ),
    thing as (
    select "timestamp", avg(close) from sub
    group by (timestamp)
    )
    select exchange,trading_pair, thing.timestamp, "period", "avg", "close"-"avg" as arb_diff, ("close"-"avg")/"avg" as arb_signal from
    (sub inner join thing on sub.timestamp = thing.timestamp)
    where exchange=%(exchange_id)s
    order by thing.timestamp
    limit %(n)s;
    """
    results = safe_qall(q, info)
    if results is None:
        return None
    # arb_signal is more interpretable than arb_diff but the signal is the same
    columns = ["exchange", "trading_pair", "timestamp", "period", "avg", "arb_diff", "arb_signal"]
    return d.fix_df(pd.DataFrame(results, columns=columns))
def compare_gaussian_classifiers():
    """
    Fit both Gaussian Naive Bayes and LDA classifiers on both gaussians1 and gaussians2 datasets

    For each dataset the two fitted models are shown side by side in a
    two-panel scatter plot: marker colors encode the predicted labels,
    black 'x' markers mark the fitted class means, and ellipses depict the
    fitted covariances.
    """
    for f in ["gaussian1.npy", "gaussian2.npy"]:
        # Load dataset
        X, y = load_dataset('../datasets/' + f)
        # print(data[0].shape,data[1].shape)
        # Fit models and predict over training set
        lda = LDA()
        naive_bayes = GaussianNaiveBayes()
        lda.fit(X, y)
        naive_bayes.fit(X, y)
        naive_bayes.likelihood(X)
        lda_pred = lda.predict(X)
        bayes_pred = naive_bayes.predict(X)
        # Plot a figure with two suplots, showing the Gaussian Naive Bayes predictions on the left and LDA predictions
        # on the right. Plot title should specify dataset used and subplot titles should specify algorithm and accuracy
        # Create subplots
        # Add traces for data-points setting symbols and colors
        # NOTE(review): this import re-executes every loop iteration; harmless
        # but it could be hoisted to module level.
        from IMLearn.metrics import accuracy
        fig = make_subplots(rows=1, cols=2, subplot_titles=['LDA Model. Acuuracy: ' + str(accuracy(y, lda_pred)),
                                                            'Gaussian Naive Bayes Model. Acuuracy: ' + str(
                                                                accuracy(y, bayes_pred))])
        fig.add_traces([go.Scatter(x=X[:, 0], y=X[:, 1], mode='markers', showlegend=False,
                                   marker=dict(color=lda_pred, line=dict(color="black", width=1))),
                        go.Scatter(x=X[:, 0], y=X[:, 1], mode='markers', showlegend=False,
                                   marker=dict(color=bayes_pred, line=dict(color="black", width=1)))],
                       rows=[1, 1], cols=[1, 2])
        fig.update_layout(title="Comparing LDA and GNB over gaussians. " + "Data set: " + f,
                          font=dict(
                              family="Arial, monospace",
                              size=20,
                              color="RebeccaPurple"),
                          margin=dict(t=100))
        # Add `X` dots specifying fitted Gaussians' means
        for plot_col, model in enumerate([lda, naive_bayes]):
            for i, k in enumerate(model.classes_):
                # Fitted mean of class k in the current model.
                center_x = model.mu_[i][0]
                center_y = model.mu_[i][1]
                fig.add_trace(go.Scatter(x=[center_x], y=[center_y],
                                         mode="markers", showlegend=False,
                                         marker=dict(size=15, color='black', symbol='x',
                                                     line=dict(color="black", width=1))), row=1, col=plot_col + 1)
                # Add ellipses depicting the covariances of the fitted Gaussians
                # (LDA shares one covariance; GNB has per-class diagonal ones).
                if plot_col == 0:
                    cov = lda.cov_
                else:
                    cov = np.diag(naive_bayes.vars_[np.where(naive_bayes.classes_ == k)[0][0]])
                fig.add_trace(get_ellipse(np.array([center_x, center_y]), cov), row=1, col=plot_col + 1),
        fig.show()
def parse_nested_root(stream: TokenStream) -> AstRoot:
    """Parse nested root.

    Consumes the ':' introducer, then parses each line of the indented block
    as a command and wraps the commands into an AstRoot node. Raises
    InvalidSyntax (located at the colon) when the block turns out empty.
    """
    with stream.syntax(colon=r":"):
        colon = stream.expect("colon")
    if not consume_line_continuation(stream):
        exc = InvalidSyntax("Expected non-empty block.")
        raise set_location(exc, colon)
    # The two innermost indentation levels: the block's own level and the
    # level of the commands nested inside it.
    level, command_level = stream.indentation[-2:]
    commands: List[AstCommand] = []
    with stream.intercept("newline"), stream.provide(
        scope=(),
        line_indentation=command_level,
    ):
        while True:
            commands.append(delegate("command", stream))
            # The command parser consumes the trailing newline so we need to rewind
            # to be able to use "consume_line_continuation()".
            while (token := stream.peek()) and not token.match("newline", "eof"):
                stream.index -= 1
            with stream.provide(multiline=True, line_indentation=level):
                if not consume_line_continuation(stream):
                    # No further continuation line at the block's level: done.
                    break
    node = AstRoot(commands=AstChildren(commands))
    # Locate the node over the span of its first and last command.
    return set_location(node, commands[0], commands[-1])
def wg_config_write():
    """ Write configuration file.

    Returns the weechat.config_write() return code for wg_config_file.
    """
    # The "global wg_config_file" declaration was dropped: it is unnecessary
    # for a read-only access to a module-level name.
    return weechat.config_write(wg_config_file)
def _pip(params):
    """
    Runs pip install commands

    Installs each package named in ``params``, one at a time, via sudo.
    """
    for package in params:
        _sudo("pip install %s" % package)
def validate_twitter_handle(value):
    """Raises a ValidationError if value isn't a valid twitter handle."""
    # Guard clause: a matching handle needs no further action.
    if VALID_TWITTER_RE.match(value):
        return
    raise ValidationError(
        u'This is not a valid twitter handle.'
        u' Please enter just the portion after the @.'
    )
def is_palindrome_permutation(phrase):
    """checks if a string is a permutation of a palindrome"""
    alphabet_size = ord("z") - ord("a") + 1
    counts = [0] * alphabet_size
    odd_letters = 0
    for ch in phrase:
        index = char_number(ch)
        if index == -1:
            # Non-letter characters are ignored.
            continue
        counts[index] += 1
        # Track how many letters currently have an odd count.
        odd_letters += 1 if counts[index] % 2 else -1
    # A palindrome permutation allows at most one letter with an odd count.
    return odd_letters <= 1
def get_stored_username(filename='numbers.json'):
    """Get Stored Username.

    :param filename: path of the JSON file holding the username; defaults to
        'numbers.json' for backward compatibility with existing callers.
    :return: the stored username, or None when the file does not exist.
    """
    try:
        with open(filename) as file_object:
            return json.load(file_object)
    except FileNotFoundError:
        return None
def test_classes_shape():
    """Test that n_classes_ and classes_ have proper shape."""
    # Classification, single output
    forest = RandomForestClassifier()
    forest.fit(X, y)
    assert_equal(forest.n_classes_, 2)
    assert_equal(forest.classes_, [-1, 1])
    # Classification, multi-output: stack y and 2*y as two output columns.
    y_multi = np.vstack((y, np.array(y) * 2)).T
    forest = RandomForestClassifier()
    forest.fit(X, y_multi)
    assert_equal(len(forest.n_classes_), 2)
    assert_equal(len(forest.classes_), 2)
    assert_equal(forest.n_classes_, [2, 2])
    assert_equal(forest.classes_, [[-1, 1], [-2, 2]])
def find_app(pattern: Optional[str]) -> None:
    """projector ide find [pattern]

    Find projector-compatible IDE with the name matching to the given pattern.
    If no pattern is specified, finds all the compatible IDEs.
    """
    # Thin CLI wrapper: all lookup logic lives in do_find_app.
    do_find_app(pattern)
def post_process_symbolizer_image_file(file_href, dirs):
    """ Given an image file href and a set of directories, modify the image file
        name so it's correct with respect to the output and cache directories.

        Returns a (path, file type, width, height) tuple for the local image.
    """
    # support latest mapnik features of auto-detection
    # of image sizes and jpeg reading support...
    # http://trac.mapnik.org/ticket/508
    mapnik_auto_image_support = (MAPNIK_VERSION >= 701)
    mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601)
    # Resolve the href relative to the stylesheet source directory.
    file_href = urljoin(dirs.source.rstrip('/')+'/', file_href)
    scheme, n, path, p, q, f = urlparse(file_href)
    if scheme in ('http','https'):
        # Remote images are downloaded into the cache directory first.
        scheme, path = '', locally_cache_remote_file(file_href, dirs.cache)
    if scheme not in ('file', '') or not systempath.exists(un_posix(path)):
        raise Exception("Image file needs to be a working, fetchable resource, not %s" % file_href)
    if not mapnik_auto_image_support and not Image:
        raise SystemExit('PIL (Python Imaging Library) is required for handling image data unless you are using PNG inputs and running Mapnik >=0.7.0')
    img = Image.open(un_posix(path))
    if mapnik_requires_absolute_paths:
        path = posixpath.realpath(path)
    else:
        path = dirs.output_path(path)
    msg('reading symbol: %s' % path)
    image_name, ext = posixpath.splitext(path)
    if ext in ('.png', '.tif', '.tiff'):
        # Formats Mapnik can read directly keep their extension.
        output_ext = ext
    else:
        # Everything else (e.g. JPEG, GIF) gets a .png destination name.
        output_ext = '.png'
    # new local file name
    dest_file = un_posix('%s%s' % (image_name, output_ext))
    if not posixpath.exists(dest_file):
        # NOTE(review): the image is saved as PNG even when output_ext was
        # kept as .tif/.tiff above — confirm this is intended.
        img.save(dest_file,'PNG')
    msg('Destination file: %s' % dest_file)
    return dest_file, output_ext[1:], img.size[0], img.size[1]
def describe_trusted_advisor_check_summaries(checkIds=None):
    """
    Returns the summaries of the results of the Trusted Advisor checks that
    have the specified check IDs. Check IDs can be obtained by calling
    DescribeTrustedAdvisorChecks. The response contains an array of
    TrustedAdvisorCheckSummary objects.

    See also: AWS API Documentation

    :type checkIds: list
    :param checkIds: [REQUIRED] The IDs of the Trusted Advisor checks.

    :rtype: dict
    :return: Response syntax::

        {
            'summaries': [
                {
                    'checkId': 'string',
                    'timestamp': 'string',
                    'status': 'string',  # "ok" (green), "warning" (yellow),
                                         # "error" (red), or "not_available"
                    'hasFlaggedResources': True|False,
                    'resourcesSummary': {
                        'resourcesProcessed': 123,
                        'resourcesFlagged': 123,
                        'resourcesIgnored': 123,
                        'resourcesSuppressed': 123
                    },
                    'categorySpecificSummary': {
                        'costOptimizing': {
                            'estimatedMonthlySavings': 123.0,
                            'estimatedPercentMonthlySavings': 123.0
                        }
                    }
                },
            ]
        }

    ``summaries`` holds one TrustedAdvisorCheckSummary per requested check:
    its id, last-refresh timestamp, alert status, whether resources were
    flagged, counts of processed/flagged/ignored/suppressed resources, and
    (Cost Optimizing category only) the estimated monthly savings.

    :raises: Support.Client.exceptions.InternalServerError
    """
    pass
def _register_cli_commands(ext_registry: extension.ExtensionRegistry):
    """
    Register xcube's standard CLI commands.

    Each command name maps to a lazily-imported component at
    ``xcube.cli.<name>:<name>``.
    """
    for name in ('chunk',
                 'compute',
                 'benchmark',
                 'dump',
                 'edit',
                 'extract',
                 'gen',
                 'gen2',
                 'genpts',
                 'grid',
                 'level',
                 'optimize',
                 'prune',
                 'rectify',
                 'resample',
                 'serve',
                 'tile',
                 'vars2dim',
                 'verify',
                 'versions',
                 # Experimental + Hidden
                 'io'):
        ext_registry.add_extension(
            loader=extension.import_component(f'xcube.cli.{name}:{name}'),
            point=EXTENSION_POINT_CLI_COMMANDS,
            name=name
        )
def qft(q):
    """Quantum Fourier Transform on a quantum register.

    Args:
        q (list): quantum register where the QFT is applied.

    Returns:
        generator with the required quantum gates applied on the quantum circuit.
    """
    n = len(q)
    for target in range(n):
        # Hadamard on the target, then controlled phase rotations from every
        # later qubit, with angle pi / 2^(distance).
        yield gates.H(q[target])
        for control in range(target + 1, n):
            yield gates.CU1(q[control], q[target], np.pi / 2 ** (control - target))
    # Reverse the qubit order with swaps.
    for i in range(n // 2):
        yield gates.SWAP(i, n - i - 1)
def collate_fn_feat_padded(batch):
    """
    Sort a data list by frame length (descending order)
    batch : list of tuple (feature, label). len(batch) = batch_size
        - feature : torch tensor of shape [1, 40, 80] ; variable size of frames
        - labels : torch tensor of shape (1)
    ex) samples = collate_fn([batch])
        batch = [dataset[i] for i in batch_indices]. ex) [Dvector_train_dataset[i] for i in [0,1,2,3,4]]
        batch[0][0].shape = torch.Size([1,64,774]). "774" is the number of frames per utterance.

    Returns (padded_features, labels): all utterances padded to the longest
    one in the batch by cyclically repeating their own frames.
    """
    # NOTE(review): sorts the caller's batch list in place.
    batch.sort(key=lambda x: x[0].shape[2], reverse=True)
    feats, labels = zip(*batch)
    # Merge labels => torch.Size([batch_size,1])
    labels = torch.stack(labels, 0)
    labels = labels.view(-1)
    # Merge frames
    lengths = [feat.shape[2] for feat in feats] # in decreasing order
    max_length = lengths[0]
    # features_mod.shape => torch.Size([batch_size, n_channel, dim, max(n_win)])
    padded_features = torch.zeros(len(feats), feats[0].shape[0], feats[0].shape[1], feats[0].shape[2]).float() # convert to FloatTensor (it should be!). torch.Size([batch, 1, feat_dim, max(n_win)])
    for i, feat in enumerate(feats):
        end = lengths[i]
        num_frames = feat.shape[2]
        # Cyclically append the utterance's own frames until it is at least
        # max_length long, then truncate to exactly max_length.
        while max_length > num_frames:
            feat = torch.cat((feat, feat[:,:,:end]), 2)
            num_frames = feat.shape[2]
        padded_features[i, :, :, :] = feat[:,:,:max_length]
    return padded_features, labels
def slugify(value, allow_unicode=False):
    """
    Taken from https://github.com/django/django/blob/master/django/utils/text.py

    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
    dashes to single dashes. Remove characters that aren't alphanumerics,
    underscores, or hyphens. Convert to lowercase. Also strip leading and
    trailing whitespace, dashes, and underscores.
    """
    text = str(value)
    # Normalize unicode; optionally transliterate down to ASCII.
    text = (
        unicodedata.normalize('NFKC', text)
        if allow_unicode
        else unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')
    )
    # Lowercase and drop everything but word chars, whitespace, and hyphens.
    text = re.sub(r'[^\w\s-]', '', text.lower())
    # Collapse whitespace/hyphen runs into single hyphens and trim edges.
    return re.sub(r'[-\s]+', '-', text).strip('-_')
def average_gradient_norm(model, data):
    """ Computes the average gradient norm for a keras model

    Iterates one epoch over ``data`` and returns the mean (over batches) of
    the global L2 norm of the gradients of ``model.total_loss`` with respect
    to the trainable weights.
    """
    # just checking if the model was already compiled
    if not hasattr(model, "train_function"):
        raise RuntimeError("You must compile your model before using it.")
    weights = model.trainable_weights # weight tensors
    get_gradients = model.optimizer.get_gradients(model.total_loss, weights) # gradient tensors
    input_tensors = [
        # input data
        model.inputs[0],
        # how much to weight each sample by
        model.sample_weights[0],
        # labels
        model.targets[0],
        # train or test mode
        keras.backend.learning_phase()
    ]
    grad_fct = keras.backend.function(inputs=input_tensors, outputs=get_gradients)
    steps = 0
    total_norm = 0
    s_w = None
    while steps < data.steps_per_epoch:
        X, y = next(data)
        # set sample weights to one
        # for every input
        # NOTE(review): s_w is sized from the FIRST batch only; if a later
        # batch (e.g. the last one) is smaller this will mismatch — confirm
        # the generator yields equally-sized batches.
        if s_w is None:
            s_w = np.ones(X.shape[0])
        # Final 0 selects "test" learning phase (no dropout etc.).
        gradients = grad_fct([X, s_w, y, 0])
        # Global L2 norm across all gradient tensors for this batch.
        total_norm += np.sqrt(np.sum([np.sum(np.square(g)) for g in gradients]))
        steps += 1
    return total_norm / float(steps)
def string_regex_matcher(input_str: str, regex: str, replacement_str=""):
    """Python version of StringRegexMatcher in mlgtools.

    Replaces every substring of ``input_str`` matched by ``regex`` with
    ``replacement_str``.

    Args:
        input_str (str): input string to match
        regex (str): regular expression to match
        replacement_str (str): replacement string for string matched with regex

    Returns:
        str: the input with matches replaced, or the original string when no
        regex is supplied
    """
    if not regex:
        # Nothing to match against; report and hand the input back unchanged.
        log(logging.INFO, DataCategory.PUBLIC,
            '_string_regex_matcher: regex is None or empty. Returning original sentence.')
        return input_str
    compiled = check_and_compile_regular_expression(regex)
    return compiled.sub(replacement_str, input_str)
def cont_scatterplot(data: pd.DataFrame,
                     x: str,
                     y: str,
                     z: str or None,
                     label: str,
                     cmap: str,
                     size: int or str or None,
                     fig: plt.Figure,
                     cbar_kwargs: dict,
                     **kwargs):
    """
    Scatterplot with continuous label

    Draws a 2D scatter (or 3D when ``z`` is given) of the columns ``x``/``y``
    (/``z``) of ``data``, colored by the continuous column ``label``, and
    attaches a colorbar to ``fig``.

    Parameters
    ----------
    data: Pandas.DataFrame
    x: str
    y: str
    z: str, optional
    label: str
    cmap: str
    size: int or str, optional
        Fixed marker size, or the name of a column supplying per-point sizes.
    fig: Matplotlib.Figure
    cbar_kwargs: dict
        Keyword arguments passed to colorbar
    kwargs:
        Additional keyword arguments passed to Matplotlib.Axes.scatter call

    Returns
    -------
    Matplotlib.Axes
    """
    # A string size refers to a column with per-point marker sizes.
    if isinstance(size, str):
        size = data[size].values
    if z is not None:
        # 3D scatter when a z column is provided.
        ax = fig.add_subplot(111, projection="3d")
        im = ax.scatter(data[x].values,
                        data[y].values,
                        data[z].values,
                        c=data[label].values,
                        s=size,
                        cmap=cmap,
                        **kwargs)
    else:
        ax = fig.add_subplot(111)
        im = ax.scatter(data[x].values,
                        data[y].values,
                        c=data[label].values,
                        s=size,
                        cmap=cmap,
                        **kwargs)
    # Colorbar reflects the continuous label values.
    fig.colorbar(im, ax=ax, **cbar_kwargs)
    return ax
def performance(problem, W, H, C, R_full):
    """Compute the performance of the IMC estimates.

    For every iterate stored along the last axis of ``W``/``H``, evaluates
    the train objective, regularization value, score and loss on the full
    matrix ``R_full``, sparsity of both factors, and the step-to-step L2
    variation. ``C`` is the (C_lasso, C_group, C_ridge) coefficient triple.

    Returns (metrics, titles, units) where ``metrics`` stacks one row per
    reported quantity, in the order given by ``titles``.
    """
    assert isinstance(problem, IMCProblem), \
        """`problem` must be an IMC problem."""
    assert W.ndim == H.ndim, """Mismatching dimensionality."""
    if W.ndim < 3:
        # Promote a single estimate to a length-1 trajectory.
        W, H = np.atleast_3d(W, H)
    n_iterations = W.shape[-1]
    assert W.shape[-1] == H.shape[-1], """Mismatching number of iterations."""
    # sparsity coefficients: fraction of (near-)zero entries per iterate
    sparsity_W = np.isclose(W, 0).mean(axis=(0, 1))
    sparsity_H = np.isclose(H, 0).mean(axis=(0, 1))
    # Regularization -- components
    reg_ridge = (0.5 * np.linalg.norm(W, "fro", axis=(0, 1))**2 +
                 0.5 * np.linalg.norm(H, "fro", axis=(0, 1))**2)
    reg_group = (np.linalg.norm(W, 2, axis=1).sum(axis=0) +
                 np.linalg.norm(H, 2, axis=1).sum(axis=0))
    reg_lasso = (np.linalg.norm(W.reshape(-1, W.shape[-1]), 1, axis=0) +
                 np.linalg.norm(H.reshape(-1, H.shape[-1]), 1, axis=0))
    # Regularization -- full (weighted combination of the components)
    C_lasso, C_group, C_ridge = C
    regularizer_value = (C_group * reg_group +
                         C_lasso * reg_lasso +
                         C_ridge * reg_ridge)
    # sequential Frobenius norm of the matrices (zero appended for last step)
    div_W = np.r_[np.linalg.norm(np.diff(W, axis=-1),
                                 "fro", axis=(0, 1)), 0]
    div_H = np.r_[np.linalg.norm(np.diff(H, axis=-1),
                                 "fro", axis=(0, 1)), 0]
    # Objective value on the train data
    v_val_train = np.array([problem.value(W[..., i], H[..., i])
                            for i in range(n_iterations)])
    # Objective on the full matrix (expensive!)
    v_val_full = np.array([
        problem.loss(problem.prediction(W[..., i], H[..., i]).ravel(),
                     R_full.ravel()).sum() for i in range(n_iterations)])
    # Score on the full matrix (expensive!)
    score_full = np.array([
        problem.score(problem.prediction(W[..., i], H[..., i]).ravel(),
                      R_full.ravel()) for i in range(n_iterations)])
    metrics = np.stack([v_val_train, regularizer_value,
                        score_full, v_val_full,
                        sparsity_W, sparsity_H,
                        div_W, div_H], axis=0)
    titles = ['Observed Elements', 'Regularization', 'Score',
              'Full Matrix', 'Zero Values of W', 'Zero Values of H',
              'L2-Norm Variation W', 'L2-Norm Variation H']
    units = ['L2-Loss', 'L2-Loss', 'Score', 'L2-Loss',
             '%', '%', 'L2-Norm', 'L2-Norm']
    return metrics, titles, units
def courseschedules_to_private_ical_feed(user):
    """
    Generate an ICAL feed for all course schedules associated with the given user.

    The IDs given for each event are sequential, unique only amongst the results of this particular query, and not
    guaranteed to be consistent across calls.

    :param user: The user to generate an ICAL feed for.
    :return: An ICAL string of all the user's course schedules.
    """
    calendar = _create_calendar(user)
    events = []
    for course in Course.objects.for_user(user.pk).iterator():
        events += coursescheduleservice.course_schedules_to_events(course, course.schedules)
    # Render event times in the user's configured timezone.
    timezone.activate(pytz.timezone(user.settings.time_zone))
    try:
        for event in events:
            calendar_event = icalendar.Event()
            calendar_event["UID"] = f"he-{user.pk}-{event.pk}"
            calendar_event["SUMMARY"] = event.title
            calendar_event["DTSTAMP"] = icalendar.vDatetime(timezone.localtime(event.created_at))
            if not event.all_day:
                calendar_event["DTSTART"] = icalendar.vDatetime(timezone.localtime(event.start))
                calendar_event["DTEND"] = icalendar.vDatetime(timezone.localtime(event.end))
            else:
                calendar_event["DTSTART"] = icalendar.vDate(event.start)
                # All-day DTEND is exclusive, hence the extra day.
                calendar_event["DTEND"] = icalendar.vDate((event.end + datetime.timedelta(days=1)))
            calendar_event["DESCRIPTION"] = _create_event_description(event)
            calendar.add_component(calendar_event)
    finally:
        # Always restore the active timezone, even if event rendering raises;
        # otherwise the activated timezone would leak into subsequent work on
        # this thread.
        timezone.deactivate()
    return calendar.to_ical()
def uninstall(packages, options=None):
    """
    Remove one or more packages.

    Extra *options* may be passed to ``opkg`` if necessary.

    :param packages: a package name, or an iterable of package names
    :param options: optional list of extra command-line options
    """
    manager = MANAGER
    command = "remove"
    if options is None:
        options = []
    if not isinstance(packages, six.string_types):
        packages = " ".join(packages)
    options = " ".join(options)
    # Build the command explicitly instead of interpolating from locals(),
    # which was fragile (renaming any local variable silently broke it).
    cmd = '%s %s %s %s' % (manager, command, options, packages)
    run_as_root(cmd, pty=False)
def print_init_dict(dict_, fname='test.trempy.ini'):
    """Print an initialization dictionary to an *.ini file.

    :param dict_: nested initialization dictionary keyed by block name;
        must contain at least the 'VERSION' and 'QUESTIONS' blocks
    :param fname: name of the output *.ini file
    :raises TrempyError: if the utility-function version is not supported
    """
    version = dict_['VERSION']['version']

    keys = ['VERSION', 'SIMULATION', 'ESTIMATION',
            'SCIPY-BFGS', 'SCIPY-POWELL', 'SCIPY-L-BFGS-B',
            'CUTOFFS', 'QUESTIONS']
    # Add keys based on version of the utility function
    if version in ['scaled_archimedean']:
        keys += ['UNIATTRIBUTE SELF', 'UNIATTRIBUTE OTHER', 'MULTIATTRIBUTE COPULA']
    elif version in ['nonstationary']:
        keys += ['ATEMPORAL', 'DISCOUNTING']
    else:
        raise TrempyError('version not implemented')

    questions = list(dict_['QUESTIONS'].keys())
    # Tracks whether the CUTOFFS section header has been written yet; the
    # header is only emitted lazily once a printable cutoff is found.
    is_cutoffs = False
    with open(fname, 'w') as outfile:
        for key_ in keys:
            # We do not need to print the CUTOFFS block if none are specified. So we first check
            # below if there is any need.
            if key_ not in ['CUTOFFS']:
                outfile.write(key_ + '\n\n')
            for label in sorted(dict_[key_].keys()):
                info = dict_[key_][label]
                label_internal = label
                # Manually translate labels to internal labels based on version
                if version in ['scaled_archimedean']:
                    if label in ['r'] and 'SELF' in key_:
                        label_internal = 'r_self'
                    elif label in ['r'] and 'OTHER' in key_:
                        label_internal = 'r_other'
                elif version in ['nonstationary']:
                    pass
                # Build format string for line
                str_ = '{:<25}'
                if label_internal in PREFERENCE_PARAMETERS[version] + questions:
                    # Handle optional arguments where None can occur
                    if (isinstance(label_internal, str) and
                            label_internal.startswith('unrestricted_weights') and info[0] is None):
                        str_ += ' {:>25} {:>10} '
                    # Preference parameters are formatted as floats
                    else:
                        str_ += ' {:25.4f} {:>10} '
                else:
                    # All other parameters are formatted as strings
                    str_ += ' {:>25}\n'
                # Handle string output (e.g. "True" or "None")
                if label in ['detailed', 'version', 'heterogeneity']:
                    info = str(info)
                if label in ['discounting', 'stationary_model']:
                    if info is None:
                        info = 'None'
                    else:
                        info = str(info)
                if (label_internal in PREFERENCE_PARAMETERS[version] + questions and
                        key_ != 'CUTOFFS'):
                    line, str_ = format_coefficient_line(label_internal, info, str_)
                elif key_ in ['CUTOFFS']:
                    line, str_ = format_cutoff_line(label, info)
                    # We do not need to print a [NONE, None] cutoff.
                    if line.count('None') == 2:
                        continue
                    if not is_cutoffs:
                        is_cutoffs = True
                        outfile.write(key_ + '\n\n')
                else:
                    line = [label, info]
                outfile.write(str_.format(*line))
            outfile.write('\n')
def test_chip_data(client) -> None:
    """
    Verify that the chip-data endpoint responds with HTTP 200.

    :param client: test client fixture
    :return: None
    """
    response = client.get("/api/v1/chip_data")
    assert response.status_code == 200
def AuxStream_Cast(*args):
    """
    Cast(BaseObject o) -> AuxStream
    AuxStream_Cast(Seiscomp::Core::BaseObjectPtr o) -> AuxStream

    Downcast a generic ``BaseObject`` to an ``AuxStream``.
    """
    # Delegates to the SWIG-generated C++ binding.
    # NOTE(review): failure semantics (None vs. exception) come from the
    # binding layer and are not visible here — confirm before relying on them.
    return _DataModel.AuxStream_Cast(*args)
def BPNet(tasks, bpnet_params):
    """
    BPNet architecture definition

    Args:
        tasks (dict): dictionary of tasks info specifying
            'signal', 'loci', and 'bias' for each task
        bpnet_params (dict): parameters to the BPNet architecture
            The keys include (all are optional)-
            'input_len': (int)
            'output_profile_len': (int),
            'motif_module_params': (dict) -
                'filters' (list)
                'kernel_sizes' (list)
                'padding' (str)
            'syntax_module_params': (dict) -
                'num_dilation_layers' (int)
                'filters' (int)
                'kernel_size' (int)
                'padding': (str)
                'pre_activation_residual_unit' (boolean)
            'profile_head_params': (dict) -
                'filters' (int)
                'kernel_size' (int)
                'padding' (str)
            'counts_head_params': (dict) -
                'units' (int)
            'profile_bias_module_params': (dict) -
                'kernel_sizes' (list)
            'counts_bias_module_params': (dict) - N/A
            'use_attribution_prior': (boolean)
            'attribution_prior_params': (dict) -
                'frequency_limit' (int)
                'limit_softness' (float)
                'grad_smooth_sigma' (int)
                'profile_grad_loss_weight' (float)
                'counts_grad_loss_weight' (float)
            'loss_weights': (list)

    Returns:
        tensorflow.keras.layers.Model
    """
    # load params from json file
    (input_len,
     output_profile_len,
     motif_module_params,
     syntax_module_params,
     profile_head_params,
     counts_head_params,
     profile_bias_module_params,
     counts_bias_module_params,
     use_attribution_prior,
     attribution_prior_params) = load_params(bpnet_params)

    # Step 1 - sequence input (one-hot encoded DNA: 4 channels per position)
    one_hot_input = layers.Input(shape=(input_len, 4), name='sequence')

    # Step 2 - Motif module (one or more conv layers)
    motif_module_out = motif_module(
        one_hot_input, motif_module_params['filters'],
        motif_module_params['kernel_sizes'], motif_module_params['padding'])

    # Step 3 - Syntax module (all dilation layers)
    syntax_module_out = syntax_module(
        motif_module_out, syntax_module_params['num_dilation_layers'],
        syntax_module_params['filters'], syntax_module_params['kernel_size'],
        syntax_module_params['padding'],
        syntax_module_params['pre_activation_residual_unit'])

    # Step 4.1 - Profile head (large conv kernel)
    # Step 4.1.1 - get total number of output tracks across all tasks
    # NOTE(review): assumes task keys are the integers 0..num_tasks-1 — confirm.
    num_tasks = len(list(tasks.keys()))
    total_tracks = 0
    for i in range(num_tasks):
        total_tracks += len(tasks[i]['signal']['source'])

    # Step 4.1.2 - conv layer to get pre bias profile prediction
    profile_head_out = profile_head(
        syntax_module_out, total_tracks,
        profile_head_params['kernel_size'], profile_head_params['padding'])

    # first let's figure out if bias input is required based on
    # tasks info, this also affects the naming of the profile head
    # and counts head layers

    # total number of bias tasks in the tasks_info dictionary
    total_bias_tracks = 0
    # number of bias tracks in each task
    task_bias_tracks = {}
    for i in range(num_tasks):
        task_bias_tracks[i] = _get_num_bias_tracks_for_task(tasks[i])
        total_bias_tracks += task_bias_tracks[i]

    # Step 4.1.3 crop profile head to match output_len
    # When there is no bias, this cropped tensor IS the model output, so it
    # must carry the name the batch generator expects.
    if total_bias_tracks == 0:
        profile_head_name = 'profile_predictions'
    else:
        profile_head_name = 'profile_head_cropped'
    crop_size = int_shape(profile_head_out)[1] // 2 - output_profile_len // 2
    profile_head_out = layers.Cropping1D(
        crop_size, name=profile_head_name)(profile_head_out)

    # Step 4.2 - Counts head (global average pooling)
    if total_bias_tracks == 0:
        counts_head_name = 'logcounts_predictions'
    else:
        counts_head_name = 'counts_head'
    counts_head_out = counts_head(
        syntax_module_out, counts_head_name, total_tracks)

    # Step 5 - Bias Input
    # if the tasks have no bias tracks then profile_head and
    # counts_head are the outputs of the model
    inputs = [one_hot_input]
    if total_bias_tracks == 0:
        # we need to first rename the layers to correspond to what
        # the batch generator sends
        # At this point, since there is no bias the two outputs
        # are called 'profile_head_cropped' & 'counts_head'
        # NOTE(review): the layer `name=` arguments above already used the
        # prediction names in this branch; setting the private `_name` looks
        # redundant — confirm against the batch generator's expectations.
        print("renaming layers")
        profile_head_out._name = 'profile_predictions'
        counts_head_out._name = 'logcounts_predictions'
        profile_outputs = profile_head_out
        logcounts_outputs = counts_head_out
    else:
        if num_tasks != len(profile_bias_module_params['kernel_sizes']):
            raise NoTracebackException(
                "Length on 'kernel_sizes' in profile_bias_module_params "
                "must match #tasks")
        # Step 5.1 - Define the bias input layers (one pair per task that
        # actually has bias tracks; tasks without bias get None placeholders)
        profile_bias_inputs = []
        counts_bias_inputs = []
        for i in range(num_tasks):
            if task_bias_tracks[i] > 0:
                # profile bias input for task i
                profile_bias_inputs.append(layers.Input(
                    shape=(output_profile_len, task_bias_tracks[i]),
                    name="profile_bias_input_{}".format(i)))
                # counts bias input for task i
                # NOTE(review): shape=(task_bias_tracks[i]) is a plain int,
                # not a 1-tuple — confirm Keras accepts this form.
                counts_bias_inputs.append(layers.Input(
                    shape=(task_bias_tracks[i]),
                    name="counts_bias_input_{}".format(i)))
                # append to inputs
                inputs.append(profile_bias_inputs[i])
                inputs.append(counts_bias_inputs[i])
            else:
                profile_bias_inputs.append(None)
                counts_bias_inputs.append(None)
        # Step 5.2 - account for profile bias
        profile_outputs = profile_bias_module(
            profile_head_out, profile_bias_inputs, tasks,
            kernel_sizes=profile_bias_module_params['kernel_sizes'])
        # Step 5.3 - account for counts bias
        logcounts_outputs = counts_bias_module(
            counts_head_out, counts_bias_inputs, tasks)

    if use_attribution_prior:
        # instantiate attribution prior Model with inputs and outputs
        return AttributionPriorModel(
            attribution_prior_params['frequency_limit'],
            attribution_prior_params['limit_softness'],
            attribution_prior_params['grad_smooth_sigma'],
            attribution_prior_params['profile_grad_loss_weight'],
            attribution_prior_params['counts_grad_loss_weight'],
            inputs=inputs,
            outputs=[profile_outputs, logcounts_outputs])
    else:
        # instantiate keras Model with inputs and outputs
        return Model(
            inputs=inputs, outputs=[profile_outputs, logcounts_outputs])
def getCubePixels(cubeImages):
    """
    Return the raw pixel data of every ``bpy.types.Image`` in *cubeImages*.

    Each entry is a plain list copy of the corresponding image's ``pixels``
    sequence. Factoring this functionality out into its own function is
    useful for performance profiling.
    """
    pixel_lists = []
    for face in cubeImages:
        pixel_lists.append(face.pixels[:])
    return pixel_lists
async def get_round_details(round_id):
    """
    Get details for a given round (include snapshot)
    """
    stmt = (
        select(detail_columns)
        .select_from(select_from_default)
        .where(rounds_table.c.id == round_id)
    )
    row = await conn.fetch_one(query=stmt)
    # Preserve the falsy result (e.g. no matching round) unchanged.
    if not row:
        return row
    return dict(row)
def dens_hist_plot(**kwargs):
    """
    plot prediction probability density histogram

    Arguments:
        df: classification prediction probability in a pandas dataframe with
            'top1prob' and 'top2prob' columns; defaults to a small random
            sample when omitted
    """
    # Default demo data used when no dataframe is supplied.
    data = {'top1prob': random.sample(range(1, 100), 5),
            'top2prob': random.sample(range(1, 100), 5)
            }
    def_vals = {"df": data
                }  # default parameters value
    for k, v in def_vals.items():
        kwargs.setdefault(k, v)
    df = kwargs['df']
    x = df['top1prob']
    y = df['top2prob']

    def make_anno(x=1, y=1, text=text_source):
        # NOTE(review): `text_source` is a module-level global — confirm it is
        # defined before this function is called.
        return go.Annotation(
            text=text,  # annotation text
            showarrow=False,  # remove arrow
            xref='paper',  # use paper coords
            yref='paper',  # for both coordinates
            xanchor='right',  # x-coord line up with right end of text
            yanchor='bottom',  # y-coord line up with bottom end of text
            x=x,  # position's x-coord
            y=y  # and y-coord
        )

    title = 'Prediction Result<br>\
Top1, Top2'  # plot's title
    x_title = 'Top1 Probability'  # .format(site1) # x and y axis titles
    y_title = 'Top2 Probability'
    # Make a layout object (title, fonts, annotation, figure size)
    layout1 = go.Layout(
        title=title,  # set plot's title
        font=dict(
            family='PT Sans Narrow',  # global font
            size=13
        ),
        xaxis1=go.XAxis(
            title=x_title,  # set x-axis title
            # range=xy_range, # x-axis range
            zeroline=False  # remove x=0 line
        ),
        annotations=go.Annotations([  # add annotation citing the data source
            make_anno()
        ]),
        showlegend=True,  # show the legend
        autosize=False,  # custom size
        width=980,  # set figure width
        height=880,  # and height
        margin=dict(l=100,
                    r=50,
                    b=100,
                    t=50
                    )
    )
    # Raw points plus a 2D density contour over the same axes.
    trace1 = go.Scatter(
        x=x, y=y, mode='markers', name='points',
        marker=dict(color='rgb(102,0,0)', size=2, opacity=0.4)
    )
    trace2 = go.Histogram2dContour(
        x=x, y=y, name='density', ncontours=20,
        colorscale='Hot', reversescale=True, showscale=False
    )
    # Marginal histograms, drawn on the secondary (x2/y2) axes.
    trace3 = go.Histogram(
        x=x, name='x density',
        marker=dict(color='rgb(102,0,0)'),
        yaxis='y2'
    )
    trace4 = go.Histogram(
        y=y, name='y density', marker=dict(color='rgb(102,100,200)'),
        xaxis='x2'
    )
    data = [trace1, trace2, trace3, trace4]
    # Axis-domain layout: reserve the top/right 15% of the figure for the
    # marginal histograms.
    layout = go.Layout(
        showlegend=False,
        autosize=False,
        xaxis=dict(
            domain=[0, 0.85],
            showgrid=False,
            zeroline=False, title=x_title
        ),
        yaxis=dict(
            domain=[0, 0.85],
            showgrid=False,
            zeroline=False, title=y_title
        ),
        margin=dict(
            t=50
        ),
        hovermode='closest',
        bargap=0,
        xaxis2=dict(
            domain=[0.85, 1],
            showgrid=False,
            zeroline=False
        ),
        yaxis2=dict(
            domain=[0.85, 1],
            showgrid=False,
            zeroline=False
        )
    )
    fig = go.Figure(data=data, layout=layout)
    # Overlay the title/font/size layout on top of the axis-domain layout.
    fig.update(layout=layout1)
    # Watermark logo anchored to the bottom-left corner of the figure.
    fig['layout'].update(images=[dict(
        source="image/0016_Blue_horizon.svg",
        xref="paper",
        yref="paper", xanchor="left", yanchor="bottom",
        x=0,
        y=0,
        sizex=0.1,
        sizey=0.1,
        sizing="stretch",
        opacity=0.5,
        layer="above")])
    iplot(fig, show_link=False, config={'modeBarButtonsToRemove': ['sendDataToCloud'], 'showLink': False, 'displaylogo': False})
    # plot(fig, filename='network_predic.html', show_link=False, config={'modeBarButtonsToRemove': ['sendDataToCloud'], 'showLink': False, 'displaylogo' : False})
def run_query(run_id, athena_client, query, athena_database_name, wait_to_finish):
    """ Run the given Athena query

    Arguments:
        run_id {string} -- run_id for the current Step Function execution
        athena_client {boto3.client} -- Boto3 Athena client
        query {string} -- Athena query to execute
        athena_database_name {string} -- Athena database to use for query execution
        wait_to_finish {boolean} -- Should method wait for the Athena query to finish?

    Raises:
        utility.S3InsightsException: when Athena query fails

    Returns:
        string -- Athena execution id
    """
    # Query results land under a run-specific prefix in the consolidated bucket.
    output_location = {
        'OutputLocation': 's3://{0}/{1}'.format(
            config.DeploymentDetails.consolidated_inventory_bucket_name,
            get_s3_output_location_prefix(run_id)),
    }
    # A database context is optional (e.g. for DDL that names the database itself).
    if athena_database_name is not None:
        query_response = athena_client.start_query_execution(
            QueryString=query,
            QueryExecutionContext={
                'Database': athena_database_name
            },
            ResultConfiguration=output_location)
    else:
        query_response = athena_client.start_query_execution(
            QueryString=query,
            ResultConfiguration=output_location)
    execution_id = query_response['QueryExecutionId']
    if wait_to_finish:
        # Polls at most 9 times, 30 s apart (~4.5 minutes total). A query that
        # is still running after the last poll returns its id without raising.
        # NOTE(review): a CANCELLED state is treated like "still running" here
        # and only times out — confirm that is intended.
        for attempt_count in range(1, 10):
            query_status = athena_client.get_query_execution(
                QueryExecutionId=execution_id)
            query_execution_status = query_status['QueryExecution']['Status']['State']
            if utility.compare_strings(query_execution_status, 'succeeded'):
                break
            elif utility.compare_strings(query_execution_status, 'failed'):
                raise utility.S3InsightsException('Athena query failed for unknown reasons')
            time.sleep(30)
    return execution_id
def next_version(v: str) -> str:
    """
    If ``v`` is a prerelease version, returns the base version.  Otherwise,
    returns the next minor version after the base version.

    :param v: a PEP 440 version string
    :return: the bumped (or base) version string, with any epoch preserved
    """
    vobj = Version(v)
    if vobj.is_prerelease:
        return str(vobj.base_version)
    vs = list(vobj.release)
    # Pad single-component releases (e.g. "1") so the minor bump below cannot
    # raise IndexError.
    while len(vs) < 2:
        vs.append(0)
    vs[1] += 1
    # Zero out everything after the minor component (micro and beyond).
    vs[2:] = [0] * len(vs[2:])
    s = ".".join(map(str, vs))
    if vobj.epoch:
        s = f"{vobj.epoch}!{s}"
    return s
def sample_translate_text(text, target_language, project_id):
    """
    Translating Text

    Args:
      text The content to translate in string format
      target_language Required. The BCP-47 language code to use for translation.
    """
    translation_client = translate.TranslationServiceClient()
    parent = translation_client.location_path(project_id, "global")
    # Source language is fixed to Vietnamese.
    response = translation_client.translate_text(
        parent=parent,
        contents=[text],
        mime_type='text/plain',  # mime types: text/plain, text/html
        source_language_code='vi',
        target_language_code=target_language)
    # Display the translation for each input text provided
    for translation in response.translations:
        print(u"Translated text: {}".format(translation.translated_text))
    return response
def temp_obs():
    """Return a list of tobs from the 2016-08-24 to 2017-08-23"""
    # Query temperature observations newer than the start of the last year.
    rows = (
        session.query(Measurement.date, Measurement.tobs)
        .filter(Measurement.date > year_start_date)
        .all()
    )
    # One dict per observation, serialized as JSON.
    temp_data = [{"Date": date, "Temperature": tobs} for date, tobs in rows]
    return jsonify(temp_data)
def pcmh_5_5c__3():
    """ER/IP discharge log

    Controller for the ER/inpatient discharge-log step of the PCMH workflow:
    builds a signed download URL for the log template, registers the
    question-and-answer widget asking the practice to maintain the log, and
    returns the document metadata for the view.
    """
    # Signed (hmac + salt) URL so the .doc template can be fetched securely.
    er_ip_log_url = URL('init', 'word', 'er_ip_log.doc',
                        vars=dict(**request.get_vars),
                        hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["app_id", "type"])

    # er_ip_log tracking chart: one required upload, unlimited re-uploads.
    # NOTE(review): MultiQNA argument meanings (min, max, required-flag)
    # inferred from usage — confirm against its definition.
    er_ip_log = MultiQNA(
        1, float('inf'), True,
        'er_ip_log',
        "Please fill out <a href='{url}'>this ER/IP log</a> with at least 4 months of past data. Continue to maintain "
        "this log permanently as part of your PCMH transformation. <b>Please make sure all the patients in this log "
        "have their discharge summary in their patient record!</b>"
        .format(url=er_ip_log_url)
    )
    er_ip_log.set_template("{choose_file}")

    return dict(documents=[
        dict(
            description="Emergency Room / Inpatient Tracking Log",
            url=er_ip_log_url,
            permissions=["IS_TEAM"]
        ),
    ])
def prettify_users(users: List[Dict]) -> None:
    """
    Modify list of users in place.

    Converts each user's 'money' value to int and removes the 'num' key.

    :param users: list of user dicts; each must have a 'money' key and may
        optionally have a 'num' key
    """
    for user in users:
        user['money'] = int(user['money'])
        # pop() tolerates users without a 'num' key, unlike the original `del`.
        user.pop('num', None)
def get_largest_component_size(component_size_distribution):
    """Finds the largest component in the given component size distribution.

    Parameters
    ----------
    component_size_distribution : dict
        The component size distribution. See the function get_component_size_dist

    Returns
    -------
    The largest component size : int
    """
    # Iterating a dict yields its keys, which here are the component sizes.
    return max(component_size_distribution)
def bin_regions_parallel(
        bed_files,
        out_dir,
        chromsizes,
        bin_size=200,
        stride=50,
        final_length=1000,
        parallel=12):
    """Shard and bin a set of BED files, dispatching one job per file to a
    pool of parallel workers.
    """
    split_queue = setup_multiprocessing_queue()
    for bed_file in bed_files:
        # Strip the .narrowPeak/.bed extension to build the output prefix.
        basename = os.path.basename(bed_file)
        prefix = basename.split(".narrowPeak")[0].split(".bed")[0]
        out_prefix = "{}/{}".format(out_dir, prefix)
        job_args = [
            bed_file,
            out_prefix,
            bin_size,
            stride,
            final_length,
            chromsizes,
            "naive"]
        split_queue.put([bin_regions_sharded, job_args])
    # run the queue
    run_in_parallel(split_queue, parallel=parallel, wait=True)
    return None
def is_jsonable(data):
    """
    Check whether *data* can be serialized to JSON.

    Source: https://stackoverflow.com/a/53112659/8957978

    :param data: any Python object
    :return: True if ``json.dumps(data)`` succeeds, False otherwise
    """
    try:
        json.dumps(data)
        return True
    except (TypeError, OverflowError, ValueError):
        # ValueError covers circular references ("Circular reference
        # detected"), which the original two-exception tuple let escape.
        return False
def handle_jumbo_message(conn, message):
    """Handle a jumbo message fragment received.

    :param conn: connection object carrying the jumbo reassembly state
        (``jumbo_thread`` and ``events``)
    :param message: received message whose payload is formatted as
        ``prefix/seq/length/msg``
    """
    payload = message.payload.message
    # TODO: this cuts out all sender and receiver info -- ADD SENDER GID
    logger.info("Received jumbo message fragment")
    # maxsplit=3 keeps any "/" characters inside the message body intact;
    # the original unbounded split raised ValueError on such payloads.
    prefix, seq, length, msg = payload.split("/", 3)
    # if a jumbo monitor thread is not running, start one
    if not conn.jumbo_thread.is_alive():
        conn.events.jumbo_len = length
        conn.jumbo_thread = threading.Thread(
            target=monitor_jumbo_msgs, daemon=True, args=[conn]
        )
        conn.jumbo_thread.start()
    # add the fragment to the events.jumbo queue for reassembly
    conn.events.jumbo.append(payload)
    return
def check_ref_type(ref, allowed_types, ws_url):
    """
    Validates the object type of ref against the list of allowed types. If it passes, this
    returns True, otherwise False.
    Really, all this does is verify that at least one of the strings in allowed_types is
    a substring of the ref object type name.
    Ex1:
    ref = "KBaseGenomes.Genome-4.0"
    allowed_types = ["assembly", "KBaseFile.Assembly"]
    returns False
    Ex2:
    ref = "KBaseGenomes.Genome-4.0"
    allowed_types = ["assembly", "genome"]
    returns True
    """
    # Case-insensitive substring match against the resolved object type.
    obj_type = get_object_type(ref, ws_url).lower()
    return any(allowed.lower() in obj_type for allowed in allowed_types)
def check_infractions(
    infractions: Dict[str, List[str]],
) -> int:
    """
    Check infractions.

    :param infractions: the infractions dict {commit sha, infraction explanation}
    :return: 0 if no infractions, non-zero otherwise
    """
    # Guard clause: nothing to report.
    if not infractions:
        logger.print('All good!')
        return 0
    logger.print('Missing sign-off(s):')
    logger.print()
    for sha, explanations in infractions.items():
        logger.print('\t' + sha)
        for explanation in explanations:
            logger.print('\t\t' + explanation)
    return 1
def _reduction_range_bars(y, ylow, yhigh, sitecol, data=None, **kwargs):
    """ Draws upper/lower bound bars on `reduction_plots`

    :param y: column name of the central value
    :param ylow: column name of the lower bound
    :param yhigh: column name of the upper bound
    :param sitecol: column identifying the site (determines x-offset)
    :param data: dataframe holding the columns above
    :param kwargs: accepted for seaborn/FacetGrid compatibility; unused here
    """
    ax = plt.gca()
    # Horizontal jitter per site so overlapping error bars stay readable.
    offset_map = {
        'ED-1': 0.0,
        'LV-2': -0.2,
        'LV-4': +0.2
    }
    # Asymmetric error magnitudes relative to the central value.
    data = (
        data.assign(offset=data[sitecol].map(offset_map))
            .assign(err_low=data[y] - data[ylow])
            .assign(err_high=data[yhigh] - data[y])
    )
    # (Removed an unused `value_selection` local present in the original.)
    data['x'] = (
        data.groupby(sitecol)
            .apply(lambda g: np.arange(g['offset'].size) + g['offset'])
            .values
            .flatten()
    )
    ax.errorbar(x=data['x'], y=data[y], yerr=data[['err_low', 'err_high']].values.T, ls='none', fmt='k')
def get_particle_tracks(_particle_tracker, particle_detects, time, is_last, debug_axis=None, metrics=None):
    """This module constructs the particle tracks.
    WARNING: No examples for Finishing. No error warning.

    Parameters
    ----------
    _particle_tracker: dict
        A particle tracker dictionary
    particle_detects: list
        A list of detected particle positions.
    time: int
        Current time.
    is_last: bool
        Last frame. Boolean.
    debug_axis: ax
        Matplotlib axis for debug plotting. Defaults to None.
    metrics: dict
        output parameter for track filtering metrics

    Returns
    -------
    (A list of track dictionaries, Updated particle tracker dictionary)
    """
    if not metrics:
        metrics = {}

    # Projecting the particle in the 'Current_Tracks' list.
    projection = np.zeros(2)
    projected_points = []
    for track in _particle_tracker['Current_Tracks']:
        projection = project(track['Particles_Estimated_Position'][-1],
                             track['Particles_Estimated_Velocity'][-1],
                             track['Particles_Estimated_Acceleration'][-1],
                             _particle_tracker['Use_Acceleration'],
                             delta_t=1.0)
        projection_copy = projection.copy()  # This prevents duplication in Projection_List
        projected_points.append(projection_copy)

    # NOTE: DEBUG plot projected particles
    if len(projected_points) and debug_axis is not None:
        projected_points = np.array(projected_points)
        debug_axis.scatter(projected_points[:, 1], projected_points[:, 0], marker='o', s=2, color='cyan', label='path projections')

    # assignments: track index -> detections matched to it this frame
    assignments = defaultdict(list)
    assignment_dists = defaultdict(list)
    unassigned = []

    # If no detection, set it to empty list
    if not particle_detects:
        particle_detects = []

    # Find the distances from the detections to projections and assign to most probable track
    if len(projected_points) == 0 or len(particle_detects) == 0:
        unassigned = particle_detects
    else:
        pairwise_distances = distance_matrix(particle_detects, projected_points)
        for index, particle in enumerate(particle_detects):
            # Get associated track index (nearest projection)
            assign_index = np.argmin(pairwise_distances[index])
            # Get assignment distance
            min_assign_dist = np.min(pairwise_distances[index])
            # Get length of associated track
            curr_track_len = len(_particle_tracker['Current_Tracks'][assign_index]['Particles_Position'])
            if curr_track_len == 1 and min_assign_dist > _particle_tracker['Max_Init_Dist']:
                # Second point in a track, but too far from first point
                unassigned.append(particle)
            elif curr_track_len > 1 and min_assign_dist > _particle_tracker['Max_Assignment_Dist']:
                # Future points in a track, but too far from projection
                unassigned.append(particle)
            else:
                # Detection is close enough to be associated to track
                assignments[assign_index].append(particle)
                if not _particle_tracker['Allow_Cluster_Aggregation']:
                    # Remember the distance so the single best detection can
                    # be chosen later instead of aggregating the cluster.
                    min_dist = pairwise_distances[index][assign_index]
                    assignment_dists[assign_index].append(min_dist)

    finished_tracks = []
    current_tracks = []

    # Updating the tracks by aggregation
    for i, _ in enumerate(_particle_tracker['Current_Tracks']):
        positions = np.array(assignments[i])
        aggregated_position = None
        if len(positions) > 0:
            if _particle_tracker['Allow_Cluster_Aggregation']:
                aggregated_position = aggregate(positions)
            else:
                best_index = np.argmin(assignment_dists[i])
                aggregated_position = positions[best_index]
        track = _particle_tracker['Current_Tracks'][i]
        finished = True
        # A track stays alive if it matched a detection, or if it has not yet
        # exhausted its budget of projected (detection-less) frames.
        if (aggregated_position is not None
                or track['Projected_Frames'] < _particle_tracker['Max_Projected_Frames']):
            finished = False
            track = update(time, aggregated_position, track, _particle_tracker['Use_Acceleration'])
            current_tracks.append(track)
        if is_last or finished:
            # Only keep tracks with enough real (non-projected) observations.
            track_length = sum(p is not None for p in track['Particles_Position'])
            if track_length >= _particle_tracker['Min_Track_Obs']:
                finished_track = finish(track, _particle_tracker['Last_ID'] + 1)
                _particle_tracker['Last_ID'] = _particle_tracker['Last_ID'] + 1
                finished_tracks.append(finished_track)

    # Adding new tracks from the Unassigned list to the Current_Tracks
    for particle in unassigned:
        _particle_tracker['Temp_ID'] += 1
        track = particle_track(time,
                               particle,
                               VELOCITY,
                               ACCELERATION,
                               _particle_tracker['Temp_ID'])
        current_tracks.append(track)
        if 'tracks_started' not in metrics:
            metrics['tracks_started'] = 0
        metrics['tracks_started'] += 1

    _particle_tracker['Current_Tracks'] = current_tracks

    return finished_tracks, _particle_tracker
def handle_removed_term(event):
    """Un-index term into inner catalog"""
    parent = event.oldParent
    # Only act when the term was removed from a thesaurus terms container.
    if not IThesaurusTermsContainer.providedBy(parent):  # pylint: disable=no-value-for-parameter
        return
    thesaurus = parent.__parent__
    if not IThesaurus.providedBy(thesaurus):  # pylint: disable=no-value-for-parameter
        return
    unindex_object(event.object, thesaurus.catalog)
def test_server_fixture(
    loop,
    aiohttp_client,
):
    """A pytest fixture which yields a test server to be used by tests.

    Args:
        loop (Event loop): The built-in event loop provided by pytest.
        aiohttp_client (aiohttp_client): Built-in pytest fixture used as a wrapper to the aiohttp web server.

    Yields:
        aiohttp_client : A aiohttp_client server used by tests.
    """
    # NOTE(review): no @pytest.fixture decorator is visible in this chunk —
    # confirm it is applied at the definition/registration site.
    # The context manager guarantees the fake server is torn down after the test.
    with FakeServer(loop, aiohttp_client) as test_server:
        yield test_server
def default_role(name, arguments, options, content, lineno,
                 content_offset, block_text, state, state_machine):
    """Set the default interpreted text role.

    With no argument, restores docutils' built-in default role; with one
    argument, installs the named role as the default. Returns a (possibly
    empty) list of system messages.
    """
    if not arguments:
        # BUG FIX: `dict.has_key` was removed in Python 3; use `in` instead.
        if '' in roles._roles:
            # restore the "default" default role
            del roles._roles['']
        return []
    role_name = arguments[0]
    role, messages = roles.role(
        role_name, state_machine.language, lineno, state.reporter)
    if role is None:
        error = state.reporter.error(
            'Unknown interpreted text role "%s".' % role_name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return messages + [error]
    roles._roles[''] = role
    # @@@ should this be local to the document, not the parser?
    return messages
def init(self):
    """Initialize the Instrument object with instrument specific values.

    Sets the ``acknowledgements`` and ``references`` attributes on the
    Instrument and logs the acknowledgements.
    """
    # Credit line for the SAMI2 model itself.
    self.acknowledgements = " ".join(("This work uses the SAMI2 ionosphere",
                                      "model written and developed by the",
                                      "Naval Research Laboratory."))
    # Primary SAMI2 model paper followed by the sami2py software citation.
    self.references = " ".join(("Huba, J.D., G. Joyce, and J.A. Fedder,",
                                "Sami2 is Another Model of the Ionosphere",
                                "(SAMI2): A new low‐latitude ionosphere",
                                "model, J. Geophys. Res., 105, Pages",
                                "23035-23053,",
                                "https://doi.org/10.1029/2000JA000035,",
                                "2000.\n",
                                "Klenzing, J., Jonathon Smith, Michael",
                                "Hirsch, & Angeline G. Burrell. (2020,",
                                "July 17). sami2py/sami2py: Version 0.2.2",
                                "(Version v0.2.2). Zenodo.",
                                "http://doi.org/10.5281/zenodo.3950564"))
    logger.info(self.acknowledgements)
    return
def get_zind_json(server_token, output_folder) -> Dict:
    """
    Returns the dict for the ZInD json.

    Sends a request to the BridgeAPI to get details about the ZInD Dataset
    Stores the response json file in output folder

    :param server_token: token for access to the API
    :param output_folder: path to store response
    :return: the "value" entry of the ZInD response dict, or None on failure
    """
    dest_path = os.path.join(output_folder, "zind_response.json")
    result_dict = {}
    value_key = "value"

    # Reuse a previously downloaded response when it parses cleanly.
    if os.path.exists(dest_path):
        logger.info(f"Loading ZInD json from {dest_path}")
        try:
            # `with` closes the handle (the original leaked it via json.load(open(...))).
            with open(dest_path) as cached_file:
                result_dict = json.load(cached_file)
            logger.info("Loaded ZInD json successfully")
        except Exception as e:
            logger.info(f"ZInD json invalid, re-downloading file: {e}")

    if value_key not in result_dict:
        zind_url = BRIDGE_API_URL
        headers = {"Authorization": f"Bearer {server_token}"}
        payload = {}
        # BUG FIX: the original iterated over the tuple (1, MAX_NUM_RETRIES + 1)
        # instead of range(...), capping retries at two attempts; its loop-start
        # membership check also made a success on the final attempt fall
        # through to the error path.
        for retry_count in range(1, MAX_NUM_RETRIES + 1):
            logger.info(
                f"Retrieving ZInD json (attempt {retry_count} out of {MAX_NUM_RETRIES})"
            )
            result_dict = download_json_in_chunks(zind_url, headers, payload, dest_path)
            if value_key in result_dict:
                logger.info("Downloaded ZInD json successfully")
                break
        else:
            # for/else: no attempt produced a usable response.
            logger.error(
                "Could not download ZInD json, please check your credentials and internet connection"
            )
            return None

    return result_dict[value_key]
def input_fn(evaluate=False) -> tf.data.Dataset:
    """
    Build the character dataset from the train or evaluate generator.

    Args:
        evaluate: If True, use the evaluation generator; otherwise use the
            training generator.
    """
    if evaluate:
        generator_fn = evaluate_generator
    else:
        generator_fn = train_generator
    # Each element is ({'character': <SEQUENCE_LENGHT strings>}, label string).
    dataset = tf.data.Dataset.from_generator(
        generator=generator_fn,
        output_types=({'character': tf.string}, tf.string),
        output_shapes=({'character': (SEQUENCE_LENGHT,)}, ())
    )
    dataset = dataset.batch(64)
    dataset = dataset.prefetch(1)
    return dataset
def battle(players):
    """
    Tournament: the characters are split into two teams and fight in pairs.

    :param players: sequence of exactly ten participants accepted by ``duel``
    """
    winner_players = []
    # Shuffle the roster, then pair up fighters from the two halves.
    # (Removed an unused `count_players` local present in the original.)
    shuffle(players)
    command1 = players[0:5]
    command2 = players[5:10]
    for i in range(5):
        winner = duel(command1[i], command2[i])
        winner_players.append(winner)
    list_winners = ', '.join(winner_players)
    print(f'{Fore.YELLOW}{Back.BLUE}Победители турнира: {list_winners}{Style.RESET_ALL}')
def make_model_path(model_base_path: Text, model_name: Text,
                    version: int) -> Text:
    """Make a TFS-flavored model path.

    Args:
      model_base_path: A base path containing the directory of model_name.
      model_name: A name of the model.
      version: An integer version of the model.

    Returns:
      `{model_base_path}/{model_name}/{version}`.
    """
    # TF Serving expects the version directory name to be the stringified int.
    version_dir = str(version)
    return os.path.join(model_base_path, model_name, version_dir)
async def validate_input(data):
    """Validate the user input allows us to connect.

    Data has the keys from DATA_SCHEMA with values provided by the user.
    """
    host = data[CONF_HOST]
    harmony = await get_harmony_client_if_available(host)
    if not harmony:
        raise CannotConnect
    result = {CONF_NAME: find_best_name_for_remote(data, harmony)}
    result[CONF_HOST] = host
    result[UNIQUE_ID] = find_unique_id_for_remote(harmony)
    return result
def to_wiggle_pairs(filein, fileout, region_string, endcrop=False):
    """ Constructs fragment pile-ups in wiggle format using paired-end information

    :param filein: BAM file that contains paired-end reads
    :param fileout: base output file name with extension (.wig) omitted
    :param region_string: region of interest, formatted like this example: chr7:5527160-5532160
    :param endcrop: if True, don't count the first and last read of a fragment - the output for each
        position is effectively a safe underestimate for the number of spanning reads
    """
    # region_string is "chrom:start-end" (renamed `chr` local: it shadowed a builtin)
    chrom, sta, end = re.split('[:-]', region_string)
    sta = int(sta)
    end = int(end)
    wlist = [0] * (end - sta + 1)
    # Context managers guarantee both handles are closed even on error
    # (the original leaked them if parsing raised).
    with pysam.AlignmentFile(filein, 'rb') as bam, open(fileout + ".wig", "w") as wig:
        wig.write("variableStep\tchrom=%s\n" % chrom)
        for read1, read2 in read_pair_generator(bam, region_string):
            read = read_pair_align(read1, read2)
            if not read:
                continue
            # Increment only the covered slice instead of rebuilding the whole
            # list per fragment (the original was O(region length) per read).
            if endcrop:
                # Strict bounds: exclude the fragment's end positions.
                lo, hi = read[0] - sta + 1, read[-1] - sta
            else:
                lo, hi = read[0] - sta, read[-1] - sta + 1
            # Clamp to the region, matching the original's implicit bounds.
            for i in range(max(lo, 0), min(hi, len(wlist))):
                wlist[i] += 1
        for i, x in enumerate(wlist):
            wig.write("%i\t%i\n" % (sta + i, x))
def window_tukey(M, alpha=0.5):
    """Return a Tukey window, also known as a tapered cosine window.

    The window is flat over the middle and tapers with a cosine lobe over a
    fraction ``alpha`` of each end. Consistent with the general formula below
    (and with the standard Tukey definition), the limiting cases are a boxcar
    window for ``alpha=0`` and a Hann window for ``alpha=1``.

    :param M: number of points in the window
    :param alpha: taper fraction in [0, 1]
    :return: numpy array of length ``M``
    """
    # BUG FIX: the original returned the nonexistent ``numpy.hann`` for
    # alpha == 0 (AttributeError) and had the two endpoint cases swapped
    # relative to the formula's own limits.
    if alpha == 0:
        # No taper at all -> rectangular (boxcar) window.
        return window_boxcar(M)
    elif alpha == 1:
        # The taper covers the whole window -> Hann window.
        return numpy.hanning(M)
    n = numpy.arange(0, M)
    width = int(numpy.floor(alpha * (M - 1) / 2.0))
    # n1/n3 are the tapered ends; n2 is the flat middle section.
    n1 = n[0:width + 1]
    n2 = n[width + 1:M - width - 1]
    n3 = n[M - width - 1:]
    w1 = 0.5 * (1 + numpy.cos(numpy.pi * (-1 + 2.0 * n1 / alpha / (M - 1))))
    w2 = numpy.ones(n2.shape)
    w3 = 0.5 * (1 + numpy.cos(numpy.pi * (-2.0 / alpha + 1 + 2.0 * n3 / alpha / (M - 1))))
    return numpy.concatenate((w1, w2, w3))
def workshopsDF(symbol="", **kwargs):
    """This is a meeting or series of meetings at which a group of people engage in discussion and activity on a particular subject, product or service to gain hands-on experience.

    https://iexcloud.io/docs/api/#workshops

    Args:
        symbol (str): symbol to use
        **kwargs: forwarded unchanged to the underlying ``_baseDF`` helper

    Returns:
        presumably a pandas DataFrame (per the DF suffix and helper name) —
        confirm against ``_baseDF``.
    """
    # Premium Wall Street Horizon dataset, fetched via the shared _baseDF helper.
    return _baseDF(id="PREMIUM_WALLSTREETHORIZON_WORKSHOP", symbol=symbol, **kwargs)
def free_path(temp, diff, m_mol):
    """
    Calculates the free path for a molecule

    Based on free_path.m by Joni Kalliokoski 2014-08-13

    :param temp: temperature (K)
    :param diff: diffusion coefficient (m^2/s)
    :param m_mol: molar mass (kg/mol)
    :return: free path (m)
    """
    # Ratio inside the square root of the kinetic-theory expression.
    ratio = (np.pi * m_mol) / (8 * gas_const * temp)
    return 3 * diff * np.sqrt(ratio)
def comp_periodicity_spatial(self):
    """Compute the (anti)-periodicities of the machine in space domain

    Parameters
    ----------
    self : Machine
        A Machine object

    Returns
    -------
    pera : int
        Number of spatial periodicities of the machine over 2*pi
    is_apera : bool
        True if an anti-periodicity is possible after the periodicities
    """
    pole_pairs = self.get_pole_pair_number()
    # Spatial (anti-)periodicity of each lamination
    per_stator, has_antiper_stator = self.stator.comp_periodicity_spatial()
    per_rotor, has_antiper_rotor = self.rotor.comp_periodicity_spatial()
    # Machine periodicity: greatest common divisor of stator, rotor and
    # pole-pair count
    pera = int(gcd(gcd(per_stator, per_rotor), pole_pairs))
    # Anti-periodicity only holds if both sides allow it
    is_apera = bool(has_antiper_stator and has_antiper_rotor)
    return pera, is_apera
def download(date_array, tag, sat_id, data_path, user=None, password=None):
    """Routine to download F107 index data
    Parameters
    -----------
    date_array : (array-like)
        Sequence of dates to download data for (used by tag '').
    tag : (string or NoneType)
        Denotes type of file to load. Accepted types are '', 'all',
        'forecast' and '45day'.
        (default=None)
    sat_id : (string or NoneType)
        Specifies the satellite ID for a constellation. Not used.
        (default=None)
    data_path : (string or NoneType)
        Path to data directory. If None is specified, the value previously
        set in Instrument.files.data_path is used. (default=None)
    Returns
    --------
    Void : (NoneType)
        data downloaded to disk, if available.
    Note
    ----
    Called by pysat. Not intended for direct use by user.
    Warnings
    --------
    Only able to download current forecast data, not archived forecasts.
    """
    # download standard F107 data
    if tag == '':
        # download from LASP, by month
        import requests
        import json
        for date in date_array:
            # modify date to be the start of the month
            if date.day != 1:
                raise ValueError('The Download routine must be invoked with ' +
                                 'a freq="MS" option.')
            # download webpage
            dstr = 'http://lasp.colorado.edu/lisird/latis/'
            dstr += 'noaa_radio_flux.json?time%3E='
            dstr += date.strftime('%Y-%m-%d')
            dstr += 'T00:00:00.000Z&time%3C='
            dstr += (date + pds.DateOffset(months=1) -
                     pds.DateOffset(days=1)).strftime('%Y-%m-%d')
            dstr += 'T00:00:00.000Z'
            # data returned as json
            r = requests.get(dstr)
            # process
            raw_dict = json.loads(r.text)['noaa_radio_flux']
            data = pds.DataFrame.from_dict(raw_dict['samples'])
            times = [pysat.datetime.strptime(time, '%Y %m %d')
                     for time in data.pop('time')]
            data.index = times
            # replace fill with NaNs
            idx, = np.where(data['f107'] == -99999.0)
            data.iloc[idx,:] = np.nan
            # create file
            data.to_csv(os.path.join(data_path, 'f107_monthly_' +
                                     date.strftime('%Y-%m') + '.txt'))
    elif tag == 'all':
        # download from LASP, by year
        import requests
        import json
        # download webpage
        # NOTE(review): this branch duplicates the LASP request/parse logic
        # above but covers the full 1947-to-now range in one request.
        dstr = 'http://lasp.colorado.edu/lisird/latis/'
        dstr += 'noaa_radio_flux.json?time%3E='
        dstr += pysat.datetime(1947, 2, 13).strftime('%Y-%m-%d')
        dstr += 'T00:00:00.000Z&time%3C='
        now = pysat.datetime.utcnow()
        dstr += now.strftime('%Y-%m-%dT%H:%M:%S.000Z')
        # data returned as json
        r = requests.get(dstr)
        # process
        raw_dict = json.loads(r.text)['noaa_radio_flux']
        data = pds.DataFrame.from_dict(raw_dict['samples'])
        times = [pysat.datetime.strptime(time, '%Y %m %d')
                 for time in data.pop('time')]
        data.index = times
        # replace fill with NaNs
        idx, = np.where(data['f107'] == -99999.0)
        data.iloc[idx,:] = np.nan
        # create file
        data.to_csv(os.path.join(data_path, 'f107_1947_to_' +
                                 now.strftime('%Y-%m-%d') + '.txt'))
    elif tag == 'forecast':
        import requests
        print('This routine can only download the current forecast, not ' +
              'archived forecasts')
        # download webpage
        furl = 'https://services.swpc.noaa.gov/text/' + \
            '3-day-solar-geomag-predictions.txt'
        r = requests.get(furl)
        # parse text to get the date the prediction was generated
        date_str = r.text.split(':Issued: ')[-1].split(' UTC')[0]
        date = pysat.datetime.strptime(date_str, '%Y %b %d %H%M')
        # get starting date of the forecasts
        raw_data = r.text.split(':Prediction_dates:')[-1]
        forecast_date = pysat.datetime.strptime(raw_data[3:14], '%Y %b %d')
        # times for output data
        times = pds.date_range(forecast_date, periods=3, freq='1D')
        # string data is the forecast value for the next three days
        raw_data = r.text.split('10cm_flux:')[-1]
        raw_data = raw_data.split('\n')[1]
        # fixed-width columns: one value per forecast day
        val1 = int(raw_data[24:27])
        val2 = int(raw_data[38:41])
        val3 = int(raw_data[52:])
        # put data into nicer DataFrame
        data = pds.DataFrame([val1, val2, val3], index=times, columns=['f107'])
        # write out as a file
        data.to_csv(os.path.join(data_path, 'f107_forecast_' +
                                 date.strftime('%Y-%m-%d') + '.txt'))
    elif tag == '45day':
        import requests
        print('This routine can only download the current forecast, not ' +
              'archived forecasts')
        # download webpage
        furl = 'https://services.swpc.noaa.gov/text/45-day-ap-forecast.txt'
        r = requests.get(furl)
        # parse text to get the date the prediction was generated
        date_str = r.text.split(':Issued: ')[-1].split(' UTC')[0]
        date = pysat.datetime.strptime(date_str, '%Y %b %d %H%M')
        # get to the forecast data
        raw_data = r.text.split('45-DAY AP FORECAST')[-1]
        # grab AP part
        raw_ap = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[0]
        # clean up
        raw_ap = raw_ap.split('\n')[1:-1]
        # f107
        raw_f107 = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[-1]
        # clean up
        raw_f107 = raw_f107.split('\n')[1:-4]
        # parse the AP data
        # NOTE(review): line[0:7] and line[8:11] are re-read on every pass of
        # the inner 5-iteration loop, so the SAME (date, ap) pair is appended
        # five times per line.  The NOAA file carries five date/value pairs
        # per line at different column offsets -- this looks like a bug;
        # confirm against the 45-day file format.
        ap_times = []
        ap = []
        for line in raw_ap:
            for i in np.arange(5):
                ap_times.append(pysat.datetime.strptime(line[0:7], '%d%b%y'))
                ap.append(int(line[8:11]))
        # NOTE(review): same repeated-append issue as the AP loop above.
        f107 = []
        f107_times = []
        for line in raw_f107:
            for i in np.arange(5):
                f107_times.append(pysat.datetime.strptime(line[0:7], '%d%b%y'))
                f107.append(int(line[8:11]))
        # collect into DataFrame
        data = pds.DataFrame(f107, index=f107_times, columns=['f107'])
        data['ap'] = ap
        # write out as a file
        data.to_csv(os.path.join(data_path, 'f107_45day_' +
                                 date.strftime('%Y-%m-%d') + '.txt'))
    return | 5,332,173 |
def shuffle_tensor(input):
    """Return a new tensor holding the elements of ``input`` in random order.

    Args:
        input (`torch.Tensor`): input tensor.

    Returns:
        (`torch.Tensor`): tensor with the same elements, randomly permuted.
    """
    permutation = torch.randperm(input.nelement())
    return input[permutation]
def relaunch_failed_jobs(tasks, spec_file, verbose=False):
    """ Relaunch jobs that are failed from the given list """
    relaunched = 0  # number of newly launched jobs
    for idx, task in enumerate(tasks):
        job_id = str(task[-1])  # job id is stored as the last entry
        # Keep retrying the relaunch until it yields a usable job id
        while True:
            proc = relaunch_failed_job(job_id, spec_file)
            # NOTE: relaunch_failed_job returns None when the job is not failed
            if proc is None:
                break
            if verbose:
                print("==> Re-launching failed task: {} ...".format(task))
            new_id = get_job_id(proc)
            if new_id is not None:
                break
        # A new process was launched: record its id on the task
        if proc is not None:
            tasks[idx][-1] = new_id
            relaunched += 1
    return relaunched
def intensityTriWave(coeff,L,ang):
    """Simulate the intensity observed a distance L from
    the grating. Standard Zernike coefficients, L, and
    the diffraction angle ang are used as input.

    Returns the three interfering phase maps (phi1, phi2, phi3) and the
    resulting intensity map i over a 1000x1000 grid spanning [-1.1, 1.1].
    """
    k = 2*np.pi/405.e-6 #blue wavevector
    x,y = np.meshgrid(np.linspace(-1.1,1.1,1000),np.linspace(-1.1,1.1,1000))
    # Grating diffraction introduces a tilt of magnitude sin(ang)
    m = np.sin(ang)
    coeff = np.array(coeff).astype('float')
    # One copy of the coefficients per diffracted beam, each with its own
    # tilt terms (Zernike indices 1 and 2) set for a three-fold geometry
    coeff = np.tile(coeff,(3,1))
    coeff[0][2] = -m/2.
    coeff[1][1] = m/2.*np.sqrt(3)/2
    coeff[1][2] = m/4
    coeff[2][1] = -m/2.*np.sqrt(3)/2
    coeff[2][2] = m/4
    #Construct three phases, each evaluated on a laterally shifted grid
    #(shift proportional to propagation distance L)
    phi1 = zern.zernsurf(x,y-m*L,0.,0.,1.,coeff[0])
    phi2 = zern.zernsurf(x-m*L*np.sqrt(3)/2,y+m*L/2,0,0,1,coeff[1])
    phi3 = zern.zernsurf(x+m*L*np.sqrt(3)/2,y+m*L/2,0,0,1,coeff[2])
    #Transform into complex exponentials and combine
    i = np.abs(np.exp(1j*phi1*k)+np.exp(1j*phi2*k)+np.exp(1j*phi3*k))**2
    return phi1,phi2,phi3,i | 5,332,176 |
def do_auth_code_grant(fqdn, force_login=False, identity=None):
    """Perform an Oauth2 authorization grant consent flow.

    Builds a Globus Auth authorization URL (with PKCE code challenge),
    asks the user to visit it and paste back the resulting code, then
    exchanges the code for a token.

    :param fqdn: host name used to build the requested scope
    :param force_login: if True, force a fresh login (prompt=login)
    :param identity: if given, require the session to use this identity
    :return: a Token built from the token-endpoint response
    """
    # PKCE: verifier is kept locally, challenge is sent in the request
    code_verifier, code_challenge = _gen_code()
    scope = (SCOPE_FORMAT.format(fqdn=fqdn))
    host = GLOBUS_AUTH_HOST
    creds = _lookup_credentials()
    params = {
        'redirect_uri'         : 'https://' + host + '/v2/web/auth-code',
        'client_id'            : creds['client'],
        'access_type'          : 'offline',
        'state'                : '_default',
        'code_challenge'       : code_challenge,
        'code_challenge_method': 'S256',
        'response_type'        : 'code',
        'scope'                : scope
    }
    if identity is not None:
        params['session_message'] = 'The SSH service requires that you authenticate using this identity:'
        params['session_required_identities'] = str(identity)
    if force_login is True:
        params['prompt'] = 'login'
    url = "https://" + host + '/v2/oauth2/authorize?' + urlencode(params)
    print('Please go to this URL and login: {0}'.format(url))
    # NOTE(review): raw_input is Python 2 only -- presumably a py2/py3 shim
    # is in scope elsewhere; confirm, otherwise this breaks on Python 3.
    auth_code = raw_input(
        'Please enter the code you get after login here: ').strip()
    body = {
        'code'         : auth_code,
        'code_verifier': code_verifier,
        'redirect_uri' : 'https://' + host + '/v2/web/auth-code',
        'grant_type'   : 'authorization_code'
    }
    r = _authenticated_request(requests.post, '/v2/oauth2/token', data = body)
    # NOTE(review): ast.literal_eval on a JSON body works only while the
    # payload stays literal-compatible (no true/false/null); json.loads
    # would be the conventional choice -- confirm before changing.
    return Token(authorized_at=int(time.time()), **ast.literal_eval(r.text)) | 5,332,177 |
def decode(serialized: str) -> Node:
    """Decode JSON as a `Node`"""
    parsed = json.loads(serialized)
    if isinstance(parsed, dict):
        return dict_decode(parsed)
    return parsed
def validate_duration_unit(recv_duration_unit):
    """Check whether a duration unit is recognised (case-insensitive).

    Args:
        recv_duration_unit (str): unit name to validate.

    Returns:
        bool: True when the lower-cased unit appears in
        ``DaysAndUnitsList.units_list``, False otherwise.
    """
    # The membership test already yields the boolean; no if/else needed.
    return recv_duration_unit.lower() in DaysAndUnitsList.units_list
def make_word_list1():
    """Read lines from 'words.txt' and return the stripped words as a list.

    Returns:
        list[str]: one entry per line, with surrounding whitespace removed.
    """
    # Context manager guarantees the file is closed (the original opened
    # it without ever closing), and a comprehension replaces the
    # loop-and-append pattern.
    with open('words.txt') as fin:
        return [line.strip() for line in fin]
def pytest_configure(config):
    """
    Loads the test context since we are no longer using run.py

    NOTE: this file is Python 2 (print statements below).
    """
    # Monkey patch ssl so we do not verify ssl certs
    import ssl
    try:
        _create_unverified_https_context = ssl._create_unverified_context
    except AttributeError:
        # Legacy Python that doesn't verify HTTPS certificates by default
        pass
    else:
        # Handle target environment that doesn't support HTTPS verification
        ssl._create_default_https_context = _create_unverified_https_context
    # NOTE(review): this normalised `url` (with guaranteed trailing slash) is
    # never used -- configure() below re-reads config.getoption("url") raw.
    # Confirm whether the trailing-slash normalisation was meant to apply.
    url = config.getoption("url", default="http://localhost:9000/")
    if not url.endswith('/'):
        url += '/'
    import sys
    sys.dont_write_bytecode = True
    VinylDNSTestContext.configure(config.getoption("dns_ip"),
                                  config.getoption("dns_zone"),
                                  config.getoption("dns_key_name"),
                                  config.getoption("dns_key"),
                                  config.getoption("url"),
                                  config.getoption("teardown"))
    from shared_zone_test_context import SharedZoneTestContext
    # Only the xdist master (no workerinput) builds the shared fixture;
    # workers reuse what the master stood up.
    if not hasattr(config, 'workerinput'):
        print 'Master, standing up the test fixture...'
        # use the fixture file if it exists
        if os.path.isfile('tmp.out'):
            print 'Fixture file found, assuming the fixture file'
            SharedZoneTestContext('tmp.out')
        else:
            print 'No fixture file found, loading a new test fixture'
            ctx = SharedZoneTestContext()
            ctx.out_fixture_file("tmp.out")
    else:
        print 'This is a worker' | 5,332,181 |
def create_app(environment: str = None):
    """Create the Flask application.

    Args:
        environment: configuration name to load; when None the
            configuration is taken from the process environment.

    Returns:
        obj: The configured Flask application context.
    """
    app = Flask(__name__)
    if environment is None:
        app.config.from_object(ConfigurationFactory.from_env())
    else:
        app.config.from_object(ConfigurationFactory.get_config(environment))
    app.config.update(
        SQLALCHEMY_DATABASE_URI=ConfigurationFactory.get_config(
            environment).sqlalchemy_database_uri,
        OAUTH2_TOKEN_EXPIRES_IN={
            'authorization_code': 864000,
            'implicit': 3600,
            'password': 864000,
            'client_credentials': 60 * 5
        },
        SECRET_KEY=ConfigurationFactory.generate_secret_key()
    )
    is_testing = environment == 'TESTING'
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    @app.after_request
    def after_request(response):
        """ Logging every request. """
        if not is_testing:
            jsonstr = json.dumps({
                "remote_addr": request.remote_addr,
                "request_time": str(dt.utcnow()),
                "method": request.method,
                "path": request.path,
                "scheme": request.scheme.upper(),
                "statusCode": response.status_code,
                "status": response.status,
                "content_length": response.content_length,
                "user_agent": str(request.user_agent)
            })
            logging.info(jsonstr)
        return response
    def handle_errors(e):
        # Central error handler translating exceptions to API responses.
        logging.info(f"""{e}, app.py, line 83""")
        response_body = ResponseBody()
        if isinstance(e, ValidationError):
            return response_body.custom_response(status="Error", messages=[e.messages])
        elif isinstance(e, RecordNotFoundError):
            return response_body.not_found_response(e.record_id)
        else:
            # Try to recover an HTTP status code from a "NNN message" string.
            try:
                error_code = str(e).split(':')[0][:3].strip()
                error_text = str(e).split(':')[0][3:].strip()
                # NOTE(review): error_code is a str here, so this isinstance
                # check is always False and the except branch always runs --
                # looks like int(error_code) plus isdigit() was intended;
                # confirm before changing.
                if isinstance(error_code, int):
                    return response_body.custom_response(code=error_code, messages={'error': error_text})
                else:
                    raise Exception
            except Exception as e:
                return response_body.exception_response(str(e))
    if not is_testing:
        apm_enabled = bool(int(os.getenv('APM_ENABLED', '0')))
        if apm_enabled:
            app.config['ELASTIC_APM'] = {
                'SERVICE_NAME': 'authserver',
                'SECRET_TOKEN': os.getenv('APM_TOKEN', ''),
                'SERVER_URL': os.getenv('APM_HOSTNAME', ''),
            }
            apm = ElasticAPM(app)
    db.init_app(app)
    config_oauth(app)
    CORS(app)
    migrate = Migrate(app, db)
    app.register_blueprint(home_bp)
    app.register_blueprint(health_api_bp)
    app.register_blueprint(user_bp)
    app.register_blueprint(client_bp)
    app.register_blueprint(oauth2_bp)
    app.register_blueprint(role_bp)
    app.register_blueprint(scope_bp)
    app.register_blueprint(password_recovery_bp)
    app.register_error_handler(Exception, handle_errors)
    app.teardown_appcontext(teardown_appcontext)
    FlaskInjector(app=app, modules=[
        ConfigurationModule, GraphDatabaseModule, MailServiceModule])
    return app | 5,332,182 |
def metropolis(data, likelihood, priors, samples=1000, par_init=None,
               width_prop=.5):
    """
    Returns the posterior function of the parameters given the likelihood and
    the prior functions. Returns also the number of the accepted jumps in the
    Metropolis-Hastings algorithm.

    Notes:
    - <width_prop> should be chosen so to result in about 50% accepted jumps.
    - <posterior> has shape (samples, n_par).
    - priors must be from function "prior_dist".
    - for numerical stability the computation is carried out using logarithms.
    """
    # Current parameters
    n_par = len(priors)
    par_curr = np.zeros(n_par) if (par_init is None) else np.asarray(par_init)
    # Init quantities
    jumps = 0
    par_prop = np.zeros(n_par)
    posterior = np.zeros((samples, n_par))
    posterior[0, :] = par_curr
    # Current priors (product over parameters, computed via a log-sum)
    bb = 0.0
    for i in range(n_par):
        bb += np.log(prior_dist(priors[i], par_curr[i]))
    prior_curr = np.exp(bb)
    # Current likelihood (product over data points, computed via a log-sum)
    bb = np.log(likelihood(data, par_curr)).sum()
    likelihood_curr = np.exp(bb)
    # Current posterior probability (un-normalised)
    p_curr = likelihood_curr * prior_curr
    # Loop <samples> times
    for sample in range(samples):
        # Randomnly pick the proposed parameters from a Gaussian centred on
        # the current value with std width_prop
        for i in range(n_par):
            par_prop[i] = stats.norm(par_curr[i], width_prop).rvs()
        # Evaluate priors with the proposed parameters
        bb = 0.0
        for i in range(n_par):
            bb += np.log(prior_dist(priors[i], par_prop[i]))
        prior_prop = np.exp(bb)
        # Evaluate likelihood with the proposed parameters
        bb = np.log(likelihood(data, par_prop)).sum()
        likelihood_prop = np.exp(bb)
        # Proposed posterior probability
        p_prop = likelihood_prop * prior_prop
        # Randomly accept or reject the jump (Metropolis acceptance ratio).
        # NOTE(review): p_curr can underflow to 0 for very unlikely starting
        # points, which would make this division blow up -- confirm inputs.
        p_accept = p_prop / p_curr
        if ((np.random.uniform() < p_accept)):
            # Update quantities if jump accepted
            jumps += 1
            par_curr = par_prop.copy()
            prior_curr = prior_prop
            likelihood_curr = likelihood_prop
            p_curr = p_prop
        # Save (accepted and rejected) parameters
        posterior[sample, :] = par_curr
    return posterior, jumps | 5,332,183 |
def _get_single_spec_df(reference_dict, mapping_dict, spectrum):
"""Primary method for reading and storing information from a single spectrum.
Args:
reference_dict (dict): dict with reference columns to be filled in
mapping_dict (dict): mapping of engine level column names to ursgal unified column names
spectrum (xml Element): namespace of single spectrum with potentially multiple PSMs
Returns:
(pd.DataFrame): dataframe containing spectrum information
"""
spec_records = []
spec_level_dict = reference_dict.copy()
spec_level_dict["spectrum_id"] = spectrum.attrib["spectrumID"].split("scan=")[-1]
# Iterate children
for psm in spectrum.findall(".//{*}SpectrumIdentificationItem"):
psm_level_dict = spec_level_dict.copy()
psm_level_dict.update(
{mapping_dict[k]: psm.attrib[k] for k in mapping_dict if k in psm.attrib}
)
cv_param_info = {
c.attrib["name"]: c.attrib["value"] for c in psm.findall(".//{*}cvParam")
}
psm_level_dict.update(
{
mapping_dict[k]: cv_param_info[k]
for k in mapping_dict
if k in cv_param_info
}
)
spec_records.append(psm_level_dict)
return pd.DataFrame(spec_records) | 5,332,184 |
def in2func(inp):
    """Function converts input expression to a mathematical expression.

    Returns a callable f(x) that evaluates the validated expression.
    """
    # Validate Function
    if inp == "":
        raise ValueError( f"Enter a function to plot!")
    for char in re.findall("[a-zA-Z_]+", inp):
        if char not in allowed_inputs:
            # Error will communicate over stderr pipeline
            raise ValueError( f"'{char}' is not in the allowed as an input character!")
            # NOTE(review): this return is unreachable (placed after raise)
            # and, being inside the loop, would in any case abort validation
            # after the first identifier -- confirm it can be removed.
            return
    # Replace allowed chars with suitable methods for eval compiling.
    for before, after in replaced_inputs.items():
        inp = inp.replace(before, after)
    # Edge Case: When no 'x' presents in the function
    if "x" not in inp:
        inp = f"({inp})*(x**0)"
    # Return a function to be used for y value calculation.
    # SECURITY: eval here is only as safe as the allow-list above -- any
    # identifier not in allowed_inputs is rejected, but the allow-list must
    # stay free of dangerous names.
    def func(x):
        return eval(inp)
    return func | 5,332,185 |
def filter_posts(posts: list, parsing_date: datetime) -> list:
    """Drop posts that do not fall in the month being parsed.

    Args:
        posts: post dicts carrying a unix timestamp under the 'date' key.
        parsing_date: any datetime inside the target month.

    Returns:
        The posts whose 'date' lies in the same month *and year* as
        parsing_date.
    """
    result = []
    for post in posts:
        post_date = datetime.fromtimestamp(post['date'])
        # Compare the year as well: the original matched only the month,
        # which also kept posts from the same month of a different year.
        if (post_date.year, post_date.month) == (parsing_date.year, parsing_date.month):
            result.append(post)
    return result
def parse_bing():
    """
    Parse the Bing home page for today's wallpaper link, matched via regex.

    :return: IMG_info (copyright/description text), IMG_url (absolute URL)
    """
    base_url = 'https://cn.bing.com/'
    language_parameter = '?mtk=zh-CN'
    # base_url = 'https://www.bing.com/?mkt=zh-CN'
    try:
        resp = requests.get(base_url+language_parameter, headers=header).text
    except RequestException:
        # NOTE(review): after notifying MASTER, execution falls through with
        # `resp` unbound, so the re.search below raises NameError -- a
        # return/raise after send_text looks intended; confirm.
        send_text(MASTER, "connectionError")
    # print(resp)
    # The wallpaper URL is carried in the bgLink element's href
    match_url = re.search('id="bgLink".*?href="(.*?)"', resp, re.S)
    info = re.search('class="sc_light" title="(.*?)".*?"主页图片信息"', resp, re.S)
    print(info)
    # Fallback: some page variants carry the description in a JSON blob
    if not info:
        info = re.search('"copyright":"(.*?)","copyrightlink"', resp, re.S)
        print('-'*40)
        print(info)
    IMG_info = str(info.groups(1)).strip("(),'")
    IMG_url = base_url + str(match_url.groups(1)).strip("()',")
    print(IMG_info, "----", IMG_url)
    return IMG_info, IMG_url | 5,332,187 |
def quadsum(*args, **kwargs):
    """Sum of array elements in quadrature.

    Behaves exactly like ``numpy.sum`` except that the elements of the first
    argument are squared before summing and the square root of the resulting
    sum is returned, i.e. ``sqrt(sum(a**2, ...))``.  All positional and
    keyword arguments after the first are forwarded unchanged to
    ``numpy.sum`` (``axis``, ``dtype``, ``out``, ``keepdims``, ...), so axis
    handling and return shapes follow ``numpy.sum``'s documented semantics.

    Examples
    --------
    >>> quadsum([3.0, 4.0])
    5.0
    """
    squared = np.asarray(args[0]) ** 2
    return np.sqrt(np.sum(squared, *args[1:], **kwargs))
def open_file(path):
    """Read and return the entire contents of a UTF-8 text file.

    Args:
        path: path of the file to read.

    Returns:
        str: the decoded file contents.
    """
    # Context manager closes the handle; the original called codecs.open
    # without ever closing it, leaking the file descriptor.  codecs.open is
    # kept (rather than builtin open) to preserve its exact newline
    # behaviour for existing callers.
    with codecs.open(path, encoding='utf8') as handle:
        return handle.read()
def plot_anomaly(ts: TimeSeries, anomaly, normal, lb, ub):
    """
    Plotting the data points after classification as anomaly/normal.
    Data points classified as anomaly are represented in red and normal in green.

    :param ts: the TimeSeries being plotted (provides index, name, target)
    :param anomaly: points classified as anomalous (needs an 'observed' column)
    :param normal: points classified as normal
    :param lb: lower Brutlag confidence band, aligned with ts.data.index
    :param ub: upper Brutlag confidence band, aligned with ts.data.index
    """
    plt.figure(figsize=(12, 8))
    plt.plot(normal.index, normal, 'o', color='green')
    plt.plot(anomaly.index, anomaly[['observed']], 'o', color='red')
    # Plotting brutlag confidence bands
    plt.plot(ts.data.index, ub, linestyle='--', color='grey')
    plt.plot(ts.data.index, lb, linestyle='--', color='grey')
    # Formatting the graph
    plt.legend(['Normal', 'Anomaly', 'Upper Bound', 'Lower Bound'])
    plt.gcf().autofmt_xdate()
    plt.title(ts.name)
    plt.xlabel('Datetime')
    plt.ylabel(ts.target)
    # NOTE: show() blocks until the figure window is closed in interactive
    # backends.
    plt.show() | 5,332,190 |
def get_api_status():
    """Report that the API is reachable as a small HTML snippet."""
    status_markup = "<h4>API Is Up</h4>"
    return status_markup
def loadtxt(filetxt, storage):
    """ Convert txt file into Blaze native format """
    # NOTE(review): the constructed Array is neither returned nor bound --
    # presumably Blaze persists it through the storage-backed params;
    # confirm, otherwise this call is a no-op and should return the Array.
    # Also note this function shadows the name numpy.loadtxt it wraps.
    Array(np.loadtxt(filetxt), params=params(storage=storage)) | 5,332,192 |
def do_image_delete(gc, args):
    """Delete specified image.

    Attempts to delete every image id in args.id; failures are reported to
    stderr and the command exits non-successfully only after trying all ids.
    """
    failure_flag = False
    for args_id in args.id:
        try:
            gc.images.delete(args_id)
        except exc.HTTPForbidden:
            msg = "You are not permitted to delete the image '%s'." % args_id
            utils.print_err(msg)
            failure_flag = True
        except exc.HTTPNotFound:
            msg = "No image with an ID of '%s' exists." % args_id
            utils.print_err(msg)
            failure_flag = True
        except exc.HTTPConflict:
            msg = "Unable to delete image '%s' because it is in use." % args_id
            utils.print_err(msg)
            failure_flag = True
        except exc.HTTPException as e:
            # Catch-all for any other HTTP-level failure
            msg = "'%s': Unable to delete image '%s'" % (e, args_id)
            utils.print_err(msg)
            failure_flag = True
    # Exit with an error status if any deletion failed
    if failure_flag:
        utils.exit() | 5,332,193 |
def save_changes( book, form, new = False):
    """
    Save the changes to a given book

    :param book: the Book model instance to update
    :param form: form whose fields are copied onto the book
    :param new: when True the book is added to the session and committed
    """
    # NOTE(review): this import is unused -- confirm and remove.
    import datetime
    book.title = form.title.data
    book.publisher = form.publisher.data
    book.author = form.author.data
    book.isbn13 = form.isbn13.data
    # Saving always clears any rental state
    book.renter_name = ''
    book.rented_time = None
    book.category = form.category.data
    # NOTE(review): commit only happens for new books -- presumably updates
    # to existing books are committed by the caller; confirm, otherwise
    # edits are silently dropped at session end.
    if new:
        # Add the book to the database
        db.session.add(book)
        db.session.commit() | 5,332,194 |
def kmeans_anchors(train_path='../datasets/ego-hand/train.txt', k_clusters=9, img_size=416, save_path=None):
    """Generate anchors for the dataset.

    Normalised labels: cls id, center x, center y, width, height

    :param train_path: text file listing training image paths; labels are
        expected in a sibling 'labels' directory with .txt extension
    :param k_clusters: number of anchor boxes to produce
    :param img_size: network input size the anchors are scaled to
    :param save_path: if given, diagnostic plots are written there (the
        directory is recreated from scratch)
    :return: (k_clusters, 2) integer array of anchor (w, h) pairs
    """
    # Get paths of training images and labels
    ann_paths = []
    train_name = os.path.basename(train_path)
    ds_path = train_path[:-len(train_name)]
    with open(train_path, 'r') as f:
        for line in f:
            line = line[:-1]
            img_name = os.path.basename(line)
            ann_path = os.path.join(ds_path + 'labels', img_name[:-3] + 'txt')
            ann_paths.append(ann_path)
    # Get NORMALISED widths and heights from annotation files *.txt
    ws = []
    hs = []
    for ann_path in ann_paths:
        with open(ann_path, 'r') as f:
            for line in f:
                line = line[:-1].split()
                w, h = [float(i) for i in line[-2:]]
                ws.append(w)
                hs.append(h)
    # Generate input data as [w, h] pairs
    ws = np.asarray(ws)
    hs = np.asarray(hs)
    x = [ws, hs]
    x = np.asarray(x).transpose()
    # Plot the [w, h] pairs in scatter graph
    if save_path:
        # New folder (any previous contents are deleted)
        if os.path.exists(save_path):
            shutil.rmtree(save_path)
        os.makedirs(save_path)
        plt.figure(dpi=300)
        plt.scatter(x[:, 0], x[:, 1], label='True position')
        plt.xlabel('Width')
        plt.ylabel('Height')
        plt.savefig(save_path + '/True position.pdf')
    # Kmeans clustering; cluster centres become the anchors
    kmeans = KMeans(n_clusters=k_clusters).fit(x)
    anchors = kmeans.cluster_centers_
    anchors = anchors * img_size
    # Plot scatter graph of [w, h] pairs
    if save_path:
        plt.figure(dpi=300)
        plt.scatter(x[:, 0], x[:, 1], c=kmeans.labels_, cmap='viridis')
        plt.scatter(anchors[:, 0]/img_size, anchors[:, 1]/img_size, color='#a23500')
        # plt.title("Width-height Pair Position")
        plt.xlabel('Width')
        plt.ylabel('Height')
        # plt.xlim((0, 1))
        # plt.ylim((0, 1))
        plt.savefig(save_path + '/anchor-kmeans-ori.pdf')
        plt.figure(dpi=300)
        plt.scatter(x[:, 0]*img_size, x[:, 1]*img_size, c=kmeans.labels_, cmap='viridis')
        plt.scatter(anchors[:, 0], anchors[:, 1], color='#a23500')
        # plt.title("Width-height Pair Position (Scaled to {}*{})".format(img_size, img_size))
        plt.xlabel('Width')
        plt.ylabel('Height')
        # plt.xlim((0, img_size))
        # plt.ylim((0, img_size))
        plt.savefig(save_path + '/anchor-kmeans.pdf')
    anchors = np.rint(anchors)
    # Plot anchors
    if save_path:
        fig, ax = plt.subplots(dpi=300)
        for k in range(k_clusters):
            rect = patches.Rectangle(
                (img_size/2 - anchors[k, 0]/2, img_size/2 - anchors[k, 1]/2),
                anchors[k, 0], anchors[k, 1],
                linewidth=1,
                edgecolor='tab:blue',
                facecolor='tab:blue',
                fill=None
            )
            ax.add_patch(rect)
        ax.set_aspect(1.0)
        plt.axis([0, img_size, 0, img_size])
        # plt.title("Anchor Boxes (Scaled to {}*{})".format(img_size, img_size))
        plt.xlabel("Width")
        plt.ylabel("Height")
        plt.savefig(save_path + "/anchor-boxes-rects.pdf")
    # Print and save anchors
    # NOTE(review): ndarray.sort(axis=0) sorts each column independently, so
    # the (w, h) pairing of each anchor is lost here; if paired anchors are
    # required downstream, sort rows by area instead -- confirm intent.
    anchors.sort(axis=0)
    anchors = anchors.astype(int)
    print("Anchors are: \n{}".format(anchors))
    if save_path:
        with open(os.path.join(ds_path, 'anchors.txt'), 'w') as f:
            for w, h in anchors:
                f.write("{}, {}\n".format(w, h))
        print("\nAnchors saved to {}".format(os.path.join(ds_path, 'anchors.txt')))
    return anchors | 5,332,195 |
def test_attach_create_resource(fc_node_builder):
    """ Make sure upstream resource-create operator insertion works correctly

    Builds a small dependency graph where three nodes require the 'cluster'
    resource, attaches a create-resource node, and checks that bundler nodes
    are inserted per distinct upstream dependency set.
    """
    node0 = OperatorNode({}, {
        'name': 'test0',
        'type': 'none',
    })
    node1 = OperatorNode({}, {
        'name': 'test1',
        'type': 'none',
    })
    node2 = OperatorNode({}, {
        'name': 'test2',
        'type': 'none',
        'upstream_dependencies': ['test0'],
    })
    node3 = OperatorNode({}, {
        'name': 'test3',
        'type': 'none',
        'upstream_dependencies': ['test2'],
        'requires_resources': ['cluster'],
    })
    node4 = OperatorNode({}, {
        'name': 'test4',
        'type': 'none',
        'upstream_dependencies': ['test2', 'test1'],
        'requires_resources': ['cluster'],
    })
    node5 = OperatorNode({}, {
        'name': 'test4',
        'type': 'none',
        'upstream_dependencies': ['test2'],
        'requires_resources': ['cluster'],
    })
    graph = _GraphUtil.build_subgraph(
        [node0, node1, node2, node3, node4, node5])
    create_resource_node = OperatorNode({}, {
        'name': 'create_cluster',
        'type': 'none',
    })
    resource = MockResource(name='cluster',
                            create_operator=create_resource_node,
                            destroy_operator=None,
                            disable_sentinel_node=None)
    _GraphUtil.attach_create_resource(resource, graph, fc_node_builder)
    # All three resource-requiring nodes form the upstream boundary
    assert set(_GraphUtil.upstream_resource_boundary(
        'cluster', graph)) == set([node3, node4, node5])
    cluster_upstream_deps = _GraphUtil.upstream_dependency_set(
        create_resource_node, graph)
    # There should be 2 bundlers because there are 2 distinct dependency sets among
    # the nodes in the upstream resource boundary
    assert len(cluster_upstream_deps) == 2
    node3_upstream = _GraphUtil.upstream_dependency_set(node3, graph)
    assert len(node3_upstream) == 2
    # node5 shares node3's dependency set, so they share a bundler
    assert _GraphUtil.upstream_dependency_set(node5, graph) == node3_upstream
    assert create_resource_node in node3_upstream
    node3_bundler = list(node3_upstream - frozenset([create_resource_node]))[0]
    assert _GraphUtil.upstream_dependency_set(
        node3_bundler, graph) == frozenset([node2])
    node4_upstream = _GraphUtil.upstream_dependency_set(node4, graph)
    assert len(node4_upstream) == 2
    assert create_resource_node in node4_upstream
    node4_bundler = list(node4_upstream - frozenset([create_resource_node]))[0]
    assert _GraphUtil.upstream_dependency_set(
        node4_bundler, graph) == frozenset([node1, node2])
    create_resource_downstream_deps = _GraphUtil.downstream_dependency_set(
        create_resource_node, graph)
    assert create_resource_downstream_deps == frozenset([node3, node4, node5]) | 5,332,196 |
def test_peak_finder():
    """Tests that the correct indices, voltage, and dq/dv value
    are returned for peaks found with peak_finder"""
    result = peak_finder(test_cycle1_df, 'c', 5, 3, test_datatype, 5, 0.4)
    # peak_finder returns (indices, voltages, heights)
    assert len(result) == 3
    peak_indices = result[0]
    peak_sigx_volts = result[1]
    peak_heights = result[2]
    assert peak_indices == [2]
    # one peak should be found at the second index
    assert type(peak_sigx_volts) == list
    assert type(peak_heights) == list
    assert peak_heights == [8]
    assert peak_sigx_volts == [16]
    return | 5,332,197 |
def fgt_set_pressureUnit(pressure_index, unit):
    """Override the default pressure unit for a single pressure channel"""
    # Build a NUL-terminated C char array from the unit string
    unit_array = (c_char * (len(unit)+1))(*([c_char_converter(c) for c in unit]))
    c_error = c_ubyte(lib.fgt_set_pressureUnit(c_uint(pressure_index), unit_array))
    # NOTE(review): the trailing comma makes this return a 1-tuple
    # (error_code,) rather than the bare error code -- confirm callers
    # expect to unpack it.
    return c_error.value, | 5,332,198 |
def login_basic():
    """fails because not using formkey

    NOTE: Python 2 code (urllib/urllib2 and print statement).  POSTs the
    login form credentials and prints the URL the server redirects to.
    """
    data = {'email': LOGIN_EMAIL, 'password': LOGIN_PASSWORD}
    encoded_data = urllib.urlencode(data)
    request = urllib2.Request(LOGIN_URL, encoded_data)
    response = urllib2.urlopen(request)
    # geturl() reflects any redirect the server performed
    print response.geturl() | 5,332,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.