Dataset columns (⌀ marks nullable columns):

| Column | Type | Range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
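Each row below pairs this metadata with the raw file text in `content`. As a minimal, hedged sketch of how rows with this schema could be read programmatically, assuming the Hugging Face `datasets` library and a placeholder dataset name (neither appears in this dump):

```python
# Sketch only: iterating rows with the schema above (the dataset name is a placeholder).
from datasets import load_dataset

ds = load_dataset("org/python-files-dump", split="train", streaming=True)  # hypothetical name
for row in ds:
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:200])  # raw source text of the file
    break
```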
Row 1: hexsha 59ac79b02165f363e79977cf2e4e24693e6b4703 | size 1,551 | ext py | lang Python | path Human/imbalance_treatment.py | repo dipta007/SEMal @ 8c89e385ff946a2f9b9c5cefff3e0becc8e6a455 | licenses ["MIT"] | stars 1 (2020-07-02T00:39:15.000Z to 2020-07-02T00:39:15.000Z) | issues 7 (2020-11-13T18:35:24.000Z to 2022-02-26T22:50:33.000Z) | forks 1 (2020-10-12T01:51:22.000Z to 2020-10-12T01:51:22.000Z)

import numpy as np
from tqdm import tqdm
import math

K = 100


def eucledian_distance(x, y):
    sum = 0
    for i in range(len(x)):
        now = (x[i] - y[i]) * (x[i] - y[i])
        sum += now
    return math.sqrt(sum)


def eucledian_distance_mat(x, y):
    x = np.asarray(x, np.float)
    y = np.asarray(y, np.float)
    now = x - y
    now = now.dot(now)
    return math.sqrt(np.sum(now))


def take_first(ele):
    return ele[0]


def knn_imbalance(x_p, x_n, y_n):
    print(len(x_p), len(x_n))
    formatted_n_x = []
    formatted_n_y = []
    for (neg_ind, x) in enumerate(tqdm(x_n)):
        now = []
        for i in x_p:
            now.append((eucledian_distance_mat(x, i), 1))
        for (neg_i, i) in enumerate(x_n):
            if neg_ind != neg_i:
                now.append((eucledian_distance_mat(x, i), 0))
        now.sort(key=take_first)
        flg = 0  # no positive sample seen yet among the K nearest neighbours
        for (ind, ele) in enumerate(now):
            if ind == K:
                break
            if ele[1] == 1:
                flg = 1
                break
        if flg == 0:
            formatted_n_x.append(x)
            formatted_n_y.append(y_n[neg_ind])
    print(len(x_p), len(formatted_n_x), len(formatted_n_y))
    return formatted_n_x, formatted_n_y


if __name__ == '__main__':
    npzfile = np.load('../data/features_human.npz', allow_pickle=True)
    X_p = npzfile['arr_0']
    Y_p = npzfile['arr_1']
    X_n = npzfile['arr_2']
    Y_n = npzfile['arr_3']
    X_n, Y_n = knn_imbalance(X_p, X_n, Y_n)
    np.savez('../data/knn_features_human.npz', X_p, Y_p, X_n, Y_n)
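The nested loops in `knn_imbalance` compute every pairwise distance one element at a time. As a hedged aside, the same neighbourhood test could be vectorised with SciPy; `cdist`, the helper name, and the array assumptions below are illustrative, not part of the original file:

```python
# Sketch only: vectorised version of the K-nearest-neighbour check above
# (assumes x_p and x_n are 2-D float arrays; `nearest_k_has_positive` is a hypothetical helper).
import numpy as np
from scipy.spatial.distance import cdist


def nearest_k_has_positive(x_n, x_p, k=100):
    d_np = cdist(x_n, x_p)            # negative-to-positive distances
    d_nn = cdist(x_n, x_n)            # negative-to-negative distances
    np.fill_diagonal(d_nn, np.inf)    # ignore each sample's distance to itself
    all_d = np.hstack([d_np, d_nn])
    kth = np.partition(all_d, k - 1, axis=1)[:, k - 1]  # k-th smallest distance per negative
    return d_np.min(axis=1) <= kth    # True if a positive sits within the k nearest neighbours
```

Negatives for which this returns False correspond to the ones kept by the `flg == 0` branch above.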
Row 1 stats: avg_line_length 24.234375 | max_line_length 70 | alphanum_fraction 0.557705

Row 2: hexsha e0edec036657775bff9ffda293a44c74d7feb55e | size 4,051 | ext py | lang Python | path losses.py | repo srikrishnapriyad/youtube-8m @ 463f9abb926db956ed5191ab26d56fa6f3b15030 | licenses ["Apache-2.0"] | stars 2 (2019-07-25T12:53:10.000Z to 2019-08-18T16:26:23.000Z) | issues null | forks null

# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides definitions for non-regularized training or test losses."""
import tensorflow as tf
class BaseLoss(object):
"""Inherit from this class when implementing new losses."""
def calculate_loss(self, unused_predictions, unused_labels, **unused_params):
"""Calculates the average loss of the examples in a mini-batch.
Args:
unused_predictions: a 2-d tensor storing the prediction scores, in which
each row represents a sample in the mini-batch and each column
represents a class.
unused_labels: a 2-d tensor storing the labels, which has the same shape
as the unused_predictions. The labels must be in the range of 0 and 1.
unused_params: loss specific parameters.
Returns:
A scalar loss tensor.
"""
raise NotImplementedError()
class CrossEntropyLoss(BaseLoss):
"""Calculate the cross entropy loss between the predictions and labels."""
def calculate_loss(self,
predictions,
labels,
label_weights=None,
**unused_params):
with tf.name_scope("loss_xent"):
epsilon = 1e-5
float_labels = tf.cast(labels, tf.float32)
cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
1 - float_labels) * tf.log(1 - predictions + epsilon)
cross_entropy_loss = tf.negative(cross_entropy_loss)
if label_weights is not None:
cross_entropy_loss *= label_weights
return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
class HingeLoss(BaseLoss):
"""Calculate the hinge loss between the predictions and labels.
Note the subgradient is used in the backpropagation, and thus the optimization
may converge slower. The predictions trained by the hinge loss are between -1
and +1.
"""
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
with tf.name_scope("loss_hinge"):
float_labels = tf.cast(labels, tf.float32)
all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
hinge_loss = tf.maximum(
all_zeros,
tf.scalar_mul(b, all_ones) - sign_labels * predictions)
return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
class SoftmaxLoss(BaseLoss):
"""Calculate the softmax loss between the predictions and labels.
The function calculates the loss in the following way: first we feed the
predictions to the softmax activation function and then we calculate
the minus linear dot product between the logged softmax activations and the
normalized ground truth label.
It is an extension to the one-hot label. It allows for more than one positive
labels for each sample.
"""
def calculate_loss(self, predictions, labels, **unused_params):
with tf.name_scope("loss_softmax"):
epsilon = 10e-8
float_labels = tf.cast(labels, tf.float32)
# l1 normalization (labels are no less than 0)
label_rowsum = tf.maximum(
tf.reduce_sum(float_labels, 1, keep_dims=True), epsilon)
norm_float_labels = tf.div(float_labels, label_rowsum)
softmax_outputs = tf.nn.softmax(predictions)
softmax_loss = tf.negative(
tf.reduce_sum(
tf.multiply(norm_float_labels, tf.log(softmax_outputs)), 1))
return tf.reduce_mean(softmax_loss)
Row 2 stats: avg_line_length 39.330097 | max_line_length 80 | alphanum_fraction 0.707726

Row 3: hexsha 876b6b46f7508b81d9039d520b2fde84d343a016 | size 2,004 | ext py | lang Python | path dexctl/client.py | repo mikedougherty/dexctl-py @ fd77fa0839ab0891d0f9c4cd14bea2ae1b969600 | licenses ["MIT"] | stars 1 (2020-03-26T22:52:05.000Z to 2020-03-26T22:52:05.000Z) | issues null | forks null

import typing
import grpc
from .pb import DexStub
from .pb import api_pb2
class DexClient:
OAuth2Client = api_pb2.Client
def __init__(self, channel):
self.stub = DexStub(channel)
def GetVersion(self) -> api_pb2.VersionResp:
return self.stub.GetVersion(api_pb2.VersionReq())
def CreateClient(self, **kwargs) -> api_pb2.CreateClientResp:
return self.stub.CreateClient(api_pb2.CreateClientReq(**kwargs))
def DeleteClient(self, **kwargs) -> api_pb2.DeleteClientResp:
return self.stub.DeleteClient(api_pb2.DeleteClientReq(**kwargs))
@classmethod
def oauth2client_from_k8s(cls, obj: typing.Mapping) -> api_pb2.Client:
keep_keys = set(cls.OAuth2Client.DESCRIPTOR.fields_by_name.keys())
for key in list(obj.keys()):
if key not in keep_keys:
obj.pop(key)
return cls.OAuth2Client(**obj)
class TLSConfig:
ca_cert: typing.ByteString = None
tls_cert: typing.ByteString = None
tls_key: typing.ByteString = None
def __init__(
self,
ca_cert: typing.BinaryIO,
tls_cert: typing.BinaryIO,
tls_key: typing.BinaryIO,
):
self.ca_cert = ca_cert.read()
self.tls_cert = tls_cert.read()
self.tls_key = tls_key.read()
class DexServerConfig:
"""
Common variables for connecting to dex
"""
tls_config: TLSConfig = None
dex_address: typing.AnyStr = "localhost:5000"
def __init__(
self,
ca_cert: typing.BinaryIO,
tls_cert: typing.BinaryIO,
tls_key: typing.BinaryIO,
dex_address: typing.AnyStr,
):
self.tls_config = TLSConfig(ca_cert, tls_cert, tls_key)
self.dex_address = dex_address
def create_dex_client(self) -> DexClient:
creds = grpc.ssl_channel_credentials(
self.tls_config.ca_cert, self.tls_config.tls_key, self.tls_config.tls_cert
)
return DexClient(grpc.secure_channel(self.dex_address, creds))
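A minimal usage sketch for the classes above; the certificate paths and gRPC address are placeholders, not part of the module:

```python
# Sketch only: wiring DexServerConfig and DexClient together (paths and address are hypothetical).
with open("ca.crt", "rb") as ca, open("client.crt", "rb") as crt, open("client.key", "rb") as key:
    config = DexServerConfig(ca, crt, key, dex_address="127.0.0.1:5557")

client = config.create_dex_client()
print(client.GetVersion())
```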
Row 3 stats: avg_line_length 27.081081 | max_line_length 86 | alphanum_fraction 0.661677

Row 4: hexsha 2bac0087005ec9cabec5caf6e0ad3b560c9138b4 | size 4,782 | ext py | lang Python | path iCloud/remove_duplicates.py | repo ZacksAmber/utils @ 96b6368de5b5006b56243b59e6d7c6deb2ae94c3 | licenses ["MIT"] | stars null | issues null | forks null

#!/usr/bin/env python3
# -*- coding:utf-8 -*-
################################################################################
# File Name: remove_duplicates.py #
# File Path: /remove_duplicates.py #
# Created Date: 2022-02-15 #
# ----- #
# Company: Zacks Shen #
# Author: Zacks Shen #
# Blog: https://zacks.one #
# Email: <zacks.shen@gmail.com> #
# Github: https://github.com/ZacksAmber #
# ----- #
# Last Modified: 2022-03-08 3:35:39 pm #
# Modified By: Zacks Shen <zacks.shen@gmail.com> #
# ----- #
# Copyright (c) 2022 Zacks Shen #
################################################################################
import os
import sys
import time
def remove_duplicates(dirpath, filenames, mark=2):
"""remove_duplicates removes the duplicated files or dirs with specific mark that marked by MacOS.
Args:
mark (int, optional): The mark of duplicate files or dirs. Defaults to 2.
"""
os.chdir(dirpath)
files = sorted(filenames)
duplicates, new_files = [], []
for f in files:
if f' {mark}' in f:
duplicates.append(f)
else:
new_files.append(f)
# duplicates may be:
# - xx 2.doc
# - xx.yy 2.doc
# - xx 2
# - .xx 2.doc.icloud
# - xx.yy 2.doc.icloud
# - xx 2.icloud
for duplicate in duplicates:
# try:
# filename, ext = duplicate.rsplit('.', 1)
# filename = filename.rsplit(' 2')[0]
# file = filename + '.' + ext
# except ValueError:
# filename = duplicate.rsplit(' 2')[0]
# file = filename
try:
filename, ext = duplicate.rsplit(' 2', 1)
if '.icloud' in ext:
file = filename + ext
file = file.split('.icloud')[0] # remove .icloud
file = file.split('.', 1)[1] # remove ., which makes files hidden
else:
file = filename + ext
except ValueError:
filename = duplicate.rsplit(' 2')[0]
file = filename
# Test only
# print("duplicate:", duplicate)
# print("file:", file)
# print(file in new_files)
if file in new_files:
file_size = os.path.getsize(file)
duplicate_size = os.path.getsize(duplicate)
file_mtime = os.path.getmtime(file)
duplicate_mtime = os.path.getmtime(duplicate)
duplicate_path = os.path.join(dirpath, duplicate)
# Test only
# print(file, file_size)
# print(duplicate, duplicate_size)
# Remove the old files retrieved from iCloud since
# new files may be larger than old files.
if (file_size >= duplicate_size) | (file_mtime > duplicate_mtime):
try:
os.remove(duplicate)
print(f'{duplicate_path} has been removed!')
except:
print(f'{duplicate_path} cannot be removed due to an issue!')
else:
print(f'{duplicate_path} cannot be removed since its size is larger than, or its modified time is later than, {file}.')
print(f'New file size: {file_size}')
print(f'Duplicate size: {duplicate_size}')
print(f'New file modified time: {time.ctime(file_mtime)}')
print(
f'Duplicate modified time: {time.ctime(duplicate_mtime)}')
def main():
if len(sys.argv) - 1 > 2:
raise TypeError(f"This program takes 2 positional argument but {len(sys.argv) - 1} were given")
elif len(sys.argv) - 1 == 0:
raise TypeError("This program is required to pass a path such as '/Users/username/Desktop'")
path = sys.argv[1]
try:
mark = sys.argv[2]
except:
mark = 2
if not os.path.exists(path):
raise ValueError(f"{path} is not exists in your machine")
for dirpath, dirnames, filenames in sorted(os.walk(path, topdown=True)):
remove_duplicates(dirpath, filenames, mark)
if __name__ == '__main__':
main()
Row 4 stats: avg_line_length 39.196721 | max_line_length 123 | alphanum_fraction 0.458595

Row 5: hexsha d9dfa74ea780bbb298be9f292c046b5498adb3c3 | size 1,420 | ext py | lang Python | path tests/test_automata.py | repo vincentdavis/special-sequences @ b7b7f8c2bd2f655baeb7b2139ddf007615bffd67 | licenses ["MIT"] | stars 1 (2020-04-15T10:46:57.000Z to 2020-04-15T10:46:57.000Z) | issues 1 (2016-09-14T03:57:25.000Z to 2016-09-14T03:57:25.000Z) | forks null

from unittest import TestCase
from seqs.Automata import RegularLanguage
class RegExpTest(TestCase):
# tuples (L,[strings in L],[strings not in L])
languages = [
(RegularLanguage("0"), ["0"], ["", "00"]),
(RegularLanguage("(10+0)*"), ["", "0", "010"], ["1"]),
(RegularLanguage("(0+1)*1(0+1)(0+1)"), ["000100"], ["0011"]),
]
def test_Membership(self):
"""membership tests for RegularLanguage(expression)"""
for L, Li, Lx in self.languages:
for S in Li:
self.assertTrue(S in L)
for S in Lx:
self.assertTrue(S not in L)
def test_Complement(self):
"""membership tests for ~RegularLanguage"""
for L, Li, Lx in self.languages:
L = ~L
for S in Lx:
self.assertTrue(S in L)
for S in Li:
self.assertTrue(S not in L)
def test_Equivalent(self):
"""test that converting NFA->expr->NFA produces same language"""
for L1, Li, Lx in self.languages:
L2 = RegularLanguage(L1.recognizer.RegExp())
self.assertEqual(L1, L2)
def test_Inequivalent(self):
"""test that different regular languages are recognized as different"""
for i in range(len(self.languages)):
for j in range(i):
self.assertNotEqual(self.languages[i][0], self.languages[j][0])
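For orientation, a short hedged sketch of the `RegularLanguage` API these tests exercise (assuming the same import path):

```python
# Sketch only: membership and complement checks mirroring the tests above.
from seqs.Automata import RegularLanguage

L = RegularLanguage("(10+0)*")
assert "010" in L and "1" not in L
assert "1" in ~L  # the complement flips membership
```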
Row 5 stats: avg_line_length 34.634146 | max_line_length 79 | alphanum_fraction 0.558451

Row 6: hexsha 3dd06b91fbe55e8ad7a3d0eeb1fd28b1930d02dd | size 2,579 | ext py | lang Python | path etl/jobs/transformation/quality_assurance_transformer_job.py | repo PDCMFinder/pdcm-etl @ df0006e4ad5ca2ddf9c1387e28a0b7fb24f195de | licenses ["Apache-2.0"] | stars 1 (2022-01-28T16:01:59.000Z to 2022-01-28T16:01:59.000Z) | issues 37 (2022-02-09T18:19:13.000Z to 2022-03-29T12:14:19.000Z) | forks null

import sys
from pyspark.sql import DataFrame, SparkSession
from etl.jobs.util.cleaner import init_cap_and_trim_all
from etl.jobs.util.dataframe_functions import transform_to_fk
from etl.jobs.util.id_assigner import add_id
from pyspark.sql.functions import lit, input_file_name, col
def main(argv):
"""
Creates a parquet file with provider group data.
:param list argv: the list elements should be:
[1]: Parquet file path with raw model validation data
[2]: Parquet file path with raw model data
[3]: Output file
"""
raw_model_validation_parquet_path = argv[1]
model_parquet_path = argv[2]
output_path = argv[3]
spark = SparkSession.builder.getOrCreate()
raw_model_validation_df = spark.read.parquet(raw_model_validation_parquet_path)
model_df = spark.read.parquet(model_parquet_path)
quality_assurance_df = transform_quality_assurance(raw_model_validation_df, model_df)
quality_assurance_df.write.mode("overwrite").parquet(output_path)
def transform_quality_assurance(raw_model_validation_df: DataFrame, model_df: DataFrame) -> DataFrame:
quality_assurance_df = extract_model_validation(raw_model_validation_df)
quality_assurance_df = set_fk_model(quality_assurance_df, model_df)
quality_assurance_df = add_id(quality_assurance_df, "id")
quality_assurance_df = get_columns_expected_order(quality_assurance_df)
return quality_assurance_df
def extract_model_validation(raw_model_validation_df: DataFrame) -> DataFrame:
quality_assurance_df = raw_model_validation_df.withColumn(
"validation_technique", init_cap_and_trim_all("validation_technique"))
return quality_assurance_df
def set_fk_model(quality_assurance_df, model_df):
quality_assurance_df = quality_assurance_df.withColumn("_data_source", lit(input_file_name()))
quality_assurance_df = quality_assurance_df.withColumnRenamed("model_id", "model_id_ref")
quality_assurance_df = quality_assurance_df.withColumn("xyz", col("model_id_ref"))
quality_assurance_df = transform_to_fk(
quality_assurance_df, model_df, "model_id_ref", "external_model_id", "id", "model_id")
return quality_assurance_df
def get_columns_expected_order(quality_assurance_df: DataFrame) -> DataFrame:
return quality_assurance_df.select(
"id",
"description",
"passages_tested",
"validation_technique",
"validation_host_strain_nomenclature",
"model_id"
)
if __name__ == "__main__":
sys.exit(main(sys.argv))
Row 6 stats: avg_line_length 37.926471 | max_line_length 102 | alphanum_fraction 0.759597

Row 7: hexsha 1b464f5c77a5862203459b557eb2ebebd5410f5f | size 84 | ext py | lang Python | path mfr/extensions/image/__init__.py | repo yacchin1205/RDM-modular-file-renderer @ 5bd18175a681d21e7be7fe0238132335a1cd8ded | licenses ["Apache-2.0"] | stars 36 (2015-08-31T20:24:22.000Z to 2021-12-17T17:02:44.000Z) | issues 190 (2015-01-02T06:22:01.000Z to 2022-01-19T11:27:03.000Z) | forks 47 (2015-01-27T15:45:22.000Z to 2021-01-27T22:43:03.000Z)

from .export import ImageExporter # noqa
from .render import ImageRenderer # noqa
Row 7 stats: avg_line_length 28 | max_line_length 41 | alphanum_fraction 0.785714

Row 8: hexsha a961e7c32d6396faecb34167c9f0b4beff3e2370 | size 131 | ext py | lang Python | path test/testdata/execute_params_error.py | repo Scartography/mapchete @ f7d1a74acb4021adfd3053501416d2b974c40af9 | licenses ["MIT"] | stars 161 (2016-02-20T15:18:13.000Z to 2022-03-28T11:55:32.000Z) | issues 387 (2015-08-12T07:16:56.000Z to 2022-03-30T14:27:12.000Z) | forks 20 (2016-02-22T12:51:54.000Z to 2022-01-30T22:54:08.000Z)

"""Test to raise MapcheteProcessImportError."""
def execute():
""" "Function needs to have exactly one argument."""
pass
Row 8 stats: avg_line_length 18.714286 | max_line_length 56 | alphanum_fraction 0.671756

Row 9: hexsha 972d51bb4b7f17299a874decb2f1b422317d0f70 | size 3,566 | ext py | lang Python | path modules/discriminator.py | repo GGGHSL/InfoSwap-master @ 0484ef5fcb35bf811f2c3ec52ecf77ccade822db | licenses ["CC0-1.0"] | stars 37 (2021-11-01T14:16:23.000Z to 2022-03-29T14:30:54.000Z) | issues 15 (2021-11-16T12:52:08.000Z to 2022-03-31T09:22:18.000Z) | forks 9 (2021-11-02T07:25:35.000Z to 2022-03-04T10:15:44.000Z)

import torch.nn as nn
import numpy as np
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=6, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False):
super(NLayerDiscriminator, self).__init__()
self.getIntermFeat = getIntermFeat
self.n_layers = n_layers
kw = 4
padw = int(np.ceil((kw-1.0)/2))
sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]]
nf = ndf
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
norm_layer(nf), nn.LeakyReLU(0.2, True)
]]
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
if use_sigmoid:
sequence += [[nn.Sigmoid()]]
if getIntermFeat:
for n in range(len(sequence)):
setattr(self, 'encoder'+str(n), nn.Sequential(*sequence[n]))
else:
sequence_stream = []
for n in range(len(sequence)):
sequence_stream += sequence[n]
self.model = nn.Sequential(*sequence_stream)
def forward(self, x):
if self.getIntermFeat:
res = [x]
for n in range(self.n_layers+2):
model = getattr(self, 'encoder'+str(n))
res.append(model(res[-1]))
return res[1:]
else:
return self.model(x)
class MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,
use_sigmoid=False, num_D=3, getIntermFeat=False):
super(MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.n_layers = n_layers
self.getIntermFeat = getIntermFeat
for i in range(num_D):
netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
if getIntermFeat:
for j in range(n_layers + 2):
setattr(self, 'scale' + str(i) + '_layer' + str(j), getattr(netD, 'encoder' + str(j)))
else:
setattr(self, 'layer' + str(i), netD.model)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def singleD_forward(self, model, input):
if self.getIntermFeat:
result = [input]
for i in range(len(model)):
result.append(model[i](result[-1]))
return result[1:]
else:
return [model(input)]
def forward(self, input):
num_D = self.num_D
result = []
input_downsampled = input
for i in range(num_D):
if self.getIntermFeat:
model = [getattr(self, 'scale' + str(num_D - 1 - i) + '_layer' + str(j)) for j in
range(self.n_layers + 2)]
else:
model = getattr(self, 'layer' + str(num_D - 1 - i))
out = self.singleD_forward(model, input_downsampled)
result.append(out)
if i != (num_D - 1):
input_downsampled = self.downsample(input_downsampled)
return result
Row 9 stats: avg_line_length 32.715596 | max_line_length 120 | alphanum_fraction 0.547392

Row 10: hexsha 070eb36f6a76e508b098086b7beb14703e700350 | size 4,606 | ext py | lang Python | path final/final_part3/operations.py | repo MrDDaye/cna_cp1855 @ 5b83f5877d373e9499e379ba93b04c394c13db5f | licenses ["MIT"] | stars null | issues null | forks null

"""Operations team manager program."""
from cli import display_lineup_header, display_player, display_player_position, display_player_stats, display_farewell, display_player_updated
from inputs import get_lineup_number, get_string, get_position, get_int
from baseball_stats import batting_average
from files import read_from_file, save_to_file
def display_lineup(players: list[list[str]]) -> None:
"""Displays the lineup of the baseball team."""
display_lineup_header()
for index, player in enumerate(players):
display_player(index + 1, player)
def add_player(players: list[list[str]], players_file: str) -> None:
"""Prompts user for new player information and adds it to team lineup."""
name: str = get_string('Name: ')
position: str = get_position('Position: ')
at_bats: int = get_int('At bats: ')
hits: int = get_int('Hits: ')
average: float = batting_average(at_bats, hits)
players.append([name, position, at_bats, hits, average])
save_players_to_file(players_file, players)
print(f'{name} was added.')
def remove_player(players: list[list[str]], players_file: str) -> None:
"""Prompts user for lineup number and remove player from team lineup."""
lineup_index: int = get_lineup_number('Lineup number: ', players) - 1
removed_player: list[str] = players.pop(lineup_index)
save_players_to_file(players_file, players)
print(f'{removed_player[0]} was removed.')
def move_player(players: list[list[str]], players_file: str) -> None:
"""Prompts user for lineup number and desired new lineup number. Moves player to new lineup position."""
lineup_index: int = get_lineup_number('Current lineup number: ', players) - 1
moved_player: list[str] = players.pop(lineup_index)
print(f'{moved_player[0]} was selected.')
lineup_index: int = get_lineup_number('New lineup number: ', players) - 1
players.insert(lineup_index, moved_player)
save_players_to_file(players_file, players)
print(f'{moved_player[0]} was moved.')
def edit_player_position(players: list[list[str]], players_file: str) -> None:
"""Prompts user for lineup number and allows user to edit selected players position."""
lineup_index: int = get_lineup_number('Lineup number: ', players) - 1
selected_player: list[str] = players[lineup_index]
display_player_position(selected_player)
selected_player[1] = get_position('New position: ')
save_players_to_file(players_file, players)
display_player_updated(selected_player)
def edit_player_stats(players: list[list[str]], players_file: str) -> None:
"""Prompts user for lineup number and allows user to edit selected players statistics."""
lineup_index: int = get_lineup_number('Lineup number: ', players) - 1
selected_player: list[str] = players[lineup_index]
display_player_stats(selected_player)
selected_player[2] = get_int('At bats: ')
selected_player[3] = get_int('Hits: ')
selected_player[4] = batting_average(selected_player[2], selected_player[3])
save_players_to_file(players_file, players)
display_player_updated(selected_player)
def finish(players: list[list[str]], players_file: str) -> None:
"""Close the program."""
save_players_to_file(players_file, players)
display_farewell()
quit()
def read_players_from_file(file: str) -> None:
"""Read players from given csv file."""
try:
players = read_from_file(file)
for player in players:
player[2] = int(player[2])
player[3] = int(player[3])
player[4] = float(player[4])
return players
except FileNotFoundError as e:
print(e)
print('Team data file could not be found.')
print('You can create a new one if you want.')
return []
def save_players_to_file(file: str, players: list[list[str]]) -> None:
"""Save players to given csv file."""
save_to_file(file, players)
def run_option(option: int, players: list[list[str]], players_file: str) -> None:
"""Run the function specified by option."""
if option == 1:
display_lineup(players)
elif option == 2:
add_player(players, players_file)
elif option == 3:
remove_player(players, players_file)
elif option == 4:
move_player(players, players_file)
elif option == 5:
edit_player_position(players, players_file)
elif option == 6:
edit_player_stats(players, players_file)
else:
finish(players, players_file)
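A hedged sketch of the driver loop this module appears designed for; the CSV file name and menu prompt are assumptions, while `read_players_from_file`, `run_option`, and `get_int` come from the code above:

```python
# Sketch only: a possible main loop for the team manager (file name and prompt are hypothetical).
def run() -> None:
    players_file = "players.csv"
    players = read_players_from_file(players_file)
    while True:
        option = get_int("Menu option (1-7): ")
        run_option(option, players, players_file)  # any option outside 1-6 calls finish() and quits
```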
Row 10 stats: avg_line_length 45.156863 | max_line_length 143 | alphanum_fraction 0.688667

Row 11: hexsha b791845bcd824f236d4c73eff111d2064e28c2e7 | size 6,377 | ext py | lang Python | path tests/apps/migrations/auto/test_serialisation.py | repo aminalaee/piccolo @ af8d2d45294dcd84f4f9b6028752aa45b699ec15 | licenses ["MIT"] | stars null | issues null | forks null

from enum import Enum
from unittest import TestCase
from piccolo.apps.migrations.auto.serialisation import serialise_params
from piccolo.columns.base import OnDelete
from piccolo.columns.choices import Choice
from piccolo.columns.column_types import Varchar
from piccolo.columns.defaults import DateNow, TimeNow, TimestampNow, UUID4
from piccolo.columns.reference import LazyTableReference
def example_function():
pass
class TestSerialiseParams(TestCase):
def test_time(self):
serialised = serialise_params(params={"default": TimeNow()})
self.assertEqual(serialised.params["default"].__repr__(), "TimeNow()")
self.assertTrue(len(serialised.extra_imports) == 1)
self.assertEqual(
serialised.extra_imports[0].__str__(),
"from piccolo.columns.defaults.time import TimeNow",
)
def test_date(self):
serialised = serialise_params(params={"default": DateNow()})
self.assertEqual(serialised.params["default"].__repr__(), "DateNow()")
def test_timestamp(self):
serialised = serialise_params(params={"default": TimestampNow()})
self.assertTrue(
serialised.params["default"].__repr__() == "TimestampNow()"
)
def test_uuid(self):
serialised = serialise_params(params={"default": UUID4()})
self.assertTrue(serialised.params["default"].__repr__() == "UUID4()")
def test_lazy_table_reference(self):
# These are equivalent:
references_list = [
LazyTableReference(
table_class_name="Manager", app_name="example_app"
),
LazyTableReference(
table_class_name="Manager",
module_path="tests.example_app.tables",
),
]
for references in references_list:
serialised = serialise_params(params={"references": references})
self.assertTrue(
serialised.params["references"].__repr__() == "Manager"
)
self.assertTrue(len(serialised.extra_imports) == 1)
self.assertEqual(
serialised.extra_imports[0].__str__(),
"from piccolo.table import Table",
)
self.assertTrue(len(serialised.extra_definitions) == 1)
self.assertEqual(
serialised.extra_definitions[0].__str__(),
'class Manager(Table, tablename="manager"): pass',
)
def test_function(self):
serialised = serialise_params(params={"default": example_function})
self.assertTrue(
serialised.params["default"].__repr__() == "example_function"
)
self.assertTrue(len(serialised.extra_imports) == 1)
self.assertEqual(
serialised.extra_imports[0].__str__(),
(
"from tests.apps.migrations.auto.test_serialisation import "
"example_function"
),
)
self.assertTrue(len(serialised.extra_definitions) == 0)
def test_lambda(self):
"""
Make sure lambda functions are rejected.
"""
with self.assertRaises(ValueError) as manager:
serialise_params(params={"default": lambda x: x + 1})
self.assertEqual(
manager.exception.__str__(), "Lambdas can't be serialised"
)
def test_builtins(self):
"""
Make sure builtins can be serialised properly.
"""
serialised = serialise_params(params={"default": list})
self.assertTrue(serialised.params["default"].__repr__() == "list")
self.assertTrue(len(serialised.extra_imports) == 0)
def test_column_instance(self):
"""
Make sure Column instances can be serialised properly. An example
use case is when a `base_column` argument is passed to an `Array`
column.
"""
serialised = serialise_params(params={"base_column": Varchar()})
self.assertEqual(
serialised.params["base_column"].__repr__(),
"Varchar(length=255, default='', null=False, primary=False, key=False, unique=False, index=False, index_method=IndexMethod.btree, choices=None)", # noqa: E501
)
self.assertEqual(
{i.__repr__() for i in serialised.extra_imports},
{
"from piccolo.columns.column_types import Varchar",
"from piccolo.columns.indexes import IndexMethod",
},
)
def test_enum_type(self):
"""
Make sure Enum types can be serialised properly.
"""
class Choices(Enum):
a = 1
b = 2
c = Choice(value=3, display_name="c1")
serialised = serialise_params(params={"choices": Choices})
self.assertEqual(
serialised.params["choices"].__repr__(),
"Enum('Choices', {'a': 1, 'b': 2, 'c': Choice(value=3, display_name='c1')})", # noqa: E501
)
self.assertEqual(
{i.__repr__() for i in serialised.extra_imports},
{
"from piccolo.columns.choices import Choice",
"from enum import Enum",
},
)
def test_custom_enum_instance(self):
"""
Make sure custom Enum instances can be serialised properly. An example
is when a user defines a choices Enum, and then sets the default to
one of those choices.
"""
class Choices(Enum):
a = 1
b = 2
serialised = serialise_params(params={"default": Choices.a})
self.assertEqual(serialised.params["default"], 1)
self.assertEqual(serialised.extra_imports, [])
self.assertEqual(serialised.extra_definitions, [])
def test_builtin_enum_instance(self):
"""
Make sure Enum instances defined in Piccolo can be serialised properly
- for example, with on_delete.
"""
serialised = serialise_params(params={"on_delete": OnDelete.cascade})
self.assertEqual(
serialised.params["on_delete"].__repr__(), "OnDelete.cascade"
)
self.assertEqual(
[i.__repr__() for i in serialised.extra_imports],
["from piccolo.columns.base import OnDelete"],
)
self.assertEqual(serialised.extra_definitions, [])
Row 11 stats: avg_line_length 34.47027 | max_line_length 171 | alphanum_fraction 0.604987

Row 12: hexsha a318cfffb506fb9fe169e481e671bef86d28e1e3 | size 1,869 | ext py | lang Python | path dataPipelines/gc_scrapy/gc_scrapy/GCSeleniumSpider.py | repo dod-advana/gamechanger-crawlers @ e0113111a39f78bd13f70fa4b3359a688f7dc6e8 | licenses ["MIT"] | stars 8 (2021-05-20T18:39:35.000Z to 2022-02-25T23:24:21.000Z) | issues 4 (2021-06-14T13:46:46.000Z to 2022-03-02T02:01:49.000Z) | forks 4 (2021-06-30T22:18:52.000Z to 2021-11-17T22:43:27.000Z)

# -*- coding: utf-8 -*-
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import typing
from dataPipelines.gc_scrapy.gc_scrapy.runspider_settings import general_settings, selenium_settings
from dataPipelines.gc_scrapy.gc_scrapy.middleware_utils.selenium_request import SeleniumRequest
from dataPipelines.gc_scrapy.gc_scrapy.GCSpider import GCSpider
class GCSeleniumSpider(GCSpider):
"""
Selenium Spider with settings applied and selenium request returned for the standard parse method used in crawlers
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
custom_settings: dict = {**general_settings, **selenium_settings}
selenium_request_overrides: dict = {}
selenium_spider_start_request_retries_allowed: int = 5
selenium_spider_start_request_retry_wait: int = 30
def start_requests(self):
"""
Applies selenium_request_overrides dict and returns a selenium response instead of standard scrapy response
"""
opts = {
"url": self.start_urls[0],
"callback": self.parse,
"wait_time": 5,
**self.selenium_request_overrides
}
yield SeleniumRequest(**opts)
@staticmethod
def wait_until_css_clickable(driver, css_selector: str, wait: typing.Union[int, float] = 5):
WebDriverWait(driver, wait).until(
EC.element_to_be_clickable(
(By.CSS_SELECTOR, css_selector)
))
@staticmethod
def wait_until_css_located(driver, css_selector: str, wait: typing.Union[int, float] = 5):
WebDriverWait(driver, wait).until(
EC.presence_of_element_located(
(By.CSS_SELECTOR, css_selector)
))
Row 12 stats: avg_line_length 35.264151 | max_line_length 122 | alphanum_fraction 0.695559

Row 13: hexsha a701a901aa7d6ea8d5f9dc69f88e2f07f7ac3b99 | size 11,259 | ext py | lang Python | path pymysqldao/dao/baseDao.py | repo AsuraChj/PyMySQLDao @ f1638a2b2d449e71b089824193da8f5a95f56114 | licenses ["MIT"] | stars null | issues null | forks null

# !/usr/bin/env python
# -*- coding: utf-8 -*-
# file_name: baseDao.py
# author: ScCcWe
# time: 2022/3/4 12:35 PM
from typing import List, Dict
from pymysql.connections import Connection
from pymysqldao.log.logger import logger
from pymysqldao.constant.COMMON import DEBUG
from pymysqldao.err import ParamTypeError, ParamNoneError
class BaseDao:
def __init__(self, connection: Connection, table_name: str):
if not connection:
raise ValueError
else:
self.connection = connection
if not table_name:
raise ValueError
else:
self.table_name = table_name
# global limit value
self.limit = 500
# DEBUG is enabled by default when this attribute is not configured;
# it can be turned off per Dao class, so that some classes debug and others do not;
self.debug = True
# def select_by_id2(self, id_, primary_key="id"):
# """
#
# Query a single row by the given `primary key` value
#
# :param id_: the primary-key value to query, e.g. 1, "1", 2, "3"; # type: int / str;
# :param primary_key: primary-key column name, defaults to "id"; if it is not "id", it should be given explicitly;
# :return: Dict / None
# """
# if not id_:
# raise ParamNoneError("param `id` can't accept null-type value")
# if type(id_) != int and type(id_) != str:
# raise ParamTypeError("param `id` can only accept int or str type")
# if type(primary_key) != str:
# raise ParamTypeError("param `primary_key` can only accept str type")
#
# sql = f"select * from {self.table_name} where {primary_key} = %s"
# try:
# with self.connection.cursor() as cursor:
# execute_result = cursor.execute(sql, (str(id_),))
# if DEBUG and self.debug:
# logger.info(f"Execute SQL: {sql}")
# logger.info(f"Query OK, {execute_result} rows affected")
# fetchone is fine here because the primary key must be unique
# result = cursor.fetchone()
# except Exception as e:
# logger.error(f"Execute SQL: {sql}")
# logger.error(f"Query Exception: {e}")
# finally:
# return result if result else None
def select_by_id(self, id_value, primary_key="id"):
return self.select_by_field(id_value, field_key=primary_key, limit_size=1)
def select_by_id_list(self, id_list, primary_key="id", limit_tag=False):
"""
query data by id_list
:param id_list: list containing all the id values to query
eg: [1, 2, 3], ["1", "2", "3"]
# type: List<int/str>
:param primary_key: primary-key column name, defaults to "id"; if it is not "id", it should be given explicitly;
:param limit_tag: control show all or show limit; if use, the default limit num is self.limit;
:return: List[Dict] / None
"""
if not id_list:
raise ParamNoneError("param `id_list` can't be null-type value")
if not isinstance(id_list, list):
raise ParamTypeError("param `id_list` can only accept List type")
if type(primary_key) != str:
raise ParamTypeError("param `primary_key` can only accept str type")
for item in id_list:
if not item:
raise ParamTypeError("param `id_list` can't be null-type value")
if type(item) == str or type(item) == int:
...
else:
raise ParamTypeError("the type of param `id_list` can be `List<int>`, `List<str>` or `List<str/int>`")
sql = f"select * from {self.table_name} where {primary_key} in %s"
try:
with self.connection.cursor() as cursor:
execute_result = cursor.execute(sql, ([str(_) for _ in id_list],))
if DEBUG and self.debug:
logger.info(f"Execute SQL: {sql}")
logger.info(f"Query OK, {execute_result} rows affected")
if limit_tag:
result = cursor.fetchmany(self.limit)
else:
result = cursor.fetchall()
except Exception as e:
logger.info(f"Execute SQL: {sql}")
logger.info(f"Query Exception: {e}")
finally:
return result if result else None
def select_by_field(self, field_value, field_key: str, limit_size=20):
"""
Query data by the given field name and field value
:param field_value: the field value
:param field_key: the field name
:param limit_size: if limit_size is set on the method call, it takes precedence over self.limit
:return:
"""
if not field_value:
raise ParamNoneError("param `field_value` can't accept null-type value")
if type(field_value) != int and type(field_value) != str:
raise ParamTypeError("param `field_value` can only accept int or str type")
if type(field_key) != str:
raise ParamTypeError("param `field_key` can only accept str type")
sql = f"select * from {self.table_name} where {field_key} = %s"
try:
with self.connection.cursor() as cursor:
execute_result = cursor.execute(sql, (str(field_value),))
if DEBUG and self.debug:
logger.info(f"Execute SQL: {sql}")
logger.info(f"Query OK, {execute_result} rows affected")
if limit_size: # by_id / by_unique_field
if limit_size == 1:
result = cursor.fetchone()
else:
result = cursor.fetchmany(limit_size)
elif self.limit: # by_field
result = cursor.fetchmany(self.limit)
else:
result = cursor.fetchall()
except Exception as e:
logger.error(f"Execute SQL: {sql}")
logger.error(f"Query Exception: {e}")
finally:
return result if result else None
def select_list(self, limit_size=20):
"""
Query a list of rows according to the limit value
:param limit_size: the limit for the query; optional, defaults to 20;
:return: List[Dict]
"""
if not limit_size:
raise ValueError("param `limit_size` can't be none-type value")
if type(limit_size) != int:
raise TypeError("param `limit_size` can only accept int type")
sql = f"select * from {self.table_name}"
try:
with self.connection.cursor() as cursor:
execute = cursor.execute(sql)
if DEBUG and self.debug:
logger.info(f"Execute SQL: {sql}")
logger.info(f"Query OK, {execute} rows affected")
result = cursor.fetchmany(limit_size if self.limit > limit_size else self.limit)
except Exception as e:
logger.info(f"Execute SQL: {sql}")
logger.info(f"Query Exception: {e}")
finally:
return result if result is not None else None
def insert_one(self, obj_dict: Dict):
"""
:param obj_dict: the data to insert (as a dict)
:return:
"""
def generate_sql(obj: Dict):
field_list = []
value_list = []
placeholder_list = []
for key, value in obj.items():
field_list.append(key)
placeholder_list.append("%s")
value_list.append(str(value))
sql = f"INSERT INTO {self.table_name} ({', '.join(field_list)}) " \
f"VALUES ({', '.join(placeholder_list)})"
return sql, value_list
if not isinstance(obj_dict, dict):
raise TypeError("param `obj_dict` can only accept dict type")
try:
with self.connection.cursor() as cursor:
sql, value_list = generate_sql(obj_dict)
row_num = cursor.execute(sql, tuple(value_list))
if DEBUG and self.debug:
logger.info(f"Execute SQL: {sql}")
logger.info(f"Query OK, {row_num} rows affected")
if not self.connection.get_autocommit():
self.connection.commit()
except Exception as e:
logger.error(f"Execute SQL: {sql}")
logger.error(f"Query Exception: {e}")
finally:
return row_num if row_num else None
def insert_many(self, obj_dict_list: List[Dict[str, object]]):
if not obj_dict_list:
raise ParamNoneError("param `obj_dict_list` can't be none-type value")
if not isinstance(obj_dict_list, list):
raise ParamTypeError("param `obj_dict_list` can only accept list type")
for obj in obj_dict_list:
self.insert_one(obj)
def update_by_id(self, obj_dict: Dict, primary_key="id"):
def generate_sql(obj_dict):
field_value_list = []
for field, value in obj_dict.items():
if field != primary_key:
if type(value) == str:
value = "'" + value + "'"
field_value_list.append(field + '=' + str(value))
return f"update {self.table_name} set {', '.join(field_value_list)} where {primary_key} = %s"
if not obj_dict:
raise ValueError("param `obj_dict` can't accept null-type value")
if not isinstance(obj_dict, dict):
raise TypeError("参数类型错误")
if primary_key not in obj_dict: # not obj_dict.has_key("id")
raise KeyError("如果主键列名不是'id',请显式的指出id列名")
try:
with self.connection.cursor() as cursor:
sql = generate_sql(obj_dict)
row_num = cursor.execute(sql, (obj_dict.get(primary_key),))
if DEBUG and self.debug:
logger.info(f"Execute SQL: {sql}")
logger.info(f"Query OK, {row_num} rows affected")
if not self.connection.get_autocommit():
self.connection.commit()
except Exception as e:
logger.error(f"Execute SQL: {sql}")
logger.error(f"Query Exception: {e}")
finally:
return row_num if row_num else None
def delete_by_id(self, id, primary_key="id"):
if not id:
raise ValueError("param `id` can't accept null-type value")
if type(id) != str and type(id) != int:
raise TypeError("param `id` can accept str or int type")
try:
with self.connection.cursor() as cursor:
sql = f"delete from {self.table_name} where {primary_key} = %s"
rows = cursor.execute(sql, (id,))
if DEBUG and self.debug:
logger.info(f"Execute SQL: {sql}")
logger.info(f"Query OK, {rows} rows affected")
if not self.connection.get_autocommit():
self.connection.commit()
except Exception as e:
logger.error(f"Execute SQL: {sql}")
logger.error(f"Query Exception: {e}")
finally:
return rows if rows else None
def delete_by_id_list(self, id_list: List):
if not id_list:
raise ValueError("param `id_list` can't accept null-type value")
if not isinstance(id_list, list):
raise TypeError("param `id_list` can only be list type")
for id in id_list:
self.delete_by_id(id)
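A minimal usage sketch for `BaseDao`; the connection parameters and table name are placeholders, and the example assumes `pymysql` with a dict cursor:

```python
# Sketch only: connecting with PyMySQL and querying through BaseDao (credentials and table are hypothetical).
import pymysql

connection = pymysql.connect(host="localhost", user="root", password="secret",
                             database="testdb", cursorclass=pymysql.cursors.DictCursor)
dao = BaseDao(connection, "user")
print(dao.select_by_id(1))             # single row, or None
print(dao.select_by_id_list([1, 2]))   # list of rows
```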
Row 13 stats: avg_line_length 39.784452 | max_line_length 118 | alphanum_fraction 0.557243

Row 14: hexsha feb6075676df5afc4bf2e78d9cae9b993d4b827f | size 4,413 | ext py | lang Python | path chemtonic/splitting/clustering.py | repo mldlproject/chemtonic @ 88b1d01d89a2b7d59ed701dccd9849649f9bea1f | licenses ["MIT"] | stars null | issues null | forks null

import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from chemtonic.featurizer.RDKitMD.getRDKitMD import *
from chemtonic.curation.utils import molStructVerify
from .utils import suggest_K, visualizeElbow
#==========================================================
def suggest_num_clusters(compounds,
estimated_num_cluster=10,
thresold=0.05,
visualize=True,
exportImage=True,
outputPath=None,
ignoreFailedStruct=False,
getFailedStruct=False):
#------------------------
if exportImage:
if outputPath == None:
print("!!!ERROR 'exportImage=True' needs 'outputPath=<Directory>' to be filled !!!")
return None
if outputPath:
if exportImage == False:
print("!!!ERROR 'outputPath=<Directory>' needs to set 'exportImage=True' !!!")
return None
#------------------------
if isinstance(compounds, pd.core.series.Series):
compounds = compounds.tolist()
if isinstance(compounds, pd.core.frame.DataFrame):
compounds = compounds.iloc[:,0].tolist()
if isinstance(compounds, str):
compounds = [compounds]
if isinstance(compounds, list):
compounds = compounds
#------------------------
# Check valid compounds
Unverified_count = len(molStructVerify(compounds, getFailedStruct=True, printlogs=False))
if Unverified_count == len(compounds):
print("There are no valid SMILES found, please check!")
return None
if ignoreFailedStruct:
VerifiedCompounds = molStructVerify(compounds, printlogs=False)
if Unverified_count != 0:
print("There are {} errored SMILES(s) which were/was ignored".format(Unverified_count))
else:
if getFailedStruct:
if Unverified_count !=0:
print("There are {} errored SMILES(s), to ignore them and continue running, please set 'ignoreFailedStruct=True'".format(Unverified_count))
Unverified = molStructVerify(compounds, getFailedStruct=True, printlogs=False)
return Unverified
else:
print("No errored SMILES found")
VerifiedCompounds = molStructVerify(compounds, printlogs=False)
else:
if Unverified_count!=0:
print("Your set of compounds contains errored SMILES(s), you can:")
print("1. Ignore the errored SMILES(s) and continue running by setting 'ignoreFailedStruct=True'")
print("2. Get your errored SMILES(s) to check by setting 'getFailedStruct = True'")
return None
else:
VerifiedCompounds = molStructVerify(compounds, printlogs=False)
print("==============================================")
#------------------------
# Extract RDKitMD-features of input SMILES(s)
print("Start extract RDKitMD features for input SMILES(s)")
RDKitMD_features_df = extract_RDKitMD(VerifiedCompounds)
RDKitMD_features_np = RDKitMD_features_df.iloc[:, 1:].to_numpy()
# Normalize data
scaler = StandardScaler()
data_normal = scaler.fit_transform(RDKitMD_features_np)
#------------------------
# Elbow method
cost =[]
for i in range(1, estimated_num_cluster):
KM = KMeans(n_clusters = i, max_iter = 1000)
KM.fit(data_normal)
# Calculate squared errors for the clustered points
cost.append(KM.inertia_)
print("==============================================")
#------------------------
# Suggestion numbers of cluster (k)
k = suggest_K(list_values=cost, thresold=thresold)
print("The suggested 'k' value (corresponding to elbow point): {}. \n Please see the Elbow cost plot for more details".format(k))
print("==============================================")
#------------------------
# Visualize Elbow
if visualize:
visualizeElbow(cost,
visualize,
exportImage,
outputPath,
estimated_num_cluster)
return k
else:
return k
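A hedged example call; the SMILES strings below are placeholders chosen only to show the signature:

```python
# Sketch only: suggesting a cluster count for a small SMILES set (inputs are hypothetical).
smiles = ["CCO", "CCN", "c1ccccc1", "CC(=O)O"]
k = suggest_num_clusters(smiles,
                         estimated_num_cluster=5,
                         visualize=False,
                         exportImage=False)
print("suggested number of clusters:", k)
```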
Row 14 stats: avg_line_length 45.494845 | max_line_length 156 | alphanum_fraction 0.552005

Row 15: hexsha 3b354f9aa2bffcdb287989b607d2e93e609bec5e | size 6,709 | ext py | lang Python | path src/main_timm_version.py | repo HamaguchiKazuki/MahalanobisAD-pytorch @ afdcf574865d6d021a13d81996701e0bcd2783b2 | licenses ["Apache-2.0"] | stars null | issues null | forks null

import argparse
from typing import List
import numpy as np
import os
import pickle
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import confusion_matrix
from sklearn.covariance import LedoitWolf
from sklearn.manifold import TSNE
from scipy.spatial.distance import mahalanobis
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from timm.models.efficientnet import EfficientNet
import timm
import datasets.mvtec as mvtec
def parse_args():
parser = argparse.ArgumentParser('MahalanobisAD')
parser.add_argument("-m", "--model_name", type=str, default='tf_efficientnet_b4')
parser.add_argument("-s", "--save_path", type=str, default="./result")
parser.add_argument("-p", "--pool_method", type=str, default="avg")
return parser.parse_args()
def main():
args = parse_args()
dim_reduction_model= TSNE(n_components=2)
# device setup
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# load model
model = timm.create_model(args.model_name, pretrained=True)
model.to(device)
model.eval()
os.makedirs(os.path.join(args.save_path, 'temp'), exist_ok=True)
total_roc_auc = []
for class_name in mvtec.CLASS_NAMES:
train_dataset = mvtec.MVTecDataset(
class_name=class_name, is_train=True)
train_dataloader = DataLoader(
train_dataset, batch_size=32, pin_memory=True)
test_dataset = mvtec.MVTecDataset(
class_name=class_name, is_train=False)
test_dataloader = DataLoader(
test_dataset, batch_size=32, pin_memory=True)
train_outputs = [[] for _ in range(9)]
test_outputs = [[] for _ in range(9)]
youden_index_thresholds = []
# extract train set features
train_feat_filepath = os.path.join(
args.save_path, 'temp', 'train_%s_%s.pkl' % (class_name, args.model_name))
if not os.path.exists(train_feat_filepath):
for (x, y, mask) in tqdm(train_dataloader, '| feature extraction | train | %s |' % class_name):
# model prediction
with torch.no_grad():
feats = extract_features(x.to(device), model, args.pool_method)
for f_idx, feat in enumerate(feats):
train_outputs[f_idx].append(feat)
# fitting a multivariate gaussian to features extracted from every level of ImageNet pre-trained model
for t_idx, train_output in enumerate(train_outputs):
mean = torch.mean(
torch.cat(train_output, 0).squeeze(), dim=0).cpu().detach().numpy()
# covariance estimation by using the Ledoit. Wolf et al. method
cov = LedoitWolf().fit(
torch.cat(train_output, 0).squeeze().cpu().detach().numpy()).covariance_
train_outputs[t_idx] = [mean, cov]
# save extracted feature
with open(train_feat_filepath, 'wb') as f:
pickle.dump(train_outputs, f)
else:
print('load train set feature distribution from: %s' %
train_feat_filepath)
with open(train_feat_filepath, 'rb') as f:
train_outputs = pickle.load(f)
gt_list = []
# extract test set features
for (x, y, mask) in tqdm(test_dataloader, '| feature extraction | test | %s |' % class_name):
gt_list.extend(y.cpu().detach().numpy())
# model prediction
with torch.no_grad():
feats = extract_features(x.to(device), model, args.pool_method)
for feat_idx, feat in enumerate(feats):
test_outputs[feat_idx].append(feat)
for test_idx, test_output in enumerate(test_outputs):
test_outputs[test_idx] = torch.cat(
test_output, 0).squeeze().cpu().detach().numpy()
# calculate Mahalanobis distance per each level of EfficientNet
dist_list = []
each_level_dist = {}
for test_idx, test_output in enumerate(test_outputs):
mean = train_outputs[test_idx][0]
cov_inv = np.linalg.inv(train_outputs[test_idx][1])
print(f"level, {test_idx}, mean shape, {mean.shape}")
dist = [mahalanobis(sample, mean, cov_inv)
for sample in test_output]
each_level_dist[test_idx + 1] = np.array(dist)
dist_list.append(np.array(dist))
# Anomaly score is followed by unweighted summation of the Mahalanobis distances
# scores = np.sum(np.array(dist_list), axis=0)
scores = each_level_dist[7]
# calculate image-level ROC AUC score
fpr, tpr, thresholds = roc_curve(gt_list, scores)
roc_auc = roc_auc_score(gt_list, scores)
total_roc_auc.append(roc_auc)
youden_index_thresholds.append(thresholds[np.argmax(tpr-fpr)])
print(f"{class_name}, youden index, {thresholds[np.argmax(tpr-fpr)]:.1f}")
tn, fp, fn, tp = confusion_matrix(
gt_list, scores >= thresholds[np.argmax(tpr-fpr)]).flatten()
print(
f"conf matrix, tn, fp, fn, tp, {tn, fp, fn, tp}")
# print('%s ROCAUC: %.3f' % (class_name, roc_auc))
# plt.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, roc_auc))
print('%s ROCAUC: %.3f, mean th: %.1f' % (class_name, roc_auc, np.mean(youden_index_thresholds)))
plt.plot(fpr, tpr, label='%s ROCAUC: %.3f, mean th:%.1f' %
(class_name, roc_auc, np.mean(youden_index_thresholds)))
print('Average ROCAUC: %.3f' % np.mean(total_roc_auc))
plt.title('Average image ROCAUC: %.3f' % np.mean(total_roc_auc))
plt.legend(loc='lower right')
plt.savefig(os.path.join(args.save_path, 'roc_curve_%s.png' %
args.model_name), dpi=200)
def extract_features(inputs: torch.Tensor,model:EfficientNet, pool_method:str)-> List:
""" Returns list of the feature at each level of the EfficientNet """
if pool_method == "avg":
pool = F.adaptive_avg_pool2d
elif pool_method == "max":
pool = F.adaptive_max_pool2d
feat_list = []
# Stem
x = model.conv_stem(inputs)
x = model.bn1(x)
x = model.act1(x)
feat_list.append(pool(x, 1))
# Blocks: 2~8 layer
for _, block_layer in enumerate(model.blocks, start=2):
x = block_layer(x)
feat_list.append(pool(x, 1))
# Head
x = model.conv_head(x)
x = model.bn2(x)
x = model.act2(x)
feat_list.append(pool(x, 1))
return feat_list
if __name__ == '__main__':
main()
Row 15 stats: avg_line_length 37.272222 | max_line_length 114 | alphanum_fraction 0.629155

Row 16: hexsha 9c8a38c3ecd6e45fbf3470c0d50c68a4e21b6639 | size 12,506 | ext py | lang Python | path hwilib/cli.py | repo tkijewski/HWI @ 0b78229c94dce72f5b33ee4555ba78f25f64d2e1 | licenses ["MIT"] | stars null | issues null | forks null

#! /usr/bin/env python3
from .commands import backup_device, displayaddress, enumerate, find_device, \
get_client, getmasterxpub, getxpub, getkeypool, prompt_pin, restore_device, send_pin, setup_device, \
signmessage, signtx, wipe_device, install_udev_rules
from .errors import (
handle_errors,
DEVICE_CONN_ERROR,
HELP_TEXT,
HWWError,
MISSING_ARGUMENTS,
NO_DEVICE_TYPE,
NO_PASSWORD,
UNAVAILABLE_ACTION,
UNKNWON_DEVICE_TYPE,
UNKNOWN_ERROR
)
from . import __version__
import argparse
import getpass
import logging
import json
import sys
def backup_device_handler(args, client):
return backup_device(client, label=args.label, backup_passphrase=args.backup_passphrase)
def displayaddress_handler(args, client):
return displayaddress(client, desc=args.desc, path=args.path, sh_wpkh=args.sh_wpkh, wpkh=args.wpkh)
def enumerate_handler(args):
return enumerate(password=args.password)
def getmasterxpub_handler(args, client):
return getmasterxpub(client)
def getxpub_handler(args, client):
return getxpub(client, path=args.path)
def getkeypool_handler(args, client):
return getkeypool(client, path=args.path, start=args.start, end=args.end, internal=args.internal, keypool=args.keypool, account=args.account, sh_wpkh=args.sh_wpkh, wpkh=args.wpkh)
def restore_device_handler(args, client):
if args.interactive:
return restore_device(client, label=args.label)
return {'error': 'restore requires interactive mode', 'code': UNAVAILABLE_ACTION}
def setup_device_handler(args, client):
if args.interactive:
return setup_device(client, label=args.label, backup_passphrase=args.backup_passphrase)
return {'error': 'setup requires interactive mode', 'code': UNAVAILABLE_ACTION}
def signmessage_handler(args, client):
return signmessage(client, message=args.message, path=args.path)
def signtx_handler(args, client):
return signtx(client, psbt=args.psbt)
def wipe_device_handler(args, client):
return wipe_device(client)
def prompt_pin_handler(args, client):
return prompt_pin(client)
def send_pin_handler(args, client):
return send_pin(client, pin=args.pin)
def install_udev_rules_handler(args):
return install_udev_rules('udev', args.location)
class HWIArgumentParser(argparse.ArgumentParser):
def print_usage(self, file=None):
if file is None:
file = sys.stderr
super().print_usage(file)
def print_help(self, file=None):
if file is None:
file = sys.stderr
super().print_help(file)
error = {'error': 'Help text requested', 'code': HELP_TEXT}
print(json.dumps(error))
def error(self, message):
self.print_usage(sys.stderr)
args = {'prog': self.prog, 'message': message}
error = {'error': '%(prog)s: error: %(message)s' % args, 'code': MISSING_ARGUMENTS}
print(json.dumps(error))
self.exit(2)
def process_commands(cli_args):
parser = HWIArgumentParser(description='Hardware Wallet Interface, version {}.\nAccess and send commands to a hardware wallet device. Responses are in JSON format.'.format(__version__), formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--device-path', '-d', help='Specify the device path of the device to connect to')
    parser.add_argument('--device-type', '-t', help='Specify the type of device that will be connected. If `--device-path` is not given, the first device of this type enumerated is used.')
parser.add_argument('--password', '-p', help='Device password if it has one (e.g. DigitalBitbox)', default='')
parser.add_argument('--stdinpass', help='Enter the device password on the command line', action='store_true')
parser.add_argument('--testnet', help='Use testnet prefixes', action='store_true')
parser.add_argument('--debug', help='Print debug statements', action='store_true')
parser.add_argument('--fingerprint', '-f', help='Specify the device to connect to using the first 4 bytes of the hash160 of the master public key. It will connect to the first device that matches this fingerprint.')
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
parser.add_argument('--stdin', help='Enter commands and arguments via stdin', action='store_true')
parser.add_argument('--interactive', '-i', help='Use some commands interactively. Currently required for all device configuration commands', action='store_true')
subparsers = parser.add_subparsers(description='Commands', dest='command')
# work-around to make subparser required
subparsers.required = True
enumerate_parser = subparsers.add_parser('enumerate', help='List all available devices')
enumerate_parser.set_defaults(func=enumerate_handler)
getmasterxpub_parser = subparsers.add_parser('getmasterxpub', help='Get the extended public key at m/44\'/0\'/0\'')
getmasterxpub_parser.set_defaults(func=getmasterxpub_handler)
signtx_parser = subparsers.add_parser('signtx', help='Sign a PSBT')
signtx_parser.add_argument('psbt', help='The Partially Signed Bitcoin Transaction to sign')
signtx_parser.set_defaults(func=signtx_handler)
getxpub_parser = subparsers.add_parser('getxpub', help='Get an extended public key')
getxpub_parser.add_argument('path', help='The BIP 32 derivation path to derive the key at')
getxpub_parser.set_defaults(func=getxpub_handler)
signmsg_parser = subparsers.add_parser('signmessage', help='Sign a message')
signmsg_parser.add_argument('message', help='The message to sign')
signmsg_parser.add_argument('path', help='The BIP 32 derivation path of the key to sign the message with')
signmsg_parser.set_defaults(func=signmessage_handler)
getkeypool_parser = subparsers.add_parser('getkeypool', help='Get JSON array of keys that can be imported to Bitcoin Core with importmulti')
getkeypool_parser.add_argument('--keypool', action='store_true', help='Indicates that the keys are to be imported to the keypool')
getkeypool_parser.add_argument('--internal', action='store_true', help='Indicates that the keys are change keys')
getkeypool_parser.add_argument('--sh_wpkh', action='store_true', help='Generate p2sh-nested segwit addresses (default path: m/49h/0h/0h/[0,1]/*)')
getkeypool_parser.add_argument('--wpkh', action='store_true', help='Generate bech32 addresses (default path: m/84h/0h/0h/[0,1]/*)')
getkeypool_parser.add_argument('--account', help='BIP43 account (default: 0)', type=int, default=0)
getkeypool_parser.add_argument('--path', help='Derivation path, default follows BIP43 convention, e.g. m/84h/0h/0h/1/* with --wpkh --internal. If this argument and --internal is not given, both internal and external keypools will be returned.')
getkeypool_parser.add_argument('start', type=int, help='The index to start at.')
getkeypool_parser.add_argument('end', type=int, help='The index to end at.')
getkeypool_parser.set_defaults(func=getkeypool_handler)
displayaddr_parser = subparsers.add_parser('displayaddress', help='Display an address')
group = displayaddr_parser.add_mutually_exclusive_group(required=True)
group.add_argument('--desc', help='Output Descriptor. E.g. wpkh([00000000/84h/0h/0h]xpub.../0/0), where 00000000 must match --fingerprint and xpub can be obtained with getxpub. See doc/descriptors.md in Bitcoin Core')
group.add_argument('--path', help='The BIP 32 derivation path of the key embedded in the address, default follows BIP43 convention, e.g. m/84h/0h/0h/1/*')
displayaddr_parser.add_argument('--sh_wpkh', action='store_true', help='Display the p2sh-nested segwit address associated with this key path')
displayaddr_parser.add_argument('--wpkh', action='store_true', help='Display the bech32 version of the address associated with this key path')
displayaddr_parser.set_defaults(func=displayaddress_handler)
setupdev_parser = subparsers.add_parser('setup', help='Setup a device. Passphrase protection uses the password given by -p. Requires interactive mode')
setupdev_parser.add_argument('--label', '-l', help='The name to give to the device', default='')
setupdev_parser.add_argument('--backup_passphrase', '-b', help='The passphrase to use for the backup, if applicable', default='')
setupdev_parser.set_defaults(func=setup_device_handler)
wipedev_parser = subparsers.add_parser('wipe', help='Wipe a device')
wipedev_parser.set_defaults(func=wipe_device_handler)
restore_parser = subparsers.add_parser('restore', help='Initiate the device restoring process. Requires interactive mode')
restore_parser.add_argument('--label', '-l', help='The name to give to the device', default='')
restore_parser.set_defaults(func=restore_device_handler)
backup_parser = subparsers.add_parser('backup', help='Initiate the device backup creation process')
backup_parser.add_argument('--label', '-l', help='The name to give to the device', default='')
backup_parser.add_argument('--backup_passphrase', '-b', help='The passphrase to use for the backup, if applicable', default='')
backup_parser.set_defaults(func=backup_device_handler)
promptpin_parser = subparsers.add_parser('promptpin', help='Have the device prompt for your PIN')
promptpin_parser.set_defaults(func=prompt_pin_handler)
sendpin_parser = subparsers.add_parser('sendpin', help='Send the numeric positions for your PIN to the device')
sendpin_parser.add_argument('pin', help='The numeric positions of the PIN')
sendpin_parser.set_defaults(func=send_pin_handler)
if sys.platform.startswith("linux"):
udevrules_parser = subparsers.add_parser('installudevrules', help='Install and load the udev rule files for the hardware wallet devices')
udevrules_parser.add_argument('--location', help='The path where the udev rules files will be copied', default='/etc/udev/rules.d/')
udevrules_parser.set_defaults(func=install_udev_rules_handler)
if any(arg == '--stdin' for arg in cli_args):
blank_count = 0
while True:
try:
line = input()
# Exit loop when we see 2 consecutive newlines (i.e. an empty line)
if line == '':
break
# Split the line and append it to the cli args
import shlex
cli_args.extend(shlex.split(line))
except EOFError:
# If we see EOF, stop taking input
break
# Parse arguments again for anything entered over stdin
args = parser.parse_args(cli_args)
device_path = args.device_path
device_type = args.device_type
password = args.password
command = args.command
result = {}
# Setup debug logging
logging.basicConfig(level=logging.DEBUG if args.debug else logging.WARNING)
# Enter the password on stdin
if args.stdinpass:
password = getpass.getpass('Enter your device password: ')
args.password = password
# List all available hardware wallet devices
if command == 'enumerate':
return args.func(args)
# Install the devices udev rules for Linux
if command == 'installudevrules':
with handle_errors(msg="installudevrules failed:", result=result):
result = args.func(args)
return result
# Auto detect if we are using fingerprint or type to identify device
if args.fingerprint or (args.device_type and not args.device_path):
client = find_device(args.device_path, args.password, args.device_type, args.fingerprint)
if not client:
return {'error':'Could not find device with specified fingerprint','code':DEVICE_CONN_ERROR}
elif args.device_type and args.device_path:
with handle_errors(result=result, code=DEVICE_CONN_ERROR):
client = get_client(device_type, device_path, password)
if 'error' in result:
return result
else:
return {'error':'You must specify a device type or fingerprint for all commands except enumerate','code': NO_DEVICE_TYPE}
client.is_testnet = args.testnet
# Do the commands
with handle_errors(result=result, debug=args.debug):
result = args.func(args, client)
with handle_errors(result=result, debug=args.debug):
client.close()
return result
def main():
result = process_commands(sys.argv[1:])
print(json.dumps(result))
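For reference, a hypothetical way to drive this module programmatically rather than through the installed console script is to hand process_commands an argv-style list; the device type below is purely illustrative:
from hwilib.cli import process_commands

# Equivalent to running `hwi enumerate` from a shell
print(process_commands(['enumerate']))

# Equivalent to `hwi -t trezor getmasterxpub`
print(process_commands(['-t', 'trezor', 'getmasterxpub']))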
| 50.837398
| 248
| 0.726131
|
b9201724009d8bdf16fedd23e1bd3e1cb23e7b9c
| 2,018
|
py
|
Python
|
PP4E/Examples/PP4E/Gui/Tools/threadtools-test-classes.py
|
BeacherHou/Python-_Markdown-
|
015d79a02d32f49395b80ca10919b3a09b72c4df
|
[
"MIT"
] | null | null | null |
PP4E/Examples/PP4E/Gui/Tools/threadtools-test-classes.py
|
BeacherHou/Python-_Markdown-
|
015d79a02d32f49395b80ca10919b3a09b72c4df
|
[
"MIT"
] | null | null | null |
PP4E/Examples/PP4E/Gui/Tools/threadtools-test-classes.py
|
BeacherHou/Python-_Markdown-
|
015d79a02d32f49395b80ca10919b3a09b72c4df
|
[
"MIT"
] | null | null | null |
# tests thread callback queue, but uses class bound methods for action and callbacks
import time
from threadtools import threadChecker, startThread
from tkinter.scrolledtext import ScrolledText
class MyGUI:
def __init__(self, reps=3):
self.reps = reps # uses default Tk root
self.text = ScrolledText() # save widget as state
self.text.pack()
threadChecker(self.text) # start thread check loop
self.text.bind('<Button-1>', # 3.x need list for map, range ok
lambda event: list(map(self.onEvent, range(6))) )
def onEvent(self, i): # code that spawns thread
myname = 'thread-%s' % i
startThread(
action = self.threadaction,
args = (i, ),
context = (myname,),
onExit = self.threadexit,
onFail = self.threadfail,
onProgress = self.threadprogress)
# thread's main action
def threadaction(self, id, progress): # what the thread does
for i in range(self.reps): # access to object state here
time.sleep(1)
if progress: progress(i) # progress callback: queued
if id % 2 == 1: raise Exception # odd numbered: fail
# thread callbacks: dispatched off queue in main thread
def threadexit(self, myname):
self.text.insert('end', '%s\texit\n' % myname)
self.text.see('end')
def threadfail(self, exc_info, myname): # have access to self state
self.text.insert('end', '%s\tfail\t%s\n' % (myname, exc_info[0]))
self.text.see('end')
def threadprogress(self, count, myname):
self.text.insert('end', '%s\tprog\t%s\n' % (myname, count))
self.text.see('end')
self.text.update() # works here: run in main thread
if __name__ == '__main__': MyGUI().text.mainloop()
| 42.041667
| 85
| 0.557483
|
cbaa53098a9be915ace17bee930847bbf004f640
| 189
|
py
|
Python
|
Python 3 - Estrutura de Controle/Desafio 52.py
|
Paimonz/Python-Estudos
|
bbc4d7abb2eebff7f160eac54a290fd9ea7a1cff
|
[
"MIT"
] | null | null | null |
Python 3 - Estrutura de Controle/Desafio 52.py
|
Paimonz/Python-Estudos
|
bbc4d7abb2eebff7f160eac54a290fd9ea7a1cff
|
[
"MIT"
] | null | null | null |
Python 3 - Estrutura de Controle/Desafio 52.py
|
Paimonz/Python-Estudos
|
bbc4d7abb2eebff7f160eac54a290fd9ea7a1cff
|
[
"MIT"
] | null | null | null |
n = int(input('Digite um número inteiro: '))
# Count the divisors of n; a prime has exactly two (1 and itself)
divisores = 0
for c in range(1, n + 1):
    if n % c == 0:
        divisores += 1
if divisores == 2:
    print('O número {} é primo!'.format(n))
else:
    print('O número {} NÃO é primo!'.format(n))
| 27
| 47
| 0.544974
|
adbaa09e900a7bbf942454d968c965b560041620
| 1,874
|
py
|
Python
|
lib/rucio/common/rse_attributes.py
|
ejr004/rucio
|
81620cc54e3536e3656dfc83a4563da4ee39247e
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/common/rse_attributes.py
|
ejr004/rucio
|
81620cc54e3536e3656dfc83a4563da4ee39247e
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/common/rse_attributes.py
|
ejr004/rucio
|
81620cc54e3536e3656dfc83a4563da4ee39247e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2015-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Wen Guan <wen.guan@cern.ch>, 2015
# - Mario Lassnig <mario.lassnig@cern.ch>, 2017
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Martin Barisits <martin.barisits@cern.ch>, 2020
# - Radu Carpa <radu.carpa@cern.ch>, 2022
"""
methods to get closeness between sites
"""
import logging
import traceback
from dogpile.cache.api import NoValue
from rucio.core import rse as rse_core
from rucio.common.cache import make_region_memcached
REGION = make_region_memcached(expiration_time=3600)
def get_rse_attributes(rse_id, session=None):
"""
List rse attributes
    :param rse_id: The RSE id.
:param session: The database session in use.
:returns: A dictionary with RSE attributes for a RSE.
"""
key = 'rse_attributes_%s' % (rse_id)
result = REGION.get(key)
if isinstance(result, NoValue):
try:
result = None
result = rse_core.list_rse_attributes(rse_id=rse_id, session=session)
REGION.set(key, result)
except:
logging.warning("Failed to get RSE %s attributes, error: %s" % (rse_id, traceback.format_exc()))
return result
| 30.721311
| 108
| 0.702241
|
f20ddc80c73201d4d9d0a6b0a1cef12bc5e68703
| 1,070
|
py
|
Python
|
Greedy/45_Jump_Game_II.py
|
hren-ron/LeetCode
|
3ba2766f8e6ad2bfb5c9686b362f000824e78474
|
[
"Apache-2.0"
] | null | null | null |
Greedy/45_Jump_Game_II.py
|
hren-ron/LeetCode
|
3ba2766f8e6ad2bfb5c9686b362f000824e78474
|
[
"Apache-2.0"
] | null | null | null |
Greedy/45_Jump_Game_II.py
|
hren-ron/LeetCode
|
3ba2766f8e6ad2bfb5c9686b362f000824e78474
|
[
"Apache-2.0"
] | null | null | null |
'''
Given an array of non-negative integers, you are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length at that position.
Your goal is to reach the last index in the minimum number of jumps.
Example:
Input: [2,3,1,1,4]
Output: 2
Explanation: The minimum number of jumps to reach the last index is 2.
Jump 1 step from index 0 to 1, then 3 steps to the last index.
Note:
You can assume that you can always reach the last index.
'''
class Solution:
    def jump(self, nums):
        n = len(nums)
        if n <= 1:
            return 0
        # jumps[i] = minimum number of jumps needed to reach index i (-1 = not reached yet)
        jumps = [-1] * n
        jumps[0] = 0
        for i in range(n):
            # Indices reachable from i are i+1 .. i+nums[i]; fill them from the far end,
            # stopping as soon as we hit an index that was already reached earlier.
            j = min(n - 1, i + nums[i])
            while j > i:
                if jumps[j] != -1:
                    break
                jumps[j] = jumps[i] + 1
                if j == n - 1:
                    return jumps[j]
                j -= 1
        return jumps[n - 1]
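A quick driver against the example from the problem statement above (hypothetical, not part of the original file):
if __name__ == '__main__':
    print(Solution().jump([2, 3, 1, 1, 4]))  # expected output: 2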
| 22.291667
| 102
| 0.514953
|
e3e90a8224766e0801460c906b7b0a3aabf3c006
| 3,161
|
py
|
Python
|
django_app/todo/todo/settings.py
|
the-vikas/ToDo-List
|
eadf91792e251085f9701c21369e3981123a83a8
|
[
"bzip2-1.0.6"
] | null | null | null |
django_app/todo/todo/settings.py
|
the-vikas/ToDo-List
|
eadf91792e251085f9701c21369e3981123a83a8
|
[
"bzip2-1.0.6"
] | null | null | null |
django_app/todo/todo/settings.py
|
the-vikas/ToDo-List
|
eadf91792e251085f9701c21369e3981123a83a8
|
[
"bzip2-1.0.6"
] | null | null | null |
"""
Django settings for todo project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gveeatj_ud6yk$@c!49wzbzsn=hgl1m3c=7#yvyxlx60#vm+oi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todo_list',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static'),
]
| 25.288
| 91
| 0.694401
|
0181b3fc4cd5efc6cd8e097ee0d6fc4fcce0a812
| 451
|
py
|
Python
|
great_expectations/render/renderer/__init__.py
|
ncsu-las/great_expectations
|
b4606e184eb97dd84787d7f56699fed708c7b731
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/render/renderer/__init__.py
|
ncsu-las/great_expectations
|
b4606e184eb97dd84787d7f56699fed708c7b731
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/render/renderer/__init__.py
|
ncsu-las/great_expectations
|
b4606e184eb97dd84787d7f56699fed708c7b731
|
[
"Apache-2.0"
] | null | null | null |
from .column_section_renderer import (
ProfilingResultsColumnSectionRenderer,
ValidationResultsColumnSectionRenderer,
ExpectationSuiteColumnSectionRenderer,
)
from .other_section_renderer import (
ProfilingResultsOverviewSectionRenderer
)
from .page_renderer import (
ProfilingResultsPageRenderer,
ExpectationSuitePageRenderer,
ValidationResultsPageRenderer
)
from .site_index_page_renderer import SiteIndexPageRenderer
| 25.055556
| 59
| 0.840355
|
df024893307f81d3c7c616526c7fb739e04ca2d1
| 397
|
py
|
Python
|
leads/migrations/0003_auto_20190211_1142.py
|
exenin/Django-CRM
|
009461beb2279590c656465c6b47cd2e1e5aaa70
|
[
"MIT"
] | 2
|
2020-07-28T12:32:57.000Z
|
2020-08-20T11:47:07.000Z
|
leads/migrations/0003_auto_20190211_1142.py
|
exenin/Django-CRM
|
009461beb2279590c656465c6b47cd2e1e5aaa70
|
[
"MIT"
] | 8
|
2020-06-05T20:58:52.000Z
|
2022-03-11T23:48:48.000Z
|
leads/migrations/0003_auto_20190211_1142.py
|
exenin/Django-CRM
|
009461beb2279590c656465c6b47cd2e1e5aaa70
|
[
"MIT"
] | 5
|
2020-03-21T09:55:05.000Z
|
2020-04-03T06:51:02.000Z
|
# Generated by Django 2.1.5 on 2019-02-11 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leads', '0002_lead_tags'),
]
operations = [
migrations.AlterField(
model_name='lead',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
]
| 20.894737
| 75
| 0.594458
|
744c5e3fc853e5dc2a65c8e3b1d6d952a9a0c656
| 2,657
|
py
|
Python
|
infoxlm/fairseq/fairseq/binarizer.py
|
Maria-philna/unilm
|
5550a335c6d2ae5838b1a90e50cb46f81edcd50f
|
[
"MIT"
] | 5,129
|
2019-09-30T11:21:03.000Z
|
2022-03-31T22:35:12.000Z
|
infoxlm/fairseq/fairseq/binarizer.py
|
Maria-philna/unilm
|
5550a335c6d2ae5838b1a90e50cb46f81edcd50f
|
[
"MIT"
] | 604
|
2019-10-05T00:39:46.000Z
|
2022-03-31T11:12:07.000Z
|
infoxlm/fairseq/fairseq/binarizer.py
|
Maria-philna/unilm
|
5550a335c6d2ae5838b1a90e50cb46f81edcd50f
|
[
"MIT"
] | 1,034
|
2019-09-30T15:01:32.000Z
|
2022-03-31T06:14:50.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
import os
from fairseq.tokenizer import tokenize_line
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
class Binarizer:
@staticmethod
def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False,
offset=0, end=-1):
nseq, ntok = 0, 0
replaced = Counter()
def replaced_consumer(word, idx):
if idx == dict.unk_index and word != dict.unk_word:
replaced.update([word])
with open(filename, 'r', encoding='utf-8') as f:
f.seek(offset)
# next(f) breaks f.tell(), hence readline() must be used
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
ids = dict.encode_line(
line=line,
line_tokenizer=tokenize,
add_if_not_exist=False,
consumer=replaced_consumer,
append_eos=append_eos,
reverse_order=reverse_order,
)
nseq += 1
ntok += len(ids)
consumer(ids)
line = f.readline()
return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}
@staticmethod
def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
nseq = 0
with open(filename, 'r') as f:
f.seek(offset)
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
ids = alignment_parser(line)
nseq += 1
consumer(ids)
line = f.readline()
return {'nseq': nseq}
@staticmethod
def find_offsets(filename, num_chunks):
with open(filename, 'r', encoding='utf-8') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
safe_readline(f)
offsets[i] = f.tell()
return offsets
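For context, a minimal sketch (not part of the original file) of how this class is typically consumed: split the input into byte ranges with find_offsets, then binarize each range with its own consumer. The dictionary argument is assumed to be a fairseq Dictionary (anything exposing encode_line works):
from fairseq.binarizer import Binarizer

def binarize_in_chunks(filename, dictionary, num_chunks):
    # find_offsets returns num_chunks + 1 byte offsets delimiting the ranges;
    # the final offset is left at 0, which binarize() treats as "read to EOF".
    offsets = Binarizer.find_offsets(filename, num_chunks)
    results = []
    for i in range(num_chunks):
        ids = []
        stats = Binarizer.binarize(filename, dictionary, ids.append,
                                   offset=offsets[i], end=offsets[i + 1])
        results.append((ids, stats))
    return results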
| 32.402439
| 104
| 0.521641
|
ac397c39090e60c45aaec83f20f0c525a67ea36a
| 1,628
|
py
|
Python
|
django_cas_ng/migrations/0001_initial.py
|
forcityplatform/django-cas-ng-mutu
|
d798a33c382b174e10d525261c89bbc79ed95bc4
|
[
"MIT"
] | 2
|
2021-04-23T09:16:09.000Z
|
2021-06-14T14:35:22.000Z
|
django_cas_ng/migrations/0001_initial.py
|
forcityplatform/django-cas-ng-mutu
|
d798a33c382b174e10d525261c89bbc79ed95bc4
|
[
"MIT"
] | 1
|
2018-10-26T23:29:46.000Z
|
2018-10-26T23:29:46.000Z
|
django_cas_ng/migrations/0001_initial.py
|
forcityplatform/django-cas-ng-mutu
|
d798a33c382b174e10d525261c89bbc79ed95bc4
|
[
"MIT"
] | 1
|
2020-04-15T11:36:07.000Z
|
2020-04-15T11:36:07.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-13 18:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ProxyGrantingTicket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_key', models.CharField(blank=True, max_length=255, null=True)),
('pgtiou', models.CharField(blank=True, max_length=255, null=True)),
('pgt', models.CharField(blank=True, max_length=255, null=True)),
('date', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SessionTicket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_key', models.CharField(max_length=255)),
('ticket', models.CharField(max_length=255)),
],
),
migrations.AlterUniqueTogether(
name='proxygrantingticket',
unique_together=set([('session_key', 'user')]),
),
]
| 37.860465
| 159
| 0.613636
|
b68123b7fe0f2053712bceab15706dfb5942c062
| 68,614
|
py
|
Python
|
versioneer.py
|
klauer/archstats
|
1ba388b34177c1e6f1960fd31cc6bb3d45e8a98a
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-08-25T10:53:56.000Z
|
2021-08-25T10:53:56.000Z
|
versioneer.py
|
klauer/archstats
|
1ba388b34177c1e6f1960fd31cc6bb3d45e8a98a
|
[
"BSD-3-Clause-LBNL"
] | 22
|
2020-09-16T01:25:02.000Z
|
2022-03-07T17:51:49.000Z
|
versioneer.py
|
klauer/archstats
|
1ba388b34177c1e6f1960fd31cc6bb3d45e8a98a
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2020-07-16T22:29:34.000Z
|
2021-04-15T22:07:53.000Z
|
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
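A minimal `[versioneer]` section might look like the following sketch (the field names are the ones parsed by `get_config_from_root` further down in this file; the concrete paths and prefixes are only illustrative):

    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix = v
    parentdir_prefix = myproject-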
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
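For example, `setup.py` (or any other caller) can consume these values directly; a minimal sketch using only the keys described above:

    import versioneer

    info = versioneer.get_versions()
    print(info['version'])            # e.g. "0.11+2.g1076c97.dirty"
    print(info['full-revisionid'])    # full SHA1 commit id, or None
    if info['error'] is not None:
        raise RuntimeError('version lookup failed: %s' % info['error'])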
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
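As a concrete illustration (the values are hypothetical), the pieces parsed from `git describe` render to a pep440 string as follows, using the `render()` helper defined later in this file:

    pieces = {'closest-tag': '0.11', 'distance': 2, 'short': '1076c97',
              'dirty': True, 'long': '<full sha1>', 'error': None, 'date': None}
    render(pieces, 'pep440')
    # -> {'version': '0.11+2.g1076c97.dirty', 'dirty': True, ...}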
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when then
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
import errno
import json
import os
import re
import subprocess
import sys
try:
import configparser
except ImportError:
import ConfigParser as configparser
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
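# Editor's note: hypothetical example (not part of versioneer) of the parsing above.
# If 'git describe' printed "v1.2-3-gabc1234-dirty" and tag_prefix is "v", the
# resulting pieces dict would look roughly like:
#   {"long": "<full sha>", "short": "abc1234", "error": None, "dirty": True,
#    "closest-tag": "1.2", "distance": 3, "date": "<commit date, ISO-8601-like>"}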
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
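# Editor's note: hypothetical example (not part of versioneer). With
# parentdir_prefix "myproject-" and an sdist unpacked into ".../myproject-1.2.3/",
# the function above returns
# {"version": "1.2.3", "full-revisionid": None, "dirty": False, "error": None, "date": None}.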
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyway.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
| 37.576123
| 79
| 0.624144
|
f93963fb7e52f8e28b71628a85a146f113be54eb
| 384
|
py
|
Python
|
plotly/validators/heatmapgl/_z.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/heatmapgl/_z.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/heatmapgl/_z.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name='z', parent_name='heatmapgl', **kwargs):
super(ZValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='data',
**kwargs
)
| 27.428571
| 75
| 0.635417
|
c373398fb511d61f08caf8d847231639a81b635a
| 1,028
|
py
|
Python
|
inputs/fields/field.py
|
Receiling/ENPAR
|
decd2945d21a7be5a0f73c37cfc5e252301aab15
|
[
"MIT"
] | 5
|
2021-06-09T13:38:22.000Z
|
2022-01-29T05:26:51.000Z
|
inputs/fields/field.py
|
Receiling/ENPAR
|
decd2945d21a7be5a0f73c37cfc5e252301aab15
|
[
"MIT"
] | null | null | null |
inputs/fields/field.py
|
Receiling/ENPAR
|
decd2945d21a7be5a0f73c37cfc5e252301aab15
|
[
"MIT"
] | 1
|
2021-10-31T05:09:16.000Z
|
2021-10-31T05:09:16.000Z
|
from abc import ABC, abstractclassmethod
class Field(ABC):
"""Abstract class `Field` define one indexing method,
generate counter from raw text data and index token in raw text data
Arguments:
ABC {ABC} -- abstract base class
"""
@abstractclassmethod
def count_vocab_items(self, counter, sentences):
"""This function constructs counter using each sentence content,
prepare for vocabulary
Arguments:
counter {dict} -- element count dict
sentences {list} -- text data
"""
raise NotImplementedError
@abstractclassmethod
def index(self, instance, vocab, sentences):
"""This function constrcuts instance using sentences and vocabulary,
each namespace is a mapping method using different type data
Arguments:
instance {dict} -- collections of various fields
vocab {dict} -- vocabulary
sentences {list} -- text data
"""
raise NotImplementedError
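# Editor's note: hypothetical sketch (not part of the original package) of a
# concrete Field implementing the two abstract methods above, assuming sentences
# are lists of token strings and vocab maps token -> id under a namespace:
#   class TokenField(Field):
#       def count_vocab_items(self, counter, sentences):
#           for sentence in sentences:
#               for token in sentence:
#                   counter.setdefault('tokens', {})
#                   counter['tokens'][token] = counter['tokens'].get(token, 0) + 1
#       def index(self, instance, vocab, sentences):
#           instance['tokens'] = [[vocab['tokens'].get(t, 0) for t in sentence]
#                                 for sentence in sentences]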
| 28.555556
| 76
| 0.643969
|
b76f472b4edd5862bf5c0e8571bc3288585dd768
| 2,507
|
py
|
Python
|
test/functional/feature_help.py
|
raymaker/deftchain-0.17
|
fa0444240eb926b48c475acf50da73322855d7d3
|
[
"MIT"
] | null | null | null |
test/functional/feature_help.py
|
raymaker/deftchain-0.17
|
fa0444240eb926b48c475acf50da73322855d7d3
|
[
"MIT"
] | null | null | null |
test/functional/feature_help.py
|
raymaker/deftchain-0.17
|
fa0444240eb926b48c475acf50da73322855d7d3
|
[
"MIT"
] | 2
|
2019-01-16T18:48:44.000Z
|
2019-01-16T19:01:13.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify that starting deftchain with -h works as expected."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class HelpTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes)
# Don't start the node
def get_node_output(self, *, ret_code_expected):
ret_code = self.nodes[0].process.wait(timeout=5)
assert_equal(ret_code, ret_code_expected)
self.nodes[0].stdout.seek(0)
self.nodes[0].stderr.seek(0)
out = self.nodes[0].stdout.read()
err = self.nodes[0].stderr.read()
self.nodes[0].stdout.close()
self.nodes[0].stderr.close()
# Clean up TestNode state
self.nodes[0].running = False
self.nodes[0].process = None
self.nodes[0].rpc_connected = False
self.nodes[0].rpc = None
return out, err
def run_test(self):
self.log.info("Start deftchain with -h for help text")
self.nodes[0].start(extra_args=['-h'])
# Node should exit immediately and output help to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'Options' in output
self.log.info("Help text received: {} (...)".format(output[0:60]))
self.log.info("Start deftchain with -version for version information")
self.nodes[0].start(extra_args=['-version'])
# Node should exit immediately and output version to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'version' in output
self.log.info("Version text received: {} (...)".format(output[0:60]))
# Test that arguments not in the help results in an error
self.log.info("Start deftchaind with -fakearg to make sure it does not start")
self.nodes[0].start(extra_args=['-fakearg'])
# Node should exit immediately and output an error to stderr
_, output = self.get_node_output(ret_code_expected=1)
assert b'Error parsing command line arguments' in output
self.log.info("Error message received: {} (...)".format(output[0:60]))
if __name__ == '__main__':
HelpTest().main()
| 39.793651
| 86
| 0.664539
|
c62c69e06685c3a927c36b7b66d969bf2e686ad8
| 1,263
|
py
|
Python
|
PyOpenGL-3.0.2/OpenGL/raw/GL/INTEL/parallel_arrays.py
|
frederica07/Dragon_Programming_Process
|
c0dff2e20c1be6db5adc6f9977efae8f7f888ef5
|
[
"BSD-2-Clause"
] | null | null | null |
PyOpenGL-3.0.2/OpenGL/raw/GL/INTEL/parallel_arrays.py
|
frederica07/Dragon_Programming_Process
|
c0dff2e20c1be6db5adc6f9977efae8f7f888ef5
|
[
"BSD-2-Clause"
] | null | null | null |
PyOpenGL-3.0.2/OpenGL/raw/GL/INTEL/parallel_arrays.py
|
frederica07/Dragon_Programming_Process
|
c0dff2e20c1be6db5adc6f9977efae8f7f888ef5
|
[
"BSD-2-Clause"
] | null | null | null |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_INTEL_parallel_arrays'
def _f( function ):
return _p.createFunction( function,_p.GL,'GL_INTEL_parallel_arrays',False)
_p.unpack_constants( """GL_PARALLEL_ARRAYS_INTEL 0x83F4
GL_VERTEX_ARRAY_PARALLEL_POINTERS_INTEL 0x83F5
GL_NORMAL_ARRAY_PARALLEL_POINTERS_INTEL 0x83F6
GL_COLOR_ARRAY_PARALLEL_POINTERS_INTEL 0x83F7
GL_TEXTURE_COORD_ARRAY_PARALLEL_POINTERS_INTEL 0x83F8""", globals())
glget.addGLGetConstant( GL_PARALLEL_ARRAYS_INTEL, (1,) )
@_f
@_p.types(None,_cs.GLint,_cs.GLenum,arrays.GLvoidpArray)
def glVertexPointervINTEL( size,type,pointer ):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLvoidpArray)
def glNormalPointervINTEL( type,pointer ):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLenum,arrays.GLvoidpArray)
def glColorPointervINTEL( size,type,pointer ):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLenum,arrays.GLvoidpArray)
def glTexCoordPointervINTEL( size,type,pointer ):pass
def glInitParallelArraysINTEL():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
| 39.46875
| 78
| 0.820269
|
0bac9f2479ded23e1264b40704e9cef98be07ec3
| 2,032
|
py
|
Python
|
tests/client/test_config.py
|
cfogg/python-client
|
40e6891c8240e6b2acd5df538e622e9f15de43d6
|
[
"Apache-2.0"
] | null | null | null |
tests/client/test_config.py
|
cfogg/python-client
|
40e6891c8240e6b2acd5df538e622e9f15de43d6
|
[
"Apache-2.0"
] | null | null | null |
tests/client/test_config.py
|
cfogg/python-client
|
40e6891c8240e6b2acd5df538e622e9f15de43d6
|
[
"Apache-2.0"
] | null | null | null |
"""Configuration unit tests."""
# pylint: disable=protected-access,no-self-use,line-too-long
from splitio.client import config
from splitio.engine.impressions import ImpressionsMode
class ConfigSanitizationTests(object):
"""Inmemory storage-based integration tests."""
def test_parse_operation_mode(self):
"""Make sure operation mode is correctly captured."""
assert config._parse_operation_mode('some', {}) == 'inmemory-standalone'
assert config._parse_operation_mode('localhost', {}) == 'localhost-standalone'
assert config._parse_operation_mode('some', {'redisHost': 'x'}) == 'redis-consumer'
assert config._parse_operation_mode('some', {'uwsgiClient': True}) == 'uwsgi-consumer'
def test_sanitize_imp_mode(self):
"""Test sanitization of impressions mode."""
mode, rate = config._sanitize_impressions_mode('OPTIMIZED', 1)
assert mode == ImpressionsMode.OPTIMIZED
assert rate == 60
mode, rate = config._sanitize_impressions_mode('DEBUG', 1)
assert mode == ImpressionsMode.DEBUG
assert rate == 1
mode, rate = config._sanitize_impressions_mode('debug', 1)
assert mode == ImpressionsMode.DEBUG
assert rate == 1
mode, rate = config._sanitize_impressions_mode('ANYTHING', 200)
assert mode == ImpressionsMode.OPTIMIZED
assert rate == 200
mode, rate = config._sanitize_impressions_mode(43, -1)
assert mode == ImpressionsMode.OPTIMIZED
assert rate == 60
mode, rate = config._sanitize_impressions_mode('OPTIMIZED')
assert mode == ImpressionsMode.OPTIMIZED
assert rate == 300
mode, rate = config._sanitize_impressions_mode('DEBUG')
assert mode == ImpressionsMode.DEBUG
assert rate == 60
def test_sanitize(self):
"""Test sanitization."""
configs = {}
processed = config.sanitize('some', configs)
assert processed['redisLocalCacheEnabled'] # check default is True
| 37.62963
| 94
| 0.671752
|
f95e1333ac543570c7afc2700e11021923f0fd0f
| 579
|
py
|
Python
|
torchtools/test.py
|
Takezo87/torchtools
|
4230305d9063dabee3614f0dcd8557739b90f817
|
[
"Apache-2.0"
] | null | null | null |
torchtools/test.py
|
Takezo87/torchtools
|
4230305d9063dabee3614f0dcd8557739b90f817
|
[
"Apache-2.0"
] | 1
|
2022-02-26T06:23:52.000Z
|
2022-02-26T06:23:52.000Z
|
torchtools/test.py
|
Takezo87/torchtools
|
4230305d9063dabee3614f0dcd8557739b90f817
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: 100_test.ipynb (unless otherwise specified).
__all__ = ['items_to_arrays']
# Cell
from .core import *
from .data import *
from .models import *
from .datasets import *
from .augmentations import *
from .datablock import *
from .dataloader import *
# Cell
def items_to_arrays(items):
'''convert list of item tuples into X,y numpy arrays (for use with numpy dataloader)'''
# return np.stack([x[0] for x in items]), np.stack([x[1] for x in items])
return tuple(np.stack([x[i] for x in items]) for i in range(len(items[0])))
| 32.166667
| 91
| 0.704663
|
2b8e46981704623c4bb8affc4b14df1d789e6c25
| 1,835
|
py
|
Python
|
testpiechart1.py
|
vigyywaran/foo
|
954c03c9c38e7f0198422a1b46d348a453585949
|
[
"MIT"
] | null | null | null |
testpiechart1.py
|
vigyywaran/foo
|
954c03c9c38e7f0198422a1b46d348a453585949
|
[
"MIT"
] | 1
|
2022-03-13T05:50:41.000Z
|
2022-03-13T05:50:41.000Z
|
testpiechart1.py
|
AshithaDN/Measuring-Calories-of-Food-using-Images
|
abcead7b19d5eb7540c4d125fba4d1404a26510d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
# This python script creates a piechart of the various categories displayed as a result of labelling the input image. The piechart is stored as a png file for later use.
import matplotlib.pyplot as plt
import collections
import csv
def name1():
percentage=[]
item=[]
#labels=[]
calorie={}
explodearray=[0.0,0.0,0.0,0.0,0.0,0.0]
i=0
total=0
# The following code gets the food labels and their respective accuracy scores and stores them in item[] list and percentage[] list respectively.
f=open('test.txt','r')
while True:
st=f.readline()
l=len(st)
if l!=0:
n=st.rfind(' ')
num=st[n:-1].strip()
percentage.append(float(num))
total=total+float(num)
str2=st[:n].strip()
item.append(str2)
i+=1
else:
break
# The following code gets the calorie values of the food labels in item[] and stores it in calorie{} dictionary.
i=0
while True:
with open('prefinal.csv') as csvfile:
read=csv.reader(csvfile)
for row in read:
str2=str(row[0]).strip()
if str2 == item[i]:
calorie.update({item[i]:(row[1])})
i+=1
if i>4:
break
if i>4:
break
print(calorie)
    # Others is used to pad the piechart to get a total of 100% in case the various label accuracies don't add up to 100
item.append('others')
percentage.append(1-total)
print(percentage,item)
# Shows and Saves piechart created
fig=plt.Figure()
fig.set_canvas(plt.gcf().canvas)
p1=plt.pie(percentage,explode=explodearray,labels=item,autopct='%1.1f%%',rotatelabels=True)
plt.legend(p1[0],list(calorie.values()),loc='lower left')
#plt.legend(p1[0],labels,loc='lower left')
plt.title('CALORIES OF FOOD LABELS')
fig.savefig("static/piechart" + ".png",format='png')
#plt.show()
| 31.101695
| 169
| 0.650136
|
451db55473765539997b3ba1989f626841c7ccce
| 623
|
py
|
Python
|
handlers/util.py
|
Hironsan/TatsujinDaifugo
|
8dfacf2433650eb4bdb0b498168c91faab3fa92e
|
[
"MIT"
] | 6
|
2016-08-11T14:34:21.000Z
|
2021-11-17T20:19:26.000Z
|
handlers/util.py
|
Hironsan/TatsujinDaifugo
|
8dfacf2433650eb4bdb0b498168c91faab3fa92e
|
[
"MIT"
] | null | null | null |
handlers/util.py
|
Hironsan/TatsujinDaifugo
|
8dfacf2433650eb4bdb0b498168c91faab3fa92e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from models.user import User
def check_group_permission(f):
"""
    Redirect if the user does not belong to the group.
"""
def wrapper(*args):
user = args[0].get_current_user()
if not user:
args[0].redirect(args[0].reverse_url('login'))
return
user = User.get(user['id'])
if user.belongs_to_group(int(args[1])):
pass
else:
            error_message = 'この操作は許可されていません。'  # "This operation is not permitted."
args[0].redirect(args[0].reverse_url('index') + '?error_message={0}'.format(error_message))
return
return f(*args)
return wrapper
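# Editor's note: hypothetical usage sketch (not part of the original handlers).
# The decorator above targets Tornado-style handler methods where args[0] is the
# handler instance and args[1] is the group id being checked, e.g.:
#   class GroupPageHandler(BaseHandler):
#       @check_group_permission
#       def get(self, group_id):
#           self.render('group.html', group_id=group_id)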
| 28.318182
| 103
| 0.561798
|
91d7cbdef5402bfa0734ced83e46cdb9e189103f
| 269
|
py
|
Python
|
Dataset/Leetcode/test/35/644.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/35/644.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/35/644.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
def XXX(self, nums: List[int], target: int) -> int:
if target in nums:
return nums.index(target)
for index, value in enumerate(nums):
if value > target:
return index
return len(nums)
| 26.9
| 55
| 0.542751
|
1168288a8b8d5f1d7bff316683f62b394cc50856
| 1,595
|
py
|
Python
|
nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py
|
mfalkiewicz/nipype
|
775e21b78fb1ffa2ff9cb12e6f052868bd44d052
|
[
"Apache-2.0"
] | 1
|
2015-01-19T13:12:27.000Z
|
2015-01-19T13:12:27.000Z
|
nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py
|
bpinsard/nipype
|
373bdddba9f675ef153951afa368729e2d8950d2
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py
|
bpinsard/nipype
|
373bdddba9f675ef153951afa368729e2d8950d2
|
[
"Apache-2.0"
] | null | null | null |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..segmentation import BRAINSTalairach
def test_BRAINSTalairach_inputs():
input_map = dict(AC=dict(argstr='--AC %s',
sep=',',
),
ACisIndex=dict(argstr='--ACisIndex ',
),
IRP=dict(argstr='--IRP %s',
sep=',',
),
IRPisIndex=dict(argstr='--IRPisIndex ',
),
PC=dict(argstr='--PC %s',
sep=',',
),
PCisIndex=dict(argstr='--PCisIndex ',
),
SLA=dict(argstr='--SLA %s',
sep=',',
),
SLAisIndex=dict(argstr='--SLAisIndex ',
),
args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(deprecated='1.0.0',
nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='--inputVolume %s',
),
outputBox=dict(argstr='--outputBox %s',
hash_files=False,
),
outputGrid=dict(argstr='--outputGrid %s',
hash_files=False,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = BRAINSTalairach.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_BRAINSTalairach_outputs():
output_map = dict(outputBox=dict(),
outputGrid=dict(),
)
outputs = BRAINSTalairach.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 24.921875
| 67
| 0.611285
|
1afd784901b2c94dea1fe82dcff9a52faf2d182f
| 1,175
|
py
|
Python
|
Creating & Running Test Suiter | Batch Testing/TestSuites/All_TestSuites.py
|
scottwedge/Unit-Test-Framework
|
787bef0c2dabd39625ac5b47d9e6681105ccf104
|
[
"Apache-2.0"
] | null | null | null |
Creating & Running Test Suiter | Batch Testing/TestSuites/All_TestSuites.py
|
scottwedge/Unit-Test-Framework
|
787bef0c2dabd39625ac5b47d9e6681105ccf104
|
[
"Apache-2.0"
] | null | null | null |
Creating & Running Test Suiter | Batch Testing/TestSuites/All_TestSuites.py
|
scottwedge/Unit-Test-Framework
|
787bef0c2dabd39625ac5b47d9e6681105ccf104
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from Package1.TC_LoginTest import LoginTest
from Package1.TC_SignupTest import SignupTest
from Package2.TC_PaymentTest import PaymentTest
from Package2.TC_PaymentReturnsTest import PaymentReturnsTest
# Get all the tests from LoginTest, SignUpTest, PaymentTest and PaymentReturnsTest
tc1 = unittest.TestLoader().loadTestsFromTestCase(LoginTest)
tc2 = unittest.TestLoader().loadTestsFromTestCase(SignupTest)
tc3 = unittest.TestLoader().loadTestsFromTestCase(PaymentTest)
tc4 = unittest.TestLoader().loadTestsFromTestCase(PaymentReturnsTest)
# Creating Test Suites
'''Login and SignUp Tests will come into Sanity Test Suite'''
sanityTestSuite = unittest.TestSuite([tc1, tc2]) # Sanity Test Suite
# unittest.TextTestRunner().run(sanityTestSuite)
'''Payment and PaymentReturns Test will come into Functional Test Suite'''
functionalTestSuite = unittest.TestSuite([tc3, tc4]) # Functional Test Suite
# unittest.TextTestRunner().run(functionalTestSuite)
'''Rest all the test cases will fall under Master Test Suite'''
masterTestSuite = unittest.TestSuite([tc1, tc2, tc3, tc4]) # Master Test Suite
unittest.TextTestRunner(verbosity=2).run(masterTestSuite)
| 47
| 82
| 0.813617
|
78312eccc35854cf68c72468520748323f5e69b5
| 358
|
py
|
Python
|
homeassistant/components/remootio/exceptions.py
|
sam43434/core
|
60d41e763daf83cdfef662a99a4daee690f3fb90
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/remootio/exceptions.py
|
sam43434/core
|
60d41e763daf83cdfef662a99a4daee690f3fb90
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/remootio/exceptions.py
|
sam43434/core
|
60d41e763daf83cdfef662a99a4daee690f3fb90
|
[
"Apache-2.0"
] | null | null | null |
"""Exceptions for the Remootio integration."""
from homeassistant.exceptions import HomeAssistantError
class UnsupportedRemootioDeviceError(HomeAssistantError):
"""Error to indicate unsupported Remootio device."""
class UnsupportedRemootioApiVersionError(UnsupportedRemootioDeviceError):
"""Error to indicate unsupported Remootio API version."""
| 32.545455
| 73
| 0.815642
|
10bd8926ceb0fcc4d1162d7eb8a1f67131d4083d
| 300
|
py
|
Python
|
frappe/desk/doctype/onboarding_step/onboarding_step.py
|
monroy95/frappe
|
a9411e845634174a7f8c8b11aa0f3dbdbaa4ac7c
|
[
"MIT"
] | 1
|
2021-12-18T18:37:29.000Z
|
2021-12-18T18:37:29.000Z
|
frappe/desk/doctype/onboarding_step/onboarding_step.py
|
monroy95/frappe
|
a9411e845634174a7f8c8b11aa0f3dbdbaa4ac7c
|
[
"MIT"
] | 3
|
2021-02-27T11:50:14.000Z
|
2021-05-03T06:48:49.000Z
|
frappe/desk/doctype/onboarding_step/onboarding_step.py
|
monroy95/frappe
|
a9411e845634174a7f8c8b11aa0f3dbdbaa4ac7c
|
[
"MIT"
] | 2
|
2021-09-02T09:51:55.000Z
|
2021-09-07T04:55:42.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class OnboardingStep(Document):
def before_export(self, doc):
doc.is_complete = 0
doc.is_skipped = 0
| 25
| 58
| 0.75
|
8a267cbddabe126eabedfeee341fe272c6d172a0
| 1,795
|
py
|
Python
|
python/637.average-of-levels-in-binary-tree.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 10
|
2019-09-15T00:23:57.000Z
|
2022-01-05T12:53:42.000Z
|
python/637.average-of-levels-in-binary-tree.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 3
|
2021-06-30T00:39:26.000Z
|
2021-08-01T07:13:59.000Z
|
python/637.average-of-levels-in-binary-tree.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 6
|
2020-02-08T02:55:22.000Z
|
2022-01-02T22:48:18.000Z
|
#
# @lc app=leetcode id=637 lang=python3
#
# [637] Average of Levels in Binary Tree
#
# https://leetcode.com/problems/average-of-levels-in-binary-tree/description/
#
# algorithms
# Easy (64.59%)
# Likes: 1926
# Dislikes: 201
# Total Accepted: 187.7K
# Total Submissions: 284.1K
# Testcase Example: '[3,9,20,null,null,15,7]'
#
# Given the root of a binary tree, return the average value of the nodes on
# each level in the form of an array. Answers within 10^-5 of the actual answer
# will be accepted.
#
# Example 1:
#
#
# Input: root = [3,9,20,null,null,15,7]
# Output: [3.00000,14.50000,11.00000]
# Explanation: The average value of nodes on level 0 is 3, on level 1 is 14.5,
# and on level 2 is 11.
# Hence return [3, 14.5, 11].
#
#
# Example 2:
#
#
# Input: root = [3,9,20,15,7]
# Output: [3.00000,14.50000,11.00000]
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [1, 10^4].
# -2^31 <= Node.val <= 2^31 - 1
#
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution:
def averageOfLevels(self, root: TreeNode) -> List[float]:
if not root:
return [0]
queue = deque([root])
res = []
while queue:
size = len(queue)
level_total = 0
for _ in range(size):
node = queue.popleft()
level_total += node.val
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
res.append(level_total / size)
return res
# @lc code=end
| 22.4375
| 79
| 0.581058
|
7411d70f33e1f9db50459b8e0fc2a337de4b0af2
| 1,844
|
py
|
Python
|
revenue_tracker/models/people.py
|
mfcovington/django-revenue-tracker
|
336b51484b62fac256b086ac565e4f92f8c7223d
|
[
"BSD-3-Clause"
] | 1
|
2021-09-06T21:07:32.000Z
|
2021-09-06T21:07:32.000Z
|
revenue_tracker/models/people.py
|
mfcovington/django-revenue-tracker
|
336b51484b62fac256b086ac565e4f92f8c7223d
|
[
"BSD-3-Clause"
] | null | null | null |
revenue_tracker/models/people.py
|
mfcovington/django-revenue-tracker
|
336b51484b62fac256b086ac565e4f92f8c7223d
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from customer_tracker.models import Customer as CustomerBase
class Customer(CustomerBase):
class Meta:
proxy = True
@property
def is_repeat_customer(self):
if self.tx_count > 1:
return True
else:
return False
@property
def reaction_count(self):
return sum(
sum(self.transactions.values_list('number_of_reactions'), ()))
@property
def total_revenue(self):
return sum(
sum(self.transactions.values_list('total_price'), ()))
@property
def tx_count(self):
return len(set(self.transactions.values_list('date')))
class Vendor(models.Model):
class Meta:
ordering = ['name']
name = models.CharField(
max_length=255,
unique=True,
)
contact = models.ForeignKey(
'customer_tracker.Contact',
on_delete=models.PROTECT,
)
country = models.ForeignKey(
'customer_tracker.Country',
on_delete=models.PROTECT,
)
customers = models.ManyToManyField(
'Customer',
through='Transaction',
)
website = models.URLField(
blank=True,
)
def __str__(self):
return self.name
@property
def contact_name(self):
return self.contact.name
@property
def is_repeat_customer(self):
if self.tx_count > 1:
return True
else:
return False
@property
def reaction_count(self):
return sum(
sum(self.transactions.values_list('number_of_reactions'), ()))
@property
def total_revenue(self):
return sum(
sum(self.transactions.values_list('total_price'), ()))
@property
def tx_count(self):
return len(set(self.transactions.values_list('date')))
| 21.694118
| 74
| 0.603037
|
04a4e67fd8312d80a0120d7592da0eea09da44a5
| 2,985
|
py
|
Python
|
openff/recharge/tests/cli/test_generate.py
|
openforcefield/openff-recharge
|
0ea3ef986e33c3ecf05924e64fb2e1872913b093
|
[
"MIT"
] | 7
|
2020-07-20T02:56:48.000Z
|
2022-03-22T18:09:00.000Z
|
openff/recharge/tests/cli/test_generate.py
|
openforcefield/openff-recharge
|
0ea3ef986e33c3ecf05924e64fb2e1872913b093
|
[
"MIT"
] | 94
|
2020-07-07T23:59:40.000Z
|
2022-03-31T09:03:22.000Z
|
openff/recharge/tests/cli/test_generate.py
|
openforcefield/openff-recharge
|
0ea3ef986e33c3ecf05924e64fb2e1872913b093
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
from multiprocessing.pool import Pool
import numpy
import pytest
from openff.recharge.charges.exceptions import OEQuacpacError
from openff.recharge.cli.generate import _compute_esp
from openff.recharge.cli.generate import generate as generate_cli
from openff.recharge.conformers import ConformerGenerator, ConformerSettings
from openff.recharge.conformers.exceptions import OEOmegaError
from openff.recharge.esp import ESPSettings
from openff.recharge.esp.exceptions import Psi4Error
from openff.recharge.esp.psi4 import Psi4ESPGenerator
from openff.recharge.esp.storage import MoleculeESPStore
from openff.recharge.grids import GridSettings
def test_generate(runner, monkeypatch):
# Mock the Psi4 calls so the test can run even when not present.
# This requires also mocking the multiprocessing to ensure the
# monkeypatch on Psi4 holds.
def mock_imap(_, func, iterable):
return [func(x) for x in iterable]
def mock_psi4_generate(*_):
return numpy.zeros((1, 3)), numpy.zeros((1, 1)), numpy.zeros((1, 3))
monkeypatch.setattr(Psi4ESPGenerator, "generate", mock_psi4_generate)
monkeypatch.setattr(Pool, "imap", mock_imap)
# Create a mock set of inputs.
with open("smiles.json", "w") as file:
json.dump(["C"], file)
with open("esp-settings.json", "w") as file:
file.write(ESPSettings(grid_settings=GridSettings(spacing=1.0)).json())
with open("conformer-settings.json", "w") as file:
file.write(ConformerSettings(method="omega", sampling_mode="sparse").json())
result = runner.invoke(generate_cli)
if result.exit_code != 0:
raise result.exception
assert os.path.isfile("esp-store.sqlite")
esp_store = MoleculeESPStore()
assert len(esp_store.retrieve("C")) == 1
@pytest.mark.parametrize("error_type", [OEOmegaError, OEQuacpacError])
def test_compute_esp_oe_error(error_type, caplog, monkeypatch):
def mock_conformer_generate(*_):
raise error_type()
monkeypatch.setattr(ConformerGenerator, "generate", mock_conformer_generate)
with caplog.at_level(logging.ERROR):
_compute_esp(
"C",
ConformerSettings(),
ESPSettings(grid_settings=GridSettings(spacing=1.0)),
)
assert "Coordinates could not be generated for" in caplog.text
assert error_type.__name__ in caplog.text
def test_compute_esp_psi4_error(caplog, monkeypatch):
def mock_psi4_generate(*_):
raise Psi4Error("std_out", "std_err")
monkeypatch.setattr(ConformerGenerator, "generate", lambda *args: [None])
monkeypatch.setattr(Psi4ESPGenerator, "generate", mock_psi4_generate)
with caplog.at_level(logging.ERROR):
_compute_esp(
"C",
ConformerSettings(),
ESPSettings(grid_settings=GridSettings(spacing=1.0)),
)
assert "Psi4 failed to run for conformer" in caplog.text
assert "Psi4Error" in caplog.text
| 33.166667
| 84
| 0.723618
|
1890508ce5ad7ba8bb93185245bf356cf858a538
| 6,045
|
py
|
Python
|
SemEval/dataset_readers/semeval_datareader.py
|
VanDuc0209/Sentiment_Twitter
|
666168b2082a4d3736efed3743c2dc535030065e
|
[
"MIT"
] | 23
|
2019-05-09T08:23:55.000Z
|
2022-03-05T14:10:30.000Z
|
SemEval/dataset_readers/semeval_datareader.py
|
VanDuc0209/Sentiment_Twitter
|
666168b2082a4d3736efed3743c2dc535030065e
|
[
"MIT"
] | null | null | null |
SemEval/dataset_readers/semeval_datareader.py
|
VanDuc0209/Sentiment_Twitter
|
666168b2082a4d3736efed3743c2dc535030065e
|
[
"MIT"
] | 6
|
2020-05-20T15:59:40.000Z
|
2021-04-06T13:54:19.000Z
|
import glob
import html
import json
import logging
import os
import re
import string
from typing import Dict
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from nltk.corpus import stopwords
from overrides import overrides
# import SemEval
# from SemEval.models.semeval_classifier import SemEvalClassifier
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
EMBEDDING_DIM = 100
HIDDEN_DIM = 200
@DatasetReader.register("SemEval2017-Task4-SubsetA")
class SemEvalDatasetReader(DatasetReader):
"""
    Reads tab-separated tweet files (tweet id, sentiment, text) from SemEval2017 Task4 SubsetA.
"""
def __init__(self,
lazy: bool = False,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None) -> None:
super().__init__(lazy)
self.SEPARATOR = "\t"
# data id set
self.data = set()
# stop words list
self.stop = stopwords.words('english') + list(string.punctuation) + ['rt', 'via']
# tokenizer
self._tokenizer = tokenizer or WordTokenizer()
# token_indexers
self._token_indexers = token_indexers or {
"tokens": SingleIdTokenIndexer()
}
@overrides
def _read(self, folder_path: str):
# read files below the folder
files = glob.glob(os.path.join(folder_path, "*.txt"))
for file_path in files:
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s",
file_path)
for index, line in enumerate(data_file):
columns = line.rstrip().split(self.SEPARATOR)
if not columns:
continue
if len(columns)<3:
logger.info(index)
logger.info(columns)
tweet_id = columns[0]
sentiment = columns[1]
text = columns[2:]
text = self.clean_text(''.join(text))
if tweet_id not in self.data:
self.data.add(tweet_id)
yield self.text_to_instance(sentiment, text)
else:
continue
@overrides
def text_to_instance(self, sentiment: str,
text: str = None) -> Instance: # type: ignore
# pylint: disable=arguments-differ
tokenized_text = self._tokenizer.tokenize(text)
text_field = TextField(tokenized_text, self._token_indexers)
fields = {'tokens': text_field}
if sentiment is not None:
fields['label'] = LabelField(sentiment)
return Instance(fields)
def clean_text(self, text: str):
"""
Remove extra quotes from text files and html entities
Args:
text (str): a string of text
Returns: (str): the "cleaned" text
"""
text = text.rstrip()
if '""' in text:
if text[0] == text[-1] == '"':
text = text[1:-1]
text = text.replace('\\""', '"')
text = text.replace('""', '"')
text = text.replace('\\""', '"')
text = html.unescape(text)
text = ' '.join(text.split())
return text
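# Illustrative example (not part of the original reader): clean_text strips the outer
# quotes, collapses doubled quotes and unescapes HTML entities, e.g.
#
#     reader = SemEvalDatasetReader()
#     reader.clean_text('"I ""really"" liked it &amp; more"')
#     # -> 'I "really" liked it & more'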
# def classify(text: str, model: SemEvalClassifier):
# tokenizer = WordTokenizer()
# token_indexers = {'tokens': SingleIdTokenIndexer()}
# tokens = tokenizer.tokenize(text)
# instance = Instance({'tokens': TextField(tokens, token_indexers)})
# logits = model.forward_on_instances([instance])[0]['logits']
# label_id = np.argmax(logits)
# label = model.vocab.get_token_from_index(label_id, 'labels')
# print('text: {}, label: {}'.format(text, label))
# def main():
# reader = TatoebaSentenceReader()
# train_set = reader.read('dataset/train/')
# dev_set = reader.read('dataset/test/')
# vocab = Vocabulary.from_instances(train_set,
# min_count={'tokens': 3})
# token_embedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),
# embedding_dim=EMBEDDING_DIM)
# word_embeddings = BasicTextFieldEmbedder({"tokens": token_embedding})
# encoder = PytorchSeq2VecWrapper(
# torch.nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM, batch_first=True))
# positive_label = vocab.get_token_index('eng', namespace='labels')
# model = SemEvalClassifier(word_embeddings, encoder, vocab, positive_label=positive_label)
# optimizer = optim.Adam(model.parameters())
# iterator = BucketIterator(batch_size=32, sorting_keys=[("tokens", "num_tokens")])
# iterator.index_with(vocab)
# trainer = Trainer(model=model,
# optimizer=optimizer,
# iterator=iterator,
# train_dataset=train_set,
# validation_dataset=dev_set,
# num_epochs=10)
# trainer.train()
# classify('Take your raincoat in case it rains.', model)
# classify('Tu me recuerdas a mi padre.', model)
# classify('Wie organisierst du das Essen am Mittag?', model)
# classify("Il est des cas où cette règle ne s'applique pas.", model)
# classify('Estou fazendo um passeio em um parque.', model)
# classify('Ve, postmorgaŭ jam estas la limdato.', model)
# classify('Credevo che sarebbe venuto.', model)
# classify('Nem tudja, hogy én egy macska vagyok.', model)
# classify('Nella ur nli qrib acemma deg tenwalt.', model)
# classify('Kurşun kalemin yok, değil mi?', model)
# pass
# if __name__ == "__main__":
# main()
| 35.982143
| 95
| 0.603474
|
088ff46e76b6eb07707aafe59412d52c69286bf5
| 2,099
|
py
|
Python
|
sumo_rl/agents/ql_agent.py
|
joaovitorblabres/sumo-rl
|
ec9d178cd0289366ba0a8648da52972d31d1026e
|
[
"MIT"
] | null | null | null |
sumo_rl/agents/ql_agent.py
|
joaovitorblabres/sumo-rl
|
ec9d178cd0289366ba0a8648da52972d31d1026e
|
[
"MIT"
] | null | null | null |
sumo_rl/agents/ql_agent.py
|
joaovitorblabres/sumo-rl
|
ec9d178cd0289366ba0a8648da52972d31d1026e
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
from sumo_rl.exploration.epsilon_greedy import EpsilonGreedy
class QLAgent:
def __init__(self, starting_state, state_space, action_space, alpha=0.1, gamma=0.95, exploration_strategy=EpsilonGreedy(), groupRecommendation=0.2):
self.state = starting_state
self.state_space = state_space
self.action_space = action_space
self.action = None
self.alpha = alpha
self.gamma = gamma
self.groupAction = None
self.groupActing = False
# followGroup is read in act() but was never initialised in the original constructor;
# default it to False so the agent falls back to its own exploration policy until an
# external controller toggles it (like groupAction/groupActing).
self.followGroup = False
self.groupRecommendation = groupRecommendation
self.decayGroup = 1
self.minEpsilonGroup = 0.05
self.q_table = {self.state: [0 for _ in range(action_space.n)]}
self.exploration = exploration_strategy
self.acc_reward = 0
self.followed = False
def act(self):
if self.groupActing:
# print(self.groupAction, self.state, self.action_space, self.groupRecommendation)
if self.followGroup:
self.followed = True
self.action = self.groupAction
# print("GROUP", self.action, self.groupAction)
else:
self.followed = False
self.action = self.exploration.choose(self.q_table, self.state, self.action_space)
# print("GREEDY", self.action)
self.groupRecommendation = max(self.groupRecommendation*self.decayGroup, self.minEpsilonGroup)
else:
self.action = self.exploration.choose(self.q_table, self.state, self.action_space)
return self.action
def learn(self, next_state, reward, done=False):
if next_state not in self.q_table:
self.q_table[next_state] = [random.uniform(0, 0) for _ in range(self.action_space.n)]
s = self.state
s1 = next_state
a = self.action
# print(s, a, s1, self.action_space.n)
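# Tabular Q-learning update: Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a))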
self.q_table[s][a] = self.q_table[s][a] + self.alpha*(reward[0] + self.gamma*max(self.q_table[s1]) - self.q_table[s][a])
self.state = s1
self.acc_reward += reward[0]
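# Usage sketch (illustrative only; assumes a discrete state/action space object exposing
# an `n` attribute, e.g. gym.spaces.Discrete, and the default EpsilonGreedy exploration):
#
#     from gym import spaces
#     agent = QLAgent(starting_state=0,
#                     state_space=spaces.Discrete(10),
#                     action_space=spaces.Discrete(2))
#     action = agent.act()
#     agent.learn(next_state=1, reward=[1.0])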
| 39.603774
| 152
| 0.633635
|
1bb0ced574e0da710b6f1e27302367fc80f23204
| 412
|
py
|
Python
|
myproject/museos/migrations/0006_auto_20180516_0852.py
|
pariaspe/X-Serv-Practica-Museos
|
349896dae5b2b35440a5e524e81857300e147163
|
[
"Apache-2.0"
] | null | null | null |
myproject/museos/migrations/0006_auto_20180516_0852.py
|
pariaspe/X-Serv-Practica-Museos
|
349896dae5b2b35440a5e524e81857300e147163
|
[
"Apache-2.0"
] | null | null | null |
myproject/museos/migrations/0006_auto_20180516_0852.py
|
pariaspe/X-Serv-Practica-Museos
|
349896dae5b2b35440a5e524e81857300e147163
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('museos', '0005_auto_20180513_1745'),
]
operations = [
migrations.AlterField(
model_name='comentario',
name='fecha',
field=models.DateTimeField(auto_now_add=True),
),
]
| 20.6
| 58
| 0.614078
|
78989f445ef372f4eac798e2c3277eb36f8f9bb2
| 19,009
|
py
|
Python
|
gui/alplogparser.py
|
hanckmann/alplogs
|
fa9fc538b47303168f131d0e46341e9f9331481a
|
[
"Apache-2.0"
] | null | null | null |
gui/alplogparser.py
|
hanckmann/alplogs
|
fa9fc538b47303168f131d0e46341e9f9331481a
|
[
"Apache-2.0"
] | null | null | null |
gui/alplogparser.py
|
hanckmann/alplogs
|
fa9fc538b47303168f131d0e46341e9f9331481a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) Patrick Hanckmann
# All rights reserved.
#
# License information is provided in LICENSE.md
#
# Author: Patrick Hanckmann <hanckmann@gmail.com>
# Project: Alpine System Info Log Viewer
from abc import ABCMeta
from pathlib import Path
from datetime import datetime
from typing import Optional, Union, Tuple
__author__ = 'Patrick Hanckmann'
__copyright__ = 'Copyright 2020, Patrick Hanckmann'
__version__ = '0.0.1'
__email__ = 'hanckmann@gmail.com'
__status__ = 'Testing' # 'Production'
# ############## #
# Module Parsers
# ############## #
def split_first_colon(line: str) -> Tuple[str, str]:
line = line.strip()
parts = line.split(':')
key = parts[0].strip()
data = line.replace(key, '').strip()
data = data[1:].strip()
return key, data
def factory(name):
module_headers = {
'IGNORE': Ignore,
'UNKNOWN': Unknown,
'CPU': CPU,
'MEMORY': Memory,
'NETWORK': Network,
'EXTERNAL IP ADDRESS': IPAddress,
'DISKS': Disks,
'DISK SPACE USAGE': DiskSpaceUsage,
'MOUNT': Mount,
'ZFS POOLS': ZFSPools,
'BTRFS SCRUB RESULTS': BTRFSScrubResults,
'BTRFS STATS': BTRFSStats,
'SMART STATUS': SmartStatus,
'RC STATUS': RCStatus,
'SYSTEMCTL STATUS': SystemctlStatus,
'USB': USB,
'UPGRADABLE PACKAGES': UpgradablePackages,
'PROCESSES': Process,
'USERS': Users,
'GROUPS': Groups,
}
if name in module_headers:
return module_headers[name]()
else:
print('ERROR: Module not supported: "{}"'.format(name))
return module_headers['UNKNOWN']()
class AlpLogModule(metaclass=ABCMeta):
def __init__(self):
super().__init__()
self.timestamp = None
self.items = {'lines': []}
def add_line(self, line) -> None:
if not line.strip():
return
self.items['lines'].append(line)
def finalise(self) -> None:
return
def set_timestamp(self, timestamp: datetime):
self.timestamp = timestamp
def to_dict(self):
return self.items
def to_table_header(self):
return tuple([key for key in self.items.keys() if not key == 'lines'])
def name(self):
return str(type(self))[21:-2]
class Ignore(AlpLogModule):
pass # No extra functionality
class Unknown(AlpLogModule):
pass # No extra functionality
class Header(AlpLogModule):
def add_line(self, line) -> None:
if not line.strip():
return
# Parsing
line = line.strip()
value = None
key, data = split_first_colon(line)
if key == 'date':
value = datetime.strptime(data, '%Y-%m-%d').date()
elif key == 'time':
value = datetime.strptime(data, '%H:%M:%S').time()
elif key == 'Kernel version':
value = data.replace('#', '')
elif key == 'send e-mail':
if data == 'yes':
value = True
else:
value = False
else:
# Simple items and future proofing
if data:
value = data
if value is not None:
self.items[key] = value
if not self.timestamp:
if 'date' in self.items and self.items['date'] and 'time' in self.items and self.items['time']:
self.timestamp = datetime.combine(self.items['date'],
self.items['time'])
class CPU(AlpLogModule):
def add_line(self, line) -> Union[None, AlpLogModule]:
if not line.strip():
return
# Parsing
line = line.strip()
key, data = split_first_colon(line)
if key == 'processor':
# from pudb import set_trace; set_trace()
# Special case, we need to check for extra instance
if 'processor' in self.items:
# Create new instance and return as such
new_instance = CPU()
new_instance.add_line(line)
return new_instance
if key in ('processor', 'cpu family', 'model', 'stepping', 'physical id', 'siblings', 'core id', 'cpu cores', 'apicid', 'initial apicid', 'cpuid level', 'clflush size', 'cache_alignment'):
value = int(data)
elif key in ('cpu MHz', 'bogomips'):
value = float(data)
elif key in ('fpu', 'fpu_exception', 'wp'):
if data.lower() == 'yes':
value = True
else:
value = False
elif key in ('flags', 'bugs', 'power management'):
value = data.split()
else:
value = str(data)
if value is not None:
self.items[key] = value
def name(self):
processor = None
if 'processor' in self.items:
processor = self.items['processor']
return '{} - {}'.format(str(type(self))[21:-2], processor)
class Memory(AlpLogModule):
def add_line(self, line) -> None:
if not line.strip():
return
data = line.split()
if line.startswith('Mem'):
self.items['Mem'] = {
'total': int(data[1]),
'used': int(data[2]),
'free': int(data[3]),
'shared': int(data[4]),
'buff/cache': int(data[5]),
'available': int(data[6]),
}
elif line.startswith('Swap'):
self.items['Swap'] = {
'total': int(data[1]),
'used': int(data[2]),
'free': int(data[3]),
}
else:
pass
def to_dict(self):
items = dict()
for key, value in self.items.items():
if key == 'lines':
continue
for subkey, subvalue in value.items():
items['{} - {}'.format(key, subkey)] = subvalue
return items
def to_table_header(self):
items = list()
for key, value in self.items.items():
if key == 'lines':
continue
for subkey in value.keys():
items.append('{} - {}'.format(key, subkey))
return tuple(items)
class Network(AlpLogModule):
def add_line(self, line) -> Union[None, AlpLogModule]:
if not line.strip():
return
# Parsing
if not line.startswith(' '):
if 'name' in self.items:
# Create new instance and return as such
new_instance = Network()
new_instance.add_line(line)
return new_instance
parts = line.split(':')
self.items['index'] = int(parts[0])
self.items['name'] = parts[1].strip()
self.items['name_other'] = parts[2].strip()
self.items['lines'] = list()
return
self.items['lines'].append(line.strip())
if line.strip().startswith('inet6'):
parts = line.strip().split()
self.items['inet6'] = parts[1]
elif line.strip().startswith('inet'):
parts = line.strip().split()
self.items['inet'] = parts[1]
def name(self):
name = None
if 'name' in self.items:
name = self.items['name']
return '{} - {}'.format(str(type(self))[21:-2], name)
class IPAddress(AlpLogModule):
def add_line(self, line) -> None:
if not line.strip():
return
self.items['ip'] = str(line.strip())
class Disks(AlpLogModule):
using = {
0: 'name',
# 1: 'maj:min',
# 2: 'rm ',
3: 'size',
4: 'ro',
5: 'fstype',
6: 'mountpoint',
7: 'uuid',
}
def add_line(self, line) -> Union[None, AlpLogModule]:
if not line.strip():
return
# Parsing
if line.strip().startswith('NAME'):
return
if 'name' in self.items:
# Create new instance and return as such
new_instance = Disks()
new_instance.add_line(line)
return new_instance
parts = line.split()
for index, part in enumerate(parts):
if index in self.using:
key = self.using[index]
# Strip the lsblk tree-drawing characters from the value.
value = part.strip().replace('├', '').replace('│', '').replace('└', '').replace('─', ' ')
self.items[key] = value
def name(self):
name = None
if 'name' in self.items:
name = self.items['name']
return '{} - {}'.format(str(type(self))[21:-2], name)
class Mount(AlpLogModule):
def add_line(self, line) -> None:
if not line.strip():
return
key = line.split()[0]
self.items[key] = line
class DiskSpaceUsage(AlpLogModule):
using = {
0: 'filesystem',
1: 'type',
2: 'size',
3: 'used',
4: 'available',
5: 'use %',
6: 'mounted on',
}
def add_line(self, line) -> Union[None, AlpLogModule]:
if not line.strip():
return
if line.strip().startswith('Filesystem'):
return
if 'filesystem' in self.items:
# Create new instance and return as such
new_instance = DiskSpaceUsage()
new_instance.add_line(line)
return new_instance
parts = line.split()
for index, part in enumerate(parts):
if index in self.using:
key = self.using[index]
value = part.strip()
self.items[key] = value
def name(self):
name = None
if 'filesystem' in self.items:
name = self.items['filesystem'].replace('/dev/', '')
return '{} - {}'.format('Disk Usage', name)
class ZFSPools(AlpLogModule):
using = {
0: 'name',
1: 'size',
2: 'alloc',
3: 'free',
4: 'ckpoint',
5: 'expandsz',
6: 'frag',
7: 'cap',
8: 'dedup',
9: 'health',
10: 'altroot',
}
def add_line(self, line) -> Union[None, AlpLogModule]:
if not line.strip():
return
# Parsing
if line.strip().startswith('NAME'):
return
# Part 1 or parts 2
parts = line.split()
if len(parts) == 11 and 'pool' in parts[0]:
if 'name' in self.items:
# Create new instance and return as such
new_instance = ZFSPools()
new_instance.add_line(line)
return new_instance
for index, part in enumerate(parts):
if index in self.using:
key = self.using[index]
value = part.strip()
if value == '-':
value = ''
self.items[key] = value
def name(self):
name = None
if 'name' in self.items:
name = self.items['name']
return '{} - {}'.format(str(type(self))[21:-2], name)
class BTRFSScrubResults(AlpLogModule):
using = {
0: 'PATH',
1: 'UUID',
2: 'Scrub started',
3: 'Status',
4: 'Duration',
5: 'Total to scrub',
6: 'Rate',
7: 'Error summary',
}
def add_line(self, line) -> Union[None, AlpLogModule]:
if not line.strip():
return
parts = line.split(':')
if 'PATH' in self.items and parts[0] in 'PATH':
# Create new instance and return as such
new_instance = BTRFSScrubResults()
new_instance.add_line(line)
return new_instance
if parts[0] in self.using.values():
key = parts[0].strip()
value = parts[1].strip()
self.items[key] = value
else:
pass
def name(self):
name = None
if 'PATH' in self.items:
name = self.items['PATH'].split('/')[-1]
if not name:
name = '/'
return '{} - {}'.format('BTRFS Scrub Result', name)
class BTRFSStats(AlpLogModule):
using = {
0: 'write_io_errs',
1: 'read_io_errs',
2: 'flush_io_errs',
3: 'corruption_errs',
4: 'generation_errs',
}
def add_line(self, line) -> Union[None, AlpLogModule]:
if not line.strip():
return
parts = line.split()
fparts = parts[0].split('.')
device = fparts[0][1:-1]
key = fparts[1]
if 'filesystem' not in self.items:
self.items['filesystem'] = device
if self.items['filesystem'] != device:
new_instance = BTRFSStats()
new_instance.add_line(line)
return new_instance
if key in self.using.values():
value = parts[1].strip()
self.items[key] = value
else:
pass
def name(self):
name = None
if 'filesystem' in self.items:
name = self.items['filesystem']
return '{} - {}'.format('BTRFS Stats', name)
class SmartStatus(AlpLogModule):
using = {
0: 'filesystem',
1: 'Model Family',
2: 'Device Model',
3: 'Serial Number',
4: 'LU WWN Device Id',
5: 'Firmware Version',
6: 'User Capacity',
7: 'Sector Sizes',
8: 'Sector Size',
9: 'Rotation Rate',
10: 'Form Factor',
11: 'Device is',
12: 'ATA Version is',
13: 'SATA Version is',
14: 'SMART support is',
15: 'test result',
}
def add_line(self, line) -> Union[None, AlpLogModule]:
if not line.strip():
return
if line.strip().startswith('Filesystem'):
return
if line.startswith('------------------- /'):
if 'filesystem' in self.items:
# Create new instance and return as such
new_instance = SmartStatus()
new_instance.add_line(line)
return new_instance
parts = line.split(':')
if parts[0] in self.using.values():
key = parts[0].strip()
value = parts[1].strip()
self.items[key] = value
elif line.startswith('------------------- /'):
key = 'filesystem'
value = line.replace('-', '').strip()
self.items[key] = value
elif line.startswith('SMART overall-health self-assessment test result'):
key = 'test result'
value = line.split(':')[1].strip()
self.items[key] = value
else:
pass
def name(self):
name = None
if 'filesystem' in self.items:
name = self.items['filesystem'].replace('/dev/', '')
return '{} - {}'.format('Smart Status', name)
class RCStatus(AlpLogModule):
def add_line(self, line) -> None:
if not line.strip():
return
parts = line.strip().split()
if line.startswith('Runlevel'):
self.items[parts[1].strip().upper()] = ''
else:
self.items[parts[0].strip()] = parts[2].strip()
class SystemctlStatus(AlpLogModule):
def add_line(self, line) -> None:
if not line.strip():
return
parts = line.strip().split()
if len(parts) >= 4:
self.items[parts[0].strip()] = parts[3].strip()
class USB(AlpLogModule):
def add_line(self, line) -> None:
if not line.strip():
return
class UpgradablePackages(AlpLogModule):
def __init__(self):
super().__init__()
self._packages = list()
def add_line(self, line) -> None:
if not line.strip():
return
if line.startswith('UPGRADABLE PACKAGES'):
return
if line.startswith('-'):
return
if line.startswith('Installed'):
return
self._packages.append(line.strip())
def finalise(self) -> None:
self.items['count'] = str(len(self._packages)) if len(self._packages) else ''
class Process(AlpLogModule):
def add_line(self, line) -> None:
if not line.strip():
return
class Users(AlpLogModule):
def __init__(self):
super().__init__()
self._users = list()
def add_line(self, line) -> None:
if not line.strip():
return
self._users.append(line.strip())
def finalise(self) -> None:
for index, user in enumerate(self._users):
self.items[index] = user
class Groups(AlpLogModule):
def __init__(self):
super().__init__()
self._groups = list()
def add_line(self, line) -> None:
if not line.strip():
return
self._groups.append(line.strip())
def finalise(self) -> None:
for index, group in enumerate(self._groups):
self.items[index] = group
# ############## #
# Logfile Parser
# ############## #
class AlpLogParser():
def __init__(self, filepath: Path):
# Open file and iterate over lines
# Evaluate two lines to detect modules (headers)
self.modules = list()
with open(filepath) as fp:
current_module = None
for line in fp:
new_module = self.detect_module(line)
if new_module:
if current_module:
self.finalise_module(module=current_module)
current_module = new_module
if self.modules and self.modules[0]:
current_module.set_timestamp(self.modules[0].timestamp)
else:
if current_module:
new_module = current_module.add_line(line)
if new_module:
self.finalise_module(module=current_module)
current_module = new_module
current_module.set_timestamp(self.modules[0].timestamp)
self.finalise_module(module=current_module)
def finalise_module(self, module):
if not isinstance(module, Ignore):
module.finalise()
self.modules.append(module)
def detect_module(self, line: str) -> Optional[AlpLogModule]:
if line.strip() == 'STATUS INFORMATION':
# File header detected
return Header()
if line.strip() == 'UPGRADABLE PACKAGES':
# File header detected
return UpgradablePackages()
if line.strip() == 'ACCESS INFORMATION':
# File header detected
return Ignore()
if line.startswith('# ') or line.startswith('### '):
# Module header detected
line = line.replace('### ', '')
line = line.replace('# ', '')
line = line.replace(':', '')
line = line.strip()
return factory(line)
return None
def names(self):
return [module.name() for module in self.modules]
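# Usage sketch (illustrative; the log path is hypothetical):
#
#     parser = AlpLogParser(Path('/var/log/alpine-status.log'))
#     for module in parser.modules:
#         print(module.name(), module.to_dict())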
| 28.757943
| 196
| 0.512231
|
fc6b0f6726ada08b88ac940cce7e1f95ba3f819c
| 7,818
|
py
|
Python
|
pylot/perception/detection/lane_detection_canny_operator.py
|
Shaviv-Hoffman-Lowitz/pylot
|
d9a16ffb6fbfdad21c910d9c8efa8c4c831c752f
|
[
"Apache-2.0"
] | 2
|
2021-06-07T08:51:44.000Z
|
2021-06-07T08:54:00.000Z
|
pylot/perception/detection/lane_detection_canny_operator.py
|
akhilkanduri/pylot_edited
|
d1295a42f0edd79670dc64053824a3e075d433e2
|
[
"Apache-2.0"
] | null | null | null |
pylot/perception/detection/lane_detection_canny_operator.py
|
akhilkanduri/pylot_edited
|
d1295a42f0edd79670dc64053824a3e075d433e2
|
[
"Apache-2.0"
] | null | null | null |
"""Implements an operator that detects lanes."""
import math
from collections import namedtuple
import cv2
import erdos
import numpy as np
Line = namedtuple("Line", "x1, y1, x2, y2, slope")
class CannyEdgeLaneDetectionOperator(erdos.Operator):
"""Detects driving lanes using a camera.
The operator uses standard vision techniques (Canny edge).
Args:
camera_stream (:py:class:`erdos.ReadStream`): The stream on which
camera frames are received.
detected_lanes_stream (:py:class:`erdos.WriteStream`): Stream on which
the operator sends
:py:class:`~pylot.perception.messages.LanesMessage` messages.
flags (absl.flags): Object to be used to access absl flags.
"""
def __init__(self, camera_stream, detected_lanes_stream, flags):
camera_stream.add_callback(self.on_msg_camera_stream,
[detected_lanes_stream])
self._flags = flags
self._logger = erdos.utils.setup_logging(self.config.name,
self.config.log_file_name)
self._kernel_size = 7
@staticmethod
def connect(camera_stream):
"""Connects the operator to other streams.
Args:
camera_stream (:py:class:`erdos.ReadStream`): The stream on which
camera frames are received.
Returns:
:py:class:`erdos.WriteStream`: Stream on which the operator sends
:py:class:`~pylot.perception.messages.LanesMessage` messages.
"""
detected_lanes_stream = erdos.WriteStream()
return [detected_lanes_stream]
@erdos.profile_method()
def on_msg_camera_stream(self, msg, detected_lanes_stream):
"""Invoked whenever a frame message is received on the stream.
Args:
msg: A :py:class:`~pylot.perception.messages.FrameMessage`.
detected_lanes_stream (:py:class:`erdos.WriteStream`): Stream on
which the operator sends
:py:class:`~pylot.perception.messages.LanesMessage` messages.
"""
self._logger.debug('@{}: {} received message'.format(
msg.timestamp, self.config.name))
assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
# Make a copy of the image coming into the operator.
image = np.copy(msg.frame.as_numpy_array())
# Get the dimensions of the image.
x_lim, y_lim = image.shape[1], image.shape[0]
# Convert to grayscale.
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Apply gaussian blur.
image = cv2.GaussianBlur(image, (self._kernel_size, self._kernel_size),
0)
# Apply the Canny Edge Detector.
image = cv2.Canny(image, 30, 60)
# Define a region of interest.
points = np.array(
[[
(0, y_lim), # Bottom left corner.
(0, y_lim - 60),
(x_lim // 2 - 20, y_lim // 2),
(x_lim // 2 + 20, y_lim // 2),
(x_lim, y_lim - 60),
(x_lim, y_lim), # Bottom right corner.
]],
dtype=np.int32)
image = self._region_of_interest(image, points)
# Hough lines.
image = self._draw_lines(image)
detected_lanes_stream.send(erdos.Message(msg.timestamp, image))
def _region_of_interest(self, image, points):
mask = np.zeros_like(image)
cv2.fillPoly(mask, points, 255)
return cv2.bitwise_and(image, mask)
def _extrapolate_lines(self, image, left_line, right_line):
top_y = None
if left_line is not None and right_line is not None:
top_y = min(
[left_line.y1, left_line.y2, right_line.y1, right_line.y2])
base_y = image.shape[0]
final_lines = []
if left_line is not None:
actual_slope = float(left_line.y2 -
left_line.y1) / float(left_line.x2 -
left_line.x1)
base_x = int((base_y - left_line.y1) / actual_slope) + left_line.x1
final_lines.append(
Line(base_x, base_y, left_line.x1, left_line.y1, actual_slope))
if top_y is None:
top_y = min([left_line.y1, left_line.y2])
top_x = int((top_y - left_line.y2) / actual_slope) + left_line.x2
final_lines.append(
Line(top_x, top_y, left_line.x2, left_line.y2, actual_slope))
if right_line is not None:
actual_slope = float(right_line.y2 -
right_line.y1) / float(right_line.x2 -
right_line.x1)
base_x = int(
(base_y - right_line.y1) / actual_slope) + right_line.x1
final_lines.append(
Line(base_x, base_y, right_line.x1, right_line.y1,
actual_slope))
if top_y is None:
top_y = min([right_line.y1, right_line.y2])
top_x = int((top_y - right_line.y2) / actual_slope) + right_line.x2
final_lines.append(
Line(top_x, top_y, right_line.x2, right_line.y2, actual_slope))
return final_lines
def _draw_lines(self, image):
lines = cv2.HoughLinesP(image,
rho=1,
theta=np.pi / 180.0,
threshold=40,
minLineLength=10,
maxLineGap=30)
line_img = np.zeros((image.shape[0], image.shape[1], 3),
dtype=np.uint8)
if lines is None:
return line_img
# Construct the Line tuple collection.
cmp_lines = []
for line in lines:
for x1, y1, x2, y2 in line:
slope = math.degrees(math.atan2(y2 - y1, x2 - x1))
cmp_lines.append(Line(x1, y1, x2, y2, slope))
# Sort the lines by their slopes after filtering lines whose slopes
# are > 20 or < -20.
cmp_lines = sorted(filter(
lambda line: line.slope > 20 or line.slope < -20, cmp_lines),
key=lambda line: line.slope)
if len(cmp_lines) == 0:
return line_img
# Filter the lines with a positive and negative slope and choose
# a single line out of those.
left_lines = [
line for line in cmp_lines if line.slope < 0 and line.x1 < 300
]
right_lines = [
line for line in cmp_lines
if line.slope > 0 and line.x1 > image.shape[1] - 300
]
final_lines = []
# Find the longest line from the left and the right lines and
# extrapolate to the middle of the image.
left_line = None
if len(left_lines) != 0:
left_line = max(left_lines,
key=lambda line: abs(line.y2 - line.y1))
final_lines.append(left_line)
right_line = None
if len(right_lines) != 0:
right_line = max(right_lines,
key=lambda line: abs(line.y2 - line.y1))
final_lines.append(right_line)
final_lines.extend(
self._extrapolate_lines(image, left_line, right_line))
for x1, y1, x2, y2, slope in final_lines:
cv2.line(line_img, (x1, y1), (x2, y2),
color=(255, 0, 0),
thickness=2)
cv2.putText(line_img, "({}, {})".format(x1, y1), (x1, y1),
cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 2,
cv2.LINE_AA)
return line_img
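# Illustrative only (not part of the operator): the same blur + Canny stage used in
# on_msg_camera_stream can be exercised on a single frame outside ERDOS, e.g.
#
#     gray = cv2.cvtColor(cv2.imread('road.png'), cv2.COLOR_BGR2GRAY)  # hypothetical image
#     edges = cv2.Canny(cv2.GaussianBlur(gray, (7, 7), 0), 30, 60)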
| 37.586538
| 79
| 0.549757
|
c4b4f4c0e448dfbe2f8d6c40ac0e6e9e5609f531
| 2,063
|
py
|
Python
|
concourse/model/traits/options.py
|
adracus/cc-utils
|
dcd1ff544d8b18a391188903789d1cac929f50f9
|
[
"Apache-2.0"
] | null | null | null |
concourse/model/traits/options.py
|
adracus/cc-utils
|
dcd1ff544d8b18a391188903789d1cac929f50f9
|
[
"Apache-2.0"
] | null | null | null |
concourse/model/traits/options.py
|
adracus/cc-utils
|
dcd1ff544d8b18a391188903789d1cac929f50f9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util import not_none
from concourse.model.base import (
AttributeSpec,
Trait,
TraitTransformer
)
ATTRIBUTES = (
AttributeSpec.optional(
name='build_logs_to_retain',
default=1000,
doc='the amount of build logs to retain before log rotation occurs',
type=int,
),
AttributeSpec.optional(
name='public_build_logs',
default=False,
doc='whether or not build logs are accessible to unauthenticated users',
type=bool,
),
)
class OptionsTrait(Trait):
def _attribute_specs(self):
return ATTRIBUTES
def _defaults_dict(self):
return AttributeSpec.defaults_dict(ATTRIBUTES)
def _optional_attributes(self):
return set(AttributeSpec.optional_attr_names(ATTRIBUTES))
def build_logs_to_retain(self):
return self.raw['build_logs_to_retain']
def public_build_logs(self):
return self.raw['public_build_logs']
def transformer(self):
return OptionsTraitTransformer(trait=self)
class OptionsTraitTransformer(TraitTransformer):
name = 'options'
def __init__(self, trait: OptionsTrait, *args, **kwargs):
super().__init__(*args, **kwargs)
self.trait = not_none(trait)
def process_pipeline_args(self, pipeline_args: 'JobVariant'):
pass
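# Illustrative note (not part of the module): with no overrides, the defaults declared
# above resolve to build_logs_to_retain == 1000 and public_build_logs == False;
# AttributeSpec.defaults_dict(ATTRIBUTES) presumably yields
# {'build_logs_to_retain': 1000, 'public_build_logs': False}.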
| 29.898551
| 99
| 0.709161
|
e6f661f7a2cc86978e1cf02d95b5a928efe21652
| 27,891
|
py
|
Python
|
map_test.py
|
slarda/ur-analysis-tools
|
7e7ade4a2418d798ccccc7949b8362faab1e10c9
|
[
"Apache-2.0"
] | 1
|
2019-03-25T02:27:48.000Z
|
2019-03-25T02:27:48.000Z
|
map_test.py
|
slarda/ur-analysis-tools
|
7e7ade4a2418d798ccccc7949b8362faab1e10c9
|
[
"Apache-2.0"
] | null | null | null |
map_test.py
|
slarda/ur-analysis-tools
|
7e7ade4a2418d798ccccc7949b8362faab1e10c9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#####!/usr/local/bin/python3
import os
import pytz
import click
import random
import logging
import harness
import datetime
import pandas as pd
import numpy as np
import ml_metrics as metrics
from tqdm import tqdm
from uuid import uuid4
from dateutil import parser
from config import init_config
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql import functions as F
from report import CSVReport, ExcelReport
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
#logging = logging.getlogging(__name__)
#logging.setLevel(level=logging.DEBUG)
cfg = init_config('config_testing.json')
logging.debug("Application was launched with config: %s" % str(cfg.init_dict))
def get_split_date(df, split_event, train_ratio=0.8):
"""Calculates split date
Calculates the moment of time that we will use to split
data into the train (before the moment) and the test sets
Args:
df: Spark DataFrame
train_ratio: ratio of samples in train set
Returns:
A datetime object
"""
date_rdd = (df
.filter("event = '%s'" % split_event)
.select("Date")
.sort("Date", ascending=True)
.rdd)
total_primary_events = date_rdd.count()
split_date = (date_rdd
.zipWithIndex()
.filter(lambda x: x[1] > total_primary_events * train_ratio)
.first()[0][0])
return split_date
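# Usage sketch (illustrative; the event name 'purchase' is hypothetical): with a
# DataFrame of events carrying a "Date" column, the first 80% of primary events
# (ordered by time) end up in the train set, e.g.
#
#     split_date = get_split_date(df, split_event='purchase', train_ratio=0.8)
#     train_df = df.filter(F.col("Date") < split_date)
#     test_df = df.filter(F.col("Date") >= split_date)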
def split_data(df):
if cfg.splitting.type == "random":
return df.randomSplit([cfg.splitting.train_ratio, 1 - cfg.splitting.train_ratio], seed=cfg.splitting.random_seed)
elif cfg.splitting.type == "date":
split_date = get_split_date(df, cfg.splitting.split_event, cfg.splitting.train_ratio)
return df.filter(F.col("Date") < split_date), df.filter(F.col("Date") >= split_date)
def mk_intersection_matrix(by_rows, columns_for_matrix,
horizontal_suffix="", vertical_suffix=""):
""" Makes pandas dataframe of intersections out of list of rows
"""
result = pd.DataFrame(columns=[col + horizontal_suffix for col in columns_for_matrix])
for en in columns_for_matrix:
result.loc[en + vertical_suffix, :] = [0] * len(columns_for_matrix)
for r in by_rows:
row = r.asDict()
en_h = row['event_left']
en_v = row['event_right']
count = row['count']
result.loc[en_v + vertical_suffix, en_h + horizontal_suffix] = count
return result
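# Illustrative only (event names are hypothetical): `by_rows` is expected to look like
# the collected output of a grouped Spark count, e.g. pyspark.sql.Row objects of the form
# Row(event_left='view', event_right='purchase', count=42):
#
#     mk_intersection_matrix(rows, ['view', 'purchase'],
#                            horizontal_suffix=' train', vertical_suffix=' test')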
@click.command()
@click.option('--intersections', is_flag=True)
@click.option('--csv_report', is_flag=True)
def split(intersections, csv_report):
logging.info('Splitting started')
if csv_report:
if cfg.reporting.use_uuid:
uuid = uuid4()
reporter = CSVReport(cfg.reporting.csv_dir, uuid)
else:
reporter = CSVReport(cfg.reporting.csv_dir, None)
else:
reporter = ExcelReport(cfg.reporting.file)
logging.info('Spark initialization')
sc = SparkContext(cfg.spark.master, 'map_test: split')
sqlContext = SQLContext(sc)
logging.info('Source file reading')
df = sqlContext.read.json(cfg.splitting.source_file)
df = df.withColumn("Date", F.from_utc_timestamp("eventTime", "UTC"))
df = df[(df.event != '$set') & (df.event != '$unset')]
users_with_event_count = df.groupBy(F.col("entityId").alias("user")).count()
logging.info('Filter users with small number of events')
min_events = 10
users_with_few_events = (users_with_event_count
.filter("count < %d" % (min_events))
.select(F.col("user").alias("user_with_few_events")))
ndf = df.join(users_with_few_events,
F.col("entityId")==F.col("user_with_few_events"),
how="left_outer")
df1 = ndf.filter("user_with_few_events is NULL").drop("user_with_few_events")
logging.info('Split data into train and test')
# Use the filtered frame (df1) so that users with too few events are actually excluded from the split.
train_df, test_df = split_data(df1)
train_df.coalesce(1).write.format('json').save(cfg.splitting.train_file)
test_df.coalesce(1).write.format('json').save(cfg.splitting.test_file)
train_df = train_df.select("entityId", "event", "targetEntityId").cache()
test_df = test_df.select("entityId", "event", "targetEntityId").cache()
logging.info('Calculation of different stat metrics of datasets')
events_by_type = (df
.groupBy("event")
.count()
.select(F.col("event"), F.col("count").alias("count_total"))
.toPandas())
events_by_type_test = (test_df
.groupBy("event")
.count()
.select(F.col("event"), F.col("count").alias("count_test"))
.toPandas()
.set_index("event"))
events_by_type_train = (train_df
.groupBy("event")
.count()
.select(F.col("event"), F.col("count").alias("count_train"))
.toPandas()
.set_index("event"))
unique_users_by_event = (df
.select(F.col("entityId"), F.col("event"))
.distinct()
.groupBy("event")
.count()
.select(F.col("event"), F.col("count").alias("unique_users_total"))
.toPandas()
.set_index("event"))
unique_users_by_event_train = (train_df
.select(F.col("entityId"), F.col("event"))
.distinct()
.groupBy("event")
.count()
.select(F.col("event"), F.col("count").alias("unique_users_train"))
.toPandas()
.set_index("event"))
unique_users_by_event_test = (test_df
.select(F.col("entityId"), F.col("event"))
.distinct()
.groupBy("event")
.count()
.select(F.col("event"), F.col("count").alias("unique_users_test"))
.toPandas()
.set_index("event"))
unique_items_by_event = (df
.select(F.col("targetEntityId"), F.col("event"))
.distinct()
.groupBy("event")
.count()
.select(F.col("event"), F.col("count").alias("unique_items_total"))
.toPandas()
.set_index("event"))
unique_items_by_event_train = (train_df
.select(F.col("targetEntityId"), F.col("event"))
.distinct()
.groupBy("event")
.count()
.select(F.col("event"), F.col("count").alias("unique_items_train"))
.toPandas()
.set_index("event"))
unique_items_by_event_test = (test_df
.select(F.col("targetEntityId"), F.col("event"))
.distinct()
.groupBy("event")
.count()
.select(F.col("event"), F.col("count").alias("unique_items_test"))
.toPandas()
.set_index("event"))
logging.info('Calculate total counts')
events = df.count()
events_train = train_df.count()
events_test = test_df.count()
unique_users = df.select("entityId").distinct().count()
unique_users_train = train_df.select("entityId").distinct().count()
unique_users_test = test_df.select("entityId").distinct().count()
unique_items = df.select(F.col("targetEntityId")).distinct().count()
unique_items_train = train_df.select(F.col("targetEntityId")).distinct().count()
unique_items_test = test_df.select(F.col("targetEntityId")).distinct().count()
info_df = events_by_type
dfs = [unique_users_by_event, unique_items_by_event,
events_by_type_train, events_by_type_test,
unique_users_by_event_train, unique_users_by_event_test,
unique_items_by_event_train, unique_items_by_event_test]
for data_frame in dfs:
info_df = info_df.join(data_frame, on="event")
n_rows, n_cols = info_df.shape
# totals
info_df.loc[n_rows] = ['ANY EVENT', events, unique_users, unique_items,
events_train, events_test,
unique_users_train, unique_users_test,
unique_items_train, unique_items_test]
info_df.insert(4, 'events per user', info_df.iloc[:, 1] / info_df.iloc[:, 2])
info_df.insert(5, 'events per item', info_df.iloc[:, 1] / info_df.iloc[:, 3])
info_df = info_df.fillna(0)
logging.info('Create event stat worksheet')
reporter.start_new_sheet('Events stat')
reporter.report(
['event', 'event count', 'unique users', 'unique items',
'events per user', 'events per item',
'event count train', 'event count test',
'unique users train', 'unique users test',
'unique items train', 'unique items test'],
[column.tolist() for _, column in info_df.iteritems()],
selected_rows=[next(info_df.iteritems())[1].tolist().index(cfg.testing.primary_event)],
cfg=cfg)
reporter.finish_sheet()
if intersections:
logging.info('Start intersections calculation')
reporter.start_new_sheet('Intersections')
columns_for_matrix = cfg.testing.events
logging.info('Process train / train user intersection')
train_train_users = (
train_df
.select(F.col("entityId").alias("user"), F.col("event").alias("event_left"))
.distinct()
.join(train_df.select(F.col("entityId").alias("user"), F.col("event").alias("event_right")).distinct(),
on="user", how="inner")
.groupBy(["event_left", "event_right"])
.count()
.collect())
trtru = mk_intersection_matrix(train_train_users, columns_for_matrix)
reporter.report(
[''] + list(trtru.columns.values),
[trtru.index.tolist()] + [column for _, column in trtru.iteritems()],
title='Train / train user intersection')
logging.info('Process train / test user intersection')
train_test_users = (
train_df
.select(F.col("entityId").alias("user"), F.col("event").alias("event_left"))
.distinct()
.join(test_df.select(F.col("entityId").alias("user"), F.col("event").alias("event_right")).distinct(),
on="user", how="inner")
.groupBy(["event_left", "event_right"])
.count()
.collect())
trtsu = mk_intersection_matrix(train_test_users, columns_for_matrix,
horizontal_suffix=" train", vertical_suffix=" test")
reporter.report(
[''] + list(trtsu.columns.values),
[trtsu.index.tolist()] + [column for _, column in trtsu.iteritems()],
title='Train / test user intersection')
logging.info('Process train / train item intersection')
train_train_items = (
train_df
.select(F.col("targetEntityId").alias("item"), F.col("event").alias("event_left"))
.distinct()
.join(train_df.select(F.col("targetEntityId").alias("item"), F.col("event").alias("event_right")).distinct(),
on="item", how="inner")
.groupBy(["event_left", "event_right"])
.count()
.collect())
trtri = mk_intersection_matrix(train_train_items, columns_for_matrix)
reporter.report(
[''] + list(trtri.columns.values),
[trtri.index.tolist()] + [column for _, column in trtri.iteritems()],
title='Train / train item intersection'
)
logging.info('Process train / test item intersection')
train_test_items = (
train_df
.select(F.col("targetEntityId").alias("item"), F.col("event").alias("event_left"))
.distinct()
.join(test_df.select(F.col("targetEntityId").alias("item"), F.col("event").alias("event_right")).distinct(),
on="item", how="inner")
.groupBy(["event_left", "event_right"])
.count()
.collect())
trtsi = mk_intersection_matrix(train_test_items, columns_for_matrix,
horizontal_suffix=" train", vertical_suffix=" test")
reporter.report(
[''] + list(trtsi.columns.values),
[trtsi.index.tolist()] + [column for _, column in trtsi.iteritems()],
title='Train / test item intersection'
)
reporter.report_config(cfg)
reporter.finish_document()
logging.info('Splitting finished successfully')
def run_map_test_dummy(data, items=None, probs=None, uniform=True, top=True,
users=None, primaryEvent=cfg.testing.primary_event, K=10, no_progress=False):
"""Performs dummy test
Args:
data: list of event rows
items: np.array or list of items sorted in descending popularity order
probs: np.array or list of corresponding probabilities (needed for experiment #2)
uniform: Boolean flag to use uniform sampling
top: Boolean flag to use top items
users: set of users to consider
primaryEvent: str name of primary event
K: int for MAP @ K
no_progress: Boolean flag not to show the progress bar during calculations
Returns:
list of [MAP@1, MAP@2, ... MAP@K] evaluations
"""
user_information = {}
for rec in data:
if rec.event == primaryEvent:
user = rec.entityId
item = rec.targetEntityId
if not users or user in users:
user_information.setdefault(user, []).append(item)
holdoutUsers = [*user_information.keys()]
prediction = []
ground_truth = []
if no_progress:
gen = holdoutUsers
else:
gen = tqdm(holdoutUsers)
for user in gen:
if top:
test_items = items[0:K]
elif uniform:
test_items = np.random.choice(items, size=(K,))
else:
test_items = np.random.choice(items, size=(K,), p=probs)
prediction.append(test_items)
ground_truth.append(user_information.get(user, []))
return [metrics.mapk(ground_truth, prediction, k) for k in range(1, K + 1)]
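# Illustrative baseline call (mirrors how test() uses it further below): score a
# popularity top-K recommender against the held-out primary events.
#
#     maps = run_map_test_dummy(test_data, items=elements, uniform=True, top=True, K=10)
#     # maps[k - 1] is MAP@k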
def import_events(engine_client, events_data,
seed = cfg.splitting.random_seed):
random.seed(seed)
count = 0
logging.info('Importing data..')
for line in events_data:
dict_data = line.asDict()
event_time = parser.parse(dict_data["eventTime"])
if dict_data["event"] != "$set":
engine_client.create(
event = dict_data["event"],
entity_type = "user",
entity_id = dict_data["entityId"],
target_entity_type = "item",
target_entity_id = dict_data["targetEntityId"],
event_time = event_time,
)
print("Event: " + str(dict_data))
else:
engine_client.create(
event = "$set",
entity_type = "item",
entity_id = dict_data['entityId'],
event_time = event_time,
properties = dict_data["properties"].asDict()
)
print("Event: " + str(dict_data))
def run_map_test(data, eventNames, users = None,
primaryEvent = cfg.testing.primary_event,
consider_non_zero_scores = cfg.testing.consider_non_zero_scores_only,
num = 200, K = cfg.testing.map_k,
test = False, harness_url = "http://localhost:9090"):
N_TEST = 2000
user_information = {}
res_data = {}
# Create harness engine for events...
engine_client = harness.EventsClient(
engine_id = cfg.engine_id,
url = harness_url,
threads = 5,
qsize = 500)
import_events(engine_client, data)
logging.info(engine_client.host)
engine_client.close()
# Create query client in harness...
logging.info("Queries for " + cfg.engine_id)
query_client = harness.QueriesClient(
engine_id = cfg.engine_id,
url = harness_url,
threads=5,
qsize=500)
for rec in data:
if rec.event == primaryEvent:
user = rec.entityId
item = rec.targetEntityId
if not users or user in users:
user_information.setdefault(user, []).append(item)
if test:
holdoutUsers = [*user_information.keys()][1:N_TEST]
else:
holdoutUsers = [*user_information.keys()]
prediction = []
ground_truth = []
user_items_cnt = 0.0
users_cnt = 0
for user in tqdm(holdoutUsers):
query = {
"user": user,
"eventNames": eventNames,
"num": num,
}
try:
res = query_client.send_query(query)
# Sort by score then by item name
tuples = sorted([(r["score"], r["item"]) for r in res.json_body['result']], reverse=True)
scores = [score for score, item in tuples]
items = [item for score, item in tuples]
res_data[user] = {
"items": items,
"scores": scores,
}
# Consider only non-zero scores
if consider_non_zero_scores:
if len(scores) > 0 and scores[0] != 0.0:
prediction.append(items)
ground_truth.append(user_information.get(user, []))
user_items_cnt += len(user_information.get(user, []))
users_cnt += 1
else:
prediction.append(items)
ground_truth.append(user_information.get(user, []))
user_items_cnt += len(user_information.get(user, []))
users_cnt += 1
except harness.NotFoundError:
print("Error with user: %s" % user)
return ([metrics.mapk(ground_truth, prediction, k) for k in range(1, K + 1)],
res_data, user_items_cnt / (users_cnt + 0.00001))
def get_nonzero(r_data):
users = [user for user, res_data in r_data.items() if res_data['scores'][0] != 0.0]
return users
@click.command()
@click.option('--csv_report', is_flag = True)
@click.option('--all', default = True, is_flag=True)
@click.option('--dummy_test', is_flag = True)
@click.option('--separate_test', is_flag = True)
@click.option('--all_but_test', is_flag = True)
@click.option('--primary_pairs_test', is_flag = True)
@click.option('--custom_combos_test', is_flag = True)
@click.option('--non_zero_users_from_file', is_flag = True)
def test(csv_report,
all,
dummy_test,
separate_test,
all_but_test,
primary_pairs_test,
custom_combos_test,
non_zero_users_from_file):
logging.info('Testing started')
if csv_report:
if cfg.reporting.use_uuid:
uuid = uuid4()
reporter = CSVReport(cfg.reporting.csv_dir, uuid)
else:
reporter = CSVReport(cfg.reporting.csv_dir, None)
else:
reporter = ExcelReport(cfg.reporting.file)
logging.info('Spark context initialization')
sc = SparkContext(cfg.spark.master, 'map_test: train')
sqlContext = SQLContext(sc)
logging.info('Train data reading')
test_df = sqlContext.read.json(cfg.splitting.test_file).cache()
test_data = test_df.filter("event = '%s'" % (cfg.testing.primary_event)).collect()
#non_zero_users = set([r[0] for r in test_data][500:650]) # Because actually all our users have 0.0 scores -- too few data
if all or dummy_test:
logging.info('Train data reading')
train_df = sqlContext.read.json(cfg.splitting.train_file).cache()
counts = train_df.filter("event = '%s'" % (cfg.testing.primary_event)).groupBy("targetEntityId").count().collect()
sorted_rating = sorted([(row.asDict()['count'], row.asDict()['targetEntityId']) for row in counts], reverse=True)
elements = np.array([item for cnt, item in sorted_rating])
probs = np.array([cnt for cnt, item in sorted_rating])
probs = 1.0 * probs / probs.sum()
logging.info('Process dummy test')
# case 1. Random sampling from items (uniform)
dummy_uniform_res = run_map_test_dummy(test_data, items=elements, probs=probs,
uniform=True, top=False, K=cfg.testing.map_k)
# case 2. Random sampling from items (according to their distribution in training data)
dummy_res = run_map_test_dummy(test_data, items=elements, probs=probs,
uniform=False, top=False, K=cfg.testing.map_k)
# case 3. Top-N items from training data
dummy_top_res = run_map_test_dummy(test_data, items=elements, probs=probs,
uniform=True, top=True, K=cfg.testing.map_k)
reporter.start_new_sheet('Dummy MAP benchmark')
reporter.report(
['', 'Random uniform', 'Random sampled from train', 'Top - N'],
[[('MAP @ %d' % i) for i in range(1, len(dummy_res)+1)]] + [dummy_uniform_res, dummy_res, dummy_top_res],
cfg=cfg
)
reporter.finish_sheet()
logging.info('Process top 20 dummy test')
scores = []
for i in range(20):
scores.append(run_map_test_dummy(test_data, items=elements[i:], uniform=True,
top=True, K=1, no_progress=True)[0])
reporter.start_new_sheet('Top-20 performance')
reporter.report(
['Rank', 'MAP@1'],
[list(range(1, 21)), scores],
bold_first_column=False,
cfg=cfg
)
reporter.finish_sheet()
if all or separate_test or all_but_test or primary_pairs_test or custom_combos_test:
logging.info('Non zero users')
if non_zero_users_from_file:
with open(cfg.testing.non_zero_users_file) as input:
non_zero_users = set(input.read().split(','))
else:
_, r_data, _ = run_map_test(test_data, [cfg.testing.primary_event], test=False)
non_zero_users = get_nonzero(r_data)
with open(cfg.testing.non_zero_users_file, 'w') as output:
output.write(','.join(non_zero_users))
if all or separate_test:
logging.info('Process "map separate events" test')
columns = []
for ev in cfg.testing.events:
(r_scores, r_data, ipu) = run_map_test(test_data, [ev], users=non_zero_users, test=False)
columns.append(r_scores + [len(non_zero_users)])
first_column = [('MAP @ %d' % i) for i in range(1, len(columns[0]))] + ['non-zero users']
reporter.start_new_sheet('MAP separate events')
reporter.report(
['event'] + cfg.testing.events,
[first_column] + columns,
selected_columns=[cfg.testing.events.index(cfg.testing.primary_event) + 1],
cfg=cfg
)
reporter.finish_sheet()
if all or all_but_test:
logging.info('Process "map all but..." test')
events_scores = []
for ev in cfg.testing.events:
evs = list(cfg.testing.events)
evs.remove(ev)
(r_scores, r_data, ipu) = run_map_test(test_data, evs, users=non_zero_users, test=False)
events_scores.append(r_scores + [len(non_zero_users)])
evl = cfg.testing.events
all_scores, r_data, ipu = run_map_test(test_data, evl, users=non_zero_users, test=False)
all_scores.append(len(non_zero_users))
first_column = [('MAP @ %d' % i) for i in range(1, len(all_scores))] + ['non-zero users']
reporter.start_new_sheet('MAP all but...')
reporter.report(
['event'] + cfg.testing.events + ['All'],
[first_column] + events_scores + [all_scores],
selected_columns=[cfg.testing.events.index(cfg.testing.primary_event) + 1],
cfg=cfg
)
reporter.finish_sheet()
if all or primary_pairs_test:
logging.info('Process "map pairs with primary" test')
columns = []
events_without_primary = [event for event in cfg.testing.events if event != cfg.testing.primary_event]
for event in events_without_primary:
(r_scores, r_data, ipu) = run_map_test(test_data, [cfg.testing.primary_event, event],
users=non_zero_users, test=False)
columns.append(r_scores + [len(non_zero_users)])
first_column = [('MAP @ %d' % i) for i in range(1, len(columns[0]))] + ['non-zero users']
reporter.start_new_sheet('MAP pairs with primary')
reporter.report(
['event'] + events_without_primary,
[first_column] + columns,
cfg=cfg
)
reporter.finish_sheet()
if all or custom_combos_test:
logging.info('Process "custom combos" test')
columns = []
for event_group in cfg.testing.custom_combos.event_groups:
if len(event_group) == 2 and cfg.testing.primary_event in event_group and primary_pairs_test:
logging.warn("Report for group %s already generated in 'MAP pairs with primary'" % str(event_group))
continue
if len(event_group) == 1 and separate_test:
logging.warn("Report for group %s already generated in 'MAP separate events'" % str(event_group))
continue
if len(event_group) >= len(cfg.testing.events) - 1 and all_but_test:
logging.warn("Report for group %s already generated in 'All but...'" % str(event_group))
continue
if not (set(cfg.testing.events) & set(event_group)):
logging.warn("Event group is not corect!")
continue
(r_scores, r_data, ipu) = run_map_test(test_data, event_group,
users = non_zero_users,
test=False)
columns.append(r_scores + [len(non_zero_users)])
if columns:
first_column = [('MAP @ %d' % i) for i in range(1, len(columns[0]))] + ['non-zero users']
reporter.start_new_sheet('Custom combos')
reporter.report(
['event'] + [str([s.encode('utf-8') for s in group]) for group in cfg.testing.custom_combos.event_groups],
[first_column] + columns,
cfg=cfg
)
reporter.finish_sheet()
reporter.finish_document()
logging.info('Testing finished successfully')
# root group
@click.group()
def root():
pass
root.add_command(split)
root.add_command(test)
if __name__ == "__main__":
root()
| 38.845404
| 126
| 0.571582
|
91e40b1084ec34e1f901ef665fd545922f27b8d1
| 22,226
|
py
|
Python
|
cirq/ops/linear_combinations.py
|
jeffreygrover/Cirq
|
17d94cf45f6b09ddf40048ddbb173e50fa293995
|
[
"Apache-2.0"
] | null | null | null |
cirq/ops/linear_combinations.py
|
jeffreygrover/Cirq
|
17d94cf45f6b09ddf40048ddbb173e50fa293995
|
[
"Apache-2.0"
] | null | null | null |
cirq/ops/linear_combinations.py
|
jeffreygrover/Cirq
|
17d94cf45f6b09ddf40048ddbb173e50fa293995
|
[
"Apache-2.0"
] | 1
|
2020-12-18T16:36:41.000Z
|
2020-12-18T16:36:41.000Z
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import (Mapping, Optional, Tuple, Union, List, FrozenSet,
DefaultDict)
import numbers
import numpy as np
from cirq import protocols, value
from cirq._doc import document
from cirq.linalg import operator_spaces
from cirq.ops import identity, raw_types, pauli_gates, pauli_string
from cirq.ops.pauli_string import PauliString, _validate_qubit_mapping
from cirq.value.linear_dict import _format_terms
UnitPauliStringT = FrozenSet[Tuple[raw_types.Qid, pauli_gates.Pauli]]
PauliSumLike = Union[int, float, complex, PauliString, 'PauliSum', pauli_string.
SingleQubitPauliStringGateOperation]
document(
PauliSumLike, # type: ignore
"""Any value that can be easily translated into a sum of Pauli products.
""")
class LinearCombinationOfGates(value.LinearDict[raw_types.Gate]):
"""Represents linear operator defined by a linear combination of gates.
Suppose G1, G2, ..., Gn are gates and b1, b2, ..., bn are complex
numbers. Then
LinearCombinationOfGates({G1: b1, G2: b2, ..., Gn: bn})
represents the linear operator
A = b1 G1 + b2 G2 + ... + bn Gn
Note that A may not be unitary or even normal.
Rather than creating LinearCombinationOfGates instance explicitly, one may
use overloaded arithmetic operators. For example,
cirq.LinearCombinationOfGates({cirq.X: 2, cirq.Z: -2})
is equivalent to
2 * cirq.X - 2 * cirq.Z
"""
def __init__(self, terms: Mapping[raw_types.Gate, value.Scalar]) -> None:
"""Initializes linear combination from a collection of terms.
Args:
terms: Mapping of gates to coefficients in the linear combination
being initialized.
"""
super().__init__(terms, validator=self._is_compatible)
def num_qubits(self) -> Optional[int]:
"""Returns number of qubits in the domain if known, None if unknown."""
if not self:
return None
any_gate = next(iter(self))
return any_gate.num_qubits()
def _is_compatible(self, gate: raw_types.Gate) -> bool:
return (self.num_qubits() is None or
self.num_qubits() == gate.num_qubits())
def __add__(self,
other: Union[raw_types.Gate, 'LinearCombinationOfGates']
) -> 'LinearCombinationOfGates':
if not isinstance(other, LinearCombinationOfGates):
other = other.wrap_in_linear_combination()
return super().__add__(other)
def __iadd__(self,
other: Union[raw_types.Gate, 'LinearCombinationOfGates']
) -> 'LinearCombinationOfGates':
if not isinstance(other, LinearCombinationOfGates):
other = other.wrap_in_linear_combination()
return super().__iadd__(other)
def __sub__(self,
other: Union[raw_types.Gate, 'LinearCombinationOfGates']
) -> 'LinearCombinationOfGates':
if not isinstance(other, LinearCombinationOfGates):
other = other.wrap_in_linear_combination()
return super().__sub__(other)
def __isub__(self,
other: Union[raw_types.Gate, 'LinearCombinationOfGates']
) -> 'LinearCombinationOfGates':
if not isinstance(other, LinearCombinationOfGates):
other = other.wrap_in_linear_combination()
return super().__isub__(other)
def __pow__(self, exponent: int) -> 'LinearCombinationOfGates':
if not isinstance(exponent, int):
return NotImplemented
if exponent < 0:
return NotImplemented
if self.num_qubits() != 1:
return NotImplemented
pauli_basis = {
identity.I,
pauli_gates.X,
pauli_gates.Y,
pauli_gates.Z,
}
if not set(self.keys()).issubset(pauli_basis):
return NotImplemented
ai = self[identity.I]
ax = self[pauli_gates.X]
ay = self[pauli_gates.Y]
az = self[pauli_gates.Z]
bi, bx, by, bz = operator_spaces.pow_pauli_combination(
ai, ax, ay, az, exponent)
return LinearCombinationOfGates({
identity.I: bi,
pauli_gates.X: bx,
pauli_gates.Y: by,
pauli_gates.Z: bz
})
def matrix(self) -> np.ndarray:
"""Reconstructs matrix of self using unitaries of underlying gates.
Raises:
TypeError: if any of the gates in self does not provide a unitary.
"""
num_qubits = self.num_qubits()
if num_qubits is None:
raise ValueError('Unknown number of qubits')
num_dim = 2 ** num_qubits
result = np.zeros((num_dim, num_dim), dtype=np.complex128)
for gate, coefficient in self.items():
result += protocols.unitary(gate) * coefficient
return result
def _pauli_expansion_(self) -> value.LinearDict[str]:
result = value.LinearDict({}) # type: value.LinearDict[str]
for gate, coefficient in self.items():
result += protocols.pauli_expansion(gate) * coefficient
return result
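# Illustrative sketch (not part of the original source): as the class docstring notes,
# the overloaded gate arithmetic produces these objects directly, and matrix() recovers
# the dense operator, e.g.
#
#     combo = 2 * pauli_gates.X - 2 * pauli_gates.Z  # LinearCombinationOfGates
#     combo.matrix()  # 2x2 complex numpy array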
class LinearCombinationOfOperations(value.LinearDict[raw_types.Operation]):
"""Represents operator defined by linear combination of gate operations.
If G1, ..., Gn are gate operations, {q1_1, ..., q1_k1}, {q2_1, ..., q2_k2},
..., {qn_1, ..., qn_kn} are (not necessarily disjoint) sets of qubits and
b1, b2, ..., bn are complex numbers, then
LinearCombinationOfOperations({
G1(q1_1, ..., q1_k1): b1,
G2(q2_1, ..., q2_k2): b2,
...,
Gn(qn_1, ..., qn_kn): bn})
represents the linear operator
A = b1 G1(q1_1, ..., q1_k1) +
+ b2 G2(q2_1, ..., q2_k2) +
+ ... +
+ bn Gn(qn_1, ..., qn_kn)
where in each term qubits not explicitly listed are assumed to be acted on
by the identity operator. Note that A may not be unitary or even normal.
"""
def __init__(self,
terms: Mapping[raw_types.Operation, value.Scalar]) -> None:
"""Initializes linear combination from a collection of terms.
Args:
terms: Mapping of gate operations to coefficients in the linear
combination being initialized.
"""
super().__init__(terms, validator=self._is_compatible)
def _is_compatible(self, operation: raw_types.Operation) -> bool:
return isinstance(operation, raw_types.Operation)
@property
def qubits(self) -> Tuple[raw_types.Qid, ...]:
"""Returns qubits acted on self."""
if not self:
return ()
qubit_sets = [set(op.qubits) for op in self.keys()]
all_qubits = set.union(*qubit_sets)
return tuple(sorted(all_qubits))
def __pow__(self, exponent: int) -> 'LinearCombinationOfOperations':
if not isinstance(exponent, int):
return NotImplemented
if exponent < 0:
return NotImplemented
if len(self.qubits) != 1:
return NotImplemented
qubit = self.qubits[0]
i = identity.I(qubit)
x = pauli_gates.X(qubit)
y = pauli_gates.Y(qubit)
z = pauli_gates.Z(qubit)
pauli_basis = {i, x, y, z}
if not set(self.keys()).issubset(pauli_basis):
return NotImplemented
ai, ax, ay, az = self[i], self[x], self[y], self[z]
bi, bx, by, bz = operator_spaces.pow_pauli_combination(
ai, ax, ay, az, exponent)
return LinearCombinationOfOperations({i: bi, x: bx, y: by, z: bz})
def matrix(self) -> np.ndarray:
"""Reconstructs matrix of self using unitaries of underlying operations.
Raises:
TypeError: if any of the gates in self does not provide a unitary.
"""
num_qubits = len(self.qubits)
num_dim = 2**num_qubits
qubit_to_axis = {q: i for i, q in enumerate(self.qubits)}
result = np.zeros((2,) * (2 * num_qubits), dtype=np.complex128)
for op, coefficient in self.items():
identity = np.eye(num_dim,
dtype=np.complex128).reshape(result.shape)
workspace = np.empty_like(identity)
axes = tuple(qubit_to_axis[q] for q in op.qubits)
u = protocols.apply_unitary(
op, protocols.ApplyUnitaryArgs(identity, workspace, axes))
result += coefficient * u
return result.reshape((num_dim, num_dim))
def _pauli_expansion_(self) -> value.LinearDict[str]:
"""Computes Pauli expansion of self from Pauli expansions of terms."""
def extend_term(pauli_names: str, qubits: Tuple[raw_types.Qid, ...],
all_qubits: Tuple[raw_types.Qid, ...]) -> str:
"""Extends Pauli product on qubits to product on all_qubits."""
assert len(pauli_names) == len(qubits)
qubit_to_pauli_name = dict(zip(qubits, pauli_names))
return ''.join(qubit_to_pauli_name.get(q, 'I') for q in all_qubits)
def extend(expansion: value.LinearDict[str],
qubits: Tuple[raw_types.Qid, ...],
all_qubits: Tuple[raw_types.Qid, ...]
) -> value.LinearDict[str]:
"""Extends Pauli expansion on qubits to expansion on all_qubits."""
return value.LinearDict({
extend_term(p, qubits, all_qubits): c
for p, c in expansion.items()
})
result = value.LinearDict({}) # type: value.LinearDict[str]
for op, coefficient in self.items():
expansion = protocols.pauli_expansion(op)
extended_expansion = extend(expansion, op.qubits, self.qubits)
result += extended_expansion * coefficient
return result
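# Illustrative usage sketch for the two classes above. The helper name below is
# hypothetical and relies only on names already imported in this module.
def _example_linear_combination_of_gates():
    """Build 0.5*X + 0.5*Z, inspect its matrix, and square it."""
    combo = LinearCombinationOfGates({pauli_gates.X: 0.5, pauli_gates.Z: 0.5})
    matrix = combo.matrix()  # 2x2 ndarray equal to (unitary(X) + unitary(Z)) / 2
    squared = combo ** 2     # Pauli-basis power; works out to 0.5 * I
    return matrix, squared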
def _is_linear_dict_of_unit_pauli_string(
linear_dict: value.LinearDict[UnitPauliStringT]) -> bool:
if not isinstance(linear_dict, value.LinearDict):
return False
for k in linear_dict.keys():
if not isinstance(k, frozenset):
return False
for qid, pauli in k:
if not isinstance(qid, raw_types.Qid):
return False
if not isinstance(pauli, pauli_gates.Pauli):
return False
return True
def _pauli_string_from_unit(unit: UnitPauliStringT,
coefficient: Union[int, float, complex] = 1):
return PauliString(qubit_pauli_map=dict(unit), coefficient=coefficient)
@value.value_equality(approximate=True)
class PauliSum:
"""Represents operator defined by linear combination of PauliStrings.
Since PauliStrings store their own coefficients, this class
does not implement the LinearDict interface. Instead, you can
add and subtract terms and then iterate over the resulting
(simplified) expression.
Under the hood, this class is backed by a LinearDict with coefficient-less
PauliStrings as keys. PauliStrings are reconstructed on-the-fly during
iteration.
"""
def __init__(
self,
linear_dict: Optional[value.LinearDict[UnitPauliStringT]] = None):
if linear_dict is None:
linear_dict = value.LinearDict()
if not _is_linear_dict_of_unit_pauli_string(linear_dict):
raise ValueError(
"PauliSum constructor takes a LinearDict[UnitPauliStringT]. "
"Consider using PauliSum.from_pauli_strings() or adding and "
"subtracting PauliStrings")
self._linear_dict = linear_dict
def _value_equality_values_(self):
return self._linear_dict
@staticmethod
def wrap(val: PauliSumLike) -> 'PauliSum':
if isinstance(val, PauliSum):
return val
return PauliSum() + val
@classmethod
def from_pauli_strings(cls, terms: Union[PauliString, List[PauliString]]
) -> 'PauliSum':
if isinstance(terms, PauliString):
terms = [terms]
termdict: DefaultDict[UnitPauliStringT, value.Scalar] = defaultdict(
lambda: 0)
for pstring in terms:
key = frozenset(pstring._qubit_pauli_map.items())
termdict[key] += pstring.coefficient
return cls(linear_dict=value.LinearDict(termdict))
@property
def qubits(self) -> Tuple[raw_types.Qid, ...]:
qs = {q for k in self._linear_dict.keys() for q, _ in k}
return tuple(sorted(qs))
def copy(self) -> 'PauliSum':
factory = type(self)
return factory(self._linear_dict.copy())
def expectation_from_wavefunction(self,
state: np.ndarray,
qubit_map: Mapping[raw_types.Qid, int],
*,
atol: float = 1e-7,
check_preconditions: bool = True
) -> float:
"""Evaluate the expectation of this PauliSum given a wavefunction.
See `PauliString.expectation_from_wavefunction`.
Args:
state: An array representing a valid wavefunction.
qubit_map: A map from all qubits used in this PauliSum to the
indices of the qubits that `state` is defined over.
atol: Absolute numerical tolerance.
check_preconditions: Whether to check that `state` represents a
valid wavefunction.
Returns:
The expectation value of the input state.
"""
if any(abs(p.coefficient.imag) > 0.0001 for p in self):
raise NotImplementedError(
"Cannot compute expectation value of a non-Hermitian "
"PauliString <{}>. Coefficient must be real.".format(self))
        # FIXME: Avoid enforcing a specific complex type. This is necessary to
# prevent an `apply_unitary` bug (Issue #2041).
if state.dtype.kind != 'c':
raise TypeError("Input state dtype must be np.complex64 or "
"np.complex128")
size = state.size
num_qubits = size.bit_length() - 1
_validate_qubit_mapping(qubit_map, self.qubits, num_qubits)
if len(state.shape) != 1 and state.shape != (2,) * num_qubits:
raise ValueError("Input array does not represent a wavefunction "
"with shape `(2 ** n,)` or `(2, ..., 2)`.")
if check_preconditions:
# HACK: avoid circular import
from cirq.sim.wave_function import validate_normalized_state
validate_normalized_state(state=state,
qid_shape=(2,) * num_qubits,
dtype=state.dtype,
atol=atol)
return sum(
p._expectation_from_wavefunction_no_validation(state, qubit_map)
for p in self)
def expectation_from_density_matrix(self,
state: np.ndarray,
qubit_map: Mapping[raw_types.Qid, int],
*,
atol: float = 1e-7,
check_preconditions: bool = True
) -> float:
"""Evaluate the expectation of this PauliSum given a density matrix.
See `PauliString.expectation_from_density_matrix`.
Args:
state: An array representing a valid density matrix.
qubit_map: A map from all qubits used in this PauliSum to the
indices of the qubits that `state` is defined over.
atol: Absolute numerical tolerance.
check_preconditions: Whether to check that `state` represents a
valid density matrix.
Returns:
The expectation value of the input state.
"""
if any(abs(p.coefficient.imag) > 0.0001 for p in self):
raise NotImplementedError(
"Cannot compute expectation value of a non-Hermitian "
"PauliString <{}>. Coefficient must be real.".format(self))
        # FIXME: Avoid enforcing a specific complex type. This is necessary to
# prevent an `apply_unitary` bug (Issue #2041).
if state.dtype.kind != 'c':
raise TypeError("Input state dtype must be np.complex64 or "
"np.complex128")
size = state.size
num_qubits = int(np.sqrt(size)).bit_length() - 1
_validate_qubit_mapping(qubit_map, self.qubits, num_qubits)
dim = int(np.sqrt(size))
if state.shape != (dim, dim) and state.shape != (2, 2) * num_qubits:
raise ValueError("Input array does not represent a density matrix "
"with shape `(2 ** n, 2 ** n)` or `(2, ..., 2)`.")
if check_preconditions:
# HACK: avoid circular import
from cirq.sim.density_matrix_utils import to_valid_density_matrix
            # Do not enforce reshaping if all axes of the state have dimension 2.
_ = to_valid_density_matrix(density_matrix_rep=state.reshape(
dim, dim),
num_qubits=num_qubits,
dtype=state.dtype,
atol=atol)
return sum(
p._expectation_from_density_matrix_no_validation(state, qubit_map)
for p in self)
def __iter__(self):
for vec, coeff in self._linear_dict.items():
yield _pauli_string_from_unit(vec, coeff)
def __len__(self) -> int:
return len(self._linear_dict)
def __iadd__(self, other):
if isinstance(other, numbers.Complex):
other = PauliSum.from_pauli_strings(
[PauliString(coefficient=other)])
elif isinstance(other, PauliString):
other = PauliSum.from_pauli_strings([other])
if not isinstance(other, PauliSum):
return NotImplemented
self._linear_dict += other._linear_dict
return self
def __add__(self, other):
if not isinstance(other, (numbers.Complex, PauliString, PauliSum)):
return NotImplemented
result = self.copy()
result += other
return result
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
return -self.__sub__(other)
def __isub__(self, other):
if isinstance(other, numbers.Complex):
other = PauliSum.from_pauli_strings(
[PauliString(coefficient=other)])
if isinstance(other, PauliString):
other = PauliSum.from_pauli_strings([other])
if not isinstance(other, PauliSum):
return NotImplemented
self._linear_dict -= other._linear_dict
return self
def __sub__(self, other):
if not isinstance(other, (numbers.Complex, PauliString, PauliSum)):
return NotImplemented
result = self.copy()
result -= other
return result
def __neg__(self):
factory = type(self)
return factory(-self._linear_dict)
def __imul__(self, other: PauliSumLike):
if not isinstance(other, (numbers.Complex, PauliString, PauliSum)):
return NotImplemented
if isinstance(other, numbers.Complex):
self._linear_dict *= other
elif isinstance(other, PauliString):
temp = PauliSum.from_pauli_strings([term * other for term in self])
self._linear_dict = temp._linear_dict
elif isinstance(other, PauliSum):
temp = PauliSum.from_pauli_strings(
[term * other_term for term in self for other_term in other])
self._linear_dict = temp._linear_dict
return self
def __mul__(self, other: PauliSumLike):
if not isinstance(other, (numbers.Complex, PauliString, PauliSum)):
return NotImplemented
result = self.copy()
result *= other
return result
def __rmul__(self, other: PauliSumLike):
if isinstance(other, numbers.Complex):
result = self.copy()
result *= other
return result
elif isinstance(other, PauliString):
result = self.copy()
return PauliSum.from_pauli_strings([other]) * result
return NotImplemented
def __pow__(self, exponent: int):
if not isinstance(exponent, numbers.Integral):
return NotImplemented
if exponent == 0:
return PauliSum(value.LinearDict({frozenset(): 1 + 0j}))
if exponent > 0:
base = self.copy()
for _ in range(exponent - 1):
                base *= self
return base
return NotImplemented
def __truediv__(self, a: value.Scalar):
return self.__mul__(1 / a)
def __bool__(self) -> bool:
return bool(self._linear_dict)
def __repr__(self) -> str:
class_name = self.__class__.__name__
return 'cirq.{}({!r})'.format(class_name, self._linear_dict)
def __format__(self, format_spec: str) -> str:
terms = [(_pauli_string_from_unit(v), self._linear_dict[v])
for v in self._linear_dict.keys()]
return _format_terms(terms=terms, format_spec=format_spec)
def __str__(self):
return self.__format__('.3f')
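# Illustrative usage sketch for PauliSum (hypothetical helper, not public API).
# It uses only names already available in this module; the caller supplies a
# qubit, e.g. a cirq.LineQubit.
def _example_pauli_sum_expectation(qubit: raw_types.Qid) -> float:
    """Expectation of Z + 0.5*X on a single qubit prepared in |0>."""
    psum = PauliSum.from_pauli_strings([
        PauliString(qubit_pauli_map={qubit: pauli_gates.Z}),
        PauliString(qubit_pauli_map={qubit: pauli_gates.X}, coefficient=0.5),
    ])
    state = np.array([1, 0], dtype=np.complex64)  # |0>
    # <0|Z|0> = 1 and <0|X|0> = 0, so this returns a value close to 1.0.
    return psum.expectation_from_wavefunction(state, qubit_map={qubit: 0})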
| 38.586806
| 80
| 0.602133
|
bcf364002885b6882b13f2328ed566c33e8ec985
| 1,735
|
py
|
Python
|
lqrrt/constraints.py
|
jnez71/lqRRT
|
4796ee3fa8d1e658dc23c143f576b38d22642e45
|
[
"MIT"
] | 71
|
2016-10-20T05:19:39.000Z
|
2022-02-13T05:57:11.000Z
|
lqrrt/constraints.py
|
jnez71/lqRRT
|
4796ee3fa8d1e658dc23c143f576b38d22642e45
|
[
"MIT"
] | 12
|
2016-10-02T05:17:15.000Z
|
2017-05-04T17:46:30.000Z
|
lqrrt/constraints.py
|
jnez71/lqRRT
|
4796ee3fa8d1e658dc23c143f576b38d22642e45
|
[
"MIT"
] | 23
|
2016-10-02T04:18:49.000Z
|
2022-02-28T09:37:15.000Z
|
"""
Class for lqrrt constraints.
An instance of this class must be given to an lqrrt planner
to fully define the search problem.
"""
################################################# DEPENDENCIES
from __future__ import division
import numpy as np
import numpy.linalg as npl
################################################# PRIMARY CLASS
class Constraints:
"""
To initialize, provide...
nstates: The dimensionality of the state space.
ncontrols: The dimensionality of the effort space.
goal_buffer: Half-edge lengths of box defining goal region.
is_feasible: Function that takes a state and effort and returns a bool.
"""
def __init__(self, nstates, ncontrols, goal_buffer, is_feasible):
self.nstates = nstates
self.ncontrols = ncontrols
self.set_buffers(goal_buffer)
self.set_feasibility_function(is_feasible)
#################################################
def set_buffers(self, goal_buffer=None):
"""
See class docstring for argument definitions.
Arguments not given are not modified.
"""
if goal_buffer is not None:
if len(goal_buffer) == self.nstates:
self.goal_buffer = np.abs(goal_buffer).astype(np.float64)
else:
raise ValueError("The goal_buffer must have same dimensionality as state.")
#################################################
def set_feasibility_function(self, is_feasible):
"""
See class docstring for argument definitions.
"""
if hasattr(is_feasible, '__call__'):
self.is_feasible = is_feasible
else:
raise ValueError("Expected is_feasible to be a function.")
| 27.983871
| 91
| 0.588473
|
0ba46ee5801be4b17b5413a7560cfb8507b01b38
| 3,378
|
py
|
Python
|
mitmproxy/proxy/config.py
|
illera88/mitmproxy
|
4f464001841e5119bf57ef620a3257892ded2ded
|
[
"MIT"
] | 6
|
2020-11-25T07:33:05.000Z
|
2022-01-25T07:25:54.000Z
|
mitmproxy/proxy/config.py
|
illera88/mitmproxy
|
4f464001841e5119bf57ef620a3257892ded2ded
|
[
"MIT"
] | 18
|
2020-12-28T20:12:26.000Z
|
2022-03-15T20:44:40.000Z
|
mitmproxy/proxy/config.py
|
illera88/mitmproxy
|
4f464001841e5119bf57ef620a3257892ded2ded
|
[
"MIT"
] | 4
|
2021-03-14T16:14:27.000Z
|
2021-09-25T03:01:15.000Z
|
import os
import re
import typing
from OpenSSL import crypto
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy import options as moptions
from mitmproxy.net import server_spec
class HostMatcher:
def __init__(self, handle, patterns=tuple()):
self.handle = handle
self.patterns = list(patterns)
self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
def __call__(self, address):
if not address:
return False
host = "%s:%s" % address
if self.handle in ["ignore", "tcp"]:
return any(rex.search(host) for rex in self.regexes)
else: # self.handle == "allow"
return not any(rex.search(host) for rex in self.regexes)
def __bool__(self):
return bool(self.patterns)
class ProxyConfig:
def __init__(self, options: moptions.Options) -> None:
self.options = options
self.certstore: certs.CertStore
self.check_filter: typing.Optional[HostMatcher] = None
self.check_tcp: typing.Optional[HostMatcher] = None
self.upstream_server: typing.Optional[server_spec.ServerSpec] = None
self.configure(options, set(options.keys()))
options.changed.connect(self.configure)
def configure(self, options: moptions.Options, updated: typing.Any) -> None:
if options.allow_hosts and options.ignore_hosts:
raise exceptions.OptionsError("--ignore-hosts and --allow-hosts are mutually "
"exclusive; please choose one.")
if options.ignore_hosts:
self.check_filter = HostMatcher("ignore", options.ignore_hosts)
elif options.allow_hosts:
self.check_filter = HostMatcher("allow", options.allow_hosts)
else:
self.check_filter = HostMatcher(False)
if "tcp_hosts" in updated:
self.check_tcp = HostMatcher("tcp", options.tcp_hosts)
certstore_path = os.path.expanduser(options.confdir)
if not os.path.exists(os.path.dirname(certstore_path)):
raise exceptions.OptionsError(
"Certificate Authority parent directory does not exist: %s" %
os.path.dirname(certstore_path)
)
key_size = options.key_size
passphrase = options.cert_passphrase.encode("utf-8") if options.cert_passphrase else None
self.certstore = certs.CertStore.from_store(
certstore_path,
moptions.CONF_BASENAME,
key_size,
passphrase
)
for c in options.certs:
parts = c.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
cert = os.path.expanduser(parts[1])
if not os.path.exists(cert):
raise exceptions.OptionsError(
"Certificate file does not exist: %s" % cert
)
try:
self.certstore.add_cert_file(parts[0], cert, passphrase)
except crypto.Error:
raise exceptions.OptionsError(
"Invalid certificate format: %s" % cert
)
m = options.mode
if m.startswith("upstream:") or m.startswith("reverse:"):
_, spec = server_spec.parse_with_mode(options.mode)
self.upstream_server = spec
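# Illustrative sketch of HostMatcher semantics (hypothetical helper, made-up
# patterns). For "ignore"/"tcp" the matcher is True when host:port matches a
# pattern; for "allow" it is True when no pattern matches.
def _example_host_matcher() -> None:
    ignore = HostMatcher("ignore", [r"example\.com:443", r"^10\.0\."])
    assert ignore(("example.com", 443))         # matches a pattern
    assert not ignore(("mitmproxy.org", 443))   # matches nothing
    allow = HostMatcher("allow", [r"^internal\.corp:"])
    assert not allow(("internal.corp", 443))    # explicitly allowed host
    assert allow(("example.com", 443))          # everything else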
| 36.322581
| 97
| 0.608644
|
9e75a26a2ca3215e39e3eba35d3ac2e27ebd7f45
| 1,658
|
py
|
Python
|
watch_later_convert.py
|
david-ruffner/Youtube-Video-ID-Converter
|
abe56f2881c7d6852ae5d14720990642bb51a7c3
|
[
"MIT"
] | null | null | null |
watch_later_convert.py
|
david-ruffner/Youtube-Video-ID-Converter
|
abe56f2881c7d6852ae5d14720990642bb51a7c3
|
[
"MIT"
] | null | null | null |
watch_later_convert.py
|
david-ruffner/Youtube-Video-ID-Converter
|
abe56f2881c7d6852ae5d14720990642bb51a7c3
|
[
"MIT"
] | null | null | null |
import csv
import requests
import json
import pandas as pd
import openpyxl
class Video:
def __init__(self, title, channel):
self.title = title
self.channel = channel
def GetVideoInfo(videoId):
response = requests.get("https://youtube.googleapis.com/youtube/v3/videos?part=snippet&id={0}&key=[Your Google API Key Here]"
.format(videoId))
if response.status_code == 200:
responseObj = json.loads(response.text)
try:
title = responseObj["items"][0]["snippet"]["title"]
channel = responseObj["items"][0]["snippet"]["channelTitle"]
return Video(title, channel)
except:
return Video(videoId, "Invalid")
else:
return None
def GetVideoIds(fileName):
videoIds = []
with open(fileName, 'r') as csvFile:
csvReader = csv.reader(csvFile)
for row in csvReader:
videoIds.append(row[0])
return videoIds
videoIds = GetVideoIds("./watch_later_csv.csv")
totalVideos = len(videoIds)
currentVideo = 0
titles = []
channels = []
for video_id in videoIds:
    currentVideo = currentVideo + 1
    finishedPercentage = currentVideo / totalVideos
    print("Processing Video {0} of {1} - {2:.2f}% Finished\n".format(currentVideo, totalVideos, (finishedPercentage * 100)))
    vid = GetVideoInfo(video_id)
    if vid is not None:
        titles.append(vid.title)
        channels.append(vid.channel)
    else:
        print("Sorry, something went wrong with video: {0}\n".format(video_id))
df = pd.DataFrame({'Title':titles, 'Channel':channels})
df.to_excel("./watch_later.xlsx")
| 29.607143
| 130
| 0.624246
|
98bebd6603bbed75923ed756d9394c967e7166a2
| 2,430
|
py
|
Python
|
scrapy/get_enti_and_know.py
|
LouisYZK/recruitKG
|
2f65f005230ea0ca05eb45d9e1e689f83dec2720
|
[
"MIT"
] | null | null | null |
scrapy/get_enti_and_know.py
|
LouisYZK/recruitKG
|
2f65f005230ea0ca05eb45d9e1e689f83dec2720
|
[
"MIT"
] | null | null | null |
scrapy/get_enti_and_know.py
|
LouisYZK/recruitKG
|
2f65f005230ea0ca05eb45d9e1e689f83dec2720
|
[
"MIT"
] | null | null | null |
import sqlite3
import requests
import json
import time
"""
Input: doc from zhilian_doc.db
Aim:
get the entities/knowledges in the doc.
    store them into entities.json/knowledges.json
entities.json:
{
'name+position':List(entities),
}
knowledges.json:
{
'entity':[
['relation', 'entity'],
...
],
}
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}
def flatten(items):
for x in items:
if hasattr(x,'__iter__') and not isinstance(x, (str, bytes)):
# for sub_x in flatten(x):
# yield sub_x
yield from flatten(x)
else:
yield x
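# e.g. list(flatten([["a", "b"], ["c", ["d"]]])) == ["a", "b", "c", "d"];
# strings/bytes are yielded whole rather than split into characters.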
def get_entity(doc):
url = 'http://shuyantech.com/api/entitylinking/cutsegment'
doc = doc.split('。')
entities = []
for item in doc:
params = {'q':item}
r = requests.get(url, params=params, headers=headers)
entity = json.loads(r.text)['entities']
entities.append([item2[1] for item2 in entity])
return entities
def get_triple_tuple(entities):
url = 'http://shuyantech.com/api/cndbpedia/avpair'
know = {}
for item in entities:
if item not in seen_entity:
seen_entity.add(item)
params = {'q':item}
text = requests.get(url, params=params, headers=headers).text
knowledge = json.loads(text)['ret']
know[item] = knowledge
return know
def en_store_to_json(name, pos, entities):
en = {}
with open('./entities.json', 'a') as fp:
en[name + pos] = entities
json.dump(en, fp)
def konw_store_to_json(name, pos, knows):
with open('./knows.json', 'a') as fp:
json.dump(knows, fp)
def get_proxy():
return requests.get("http://127.0.0.1:5010/get/").content
def delete_proxy(proxy):
requests.get("http://127.0.0.1:5010/delete/?proxy={}".format(proxy))
conn = sqlite3.connect('zhilian_doc.db')
cur = conn.cursor()
data = cur.execute('select * from zhilian_doc')
seen_entity = set()
for name, pos, doc in data:
    time.sleep(3)
    entities = list(flatten(get_entity(doc)))
    # knows = get_triple_tuple(entities)
    print(entities)
    # en_store_to_json(name, pos, entities)
    # konw_store_to_json(name, pos, knows)
| 25.851064
| 139
| 0.617695
|
8aba4c938888ec1a0757356734b82dbbf8ef1d27
| 4,222
|
py
|
Python
|
keras_retinanet/utils/visualization.py
|
Accioy/keras-retinanet
|
01dce4547f78588185fa6a138c45279609bfa1c9
|
[
"Apache-2.0"
] | 7,141
|
2018-03-22T16:27:31.000Z
|
2022-03-31T07:18:34.000Z
|
keras_retinanet/utils/visualization.py
|
Accioy/keras-retinanet
|
01dce4547f78588185fa6a138c45279609bfa1c9
|
[
"Apache-2.0"
] | 1,472
|
2017-11-11T23:10:27.000Z
|
2022-03-25T11:04:22.000Z
|
keras_retinanet/utils/visualization.py
|
Accioy/keras-retinanet
|
01dce4547f78588185fa6a138c45279609bfa1c9
|
[
"Apache-2.0"
] | 2,580
|
2017-05-14T14:33:41.000Z
|
2022-03-31T15:04:14.000Z
|
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .colors import label_color
def draw_box(image, box, color, thickness=2):
""" Draws a box on an image with a given color.
# Arguments
image : The image to draw on.
box : A list of 4 elements (x1, y1, x2, y2).
color : The color of the box.
thickness : The thickness of the lines to draw a box with.
"""
b = np.array(box).astype(int)
cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)
def draw_caption(image, box, caption):
""" Draws a caption above the box in an image.
# Arguments
image : The image to draw on.
box : A list of 4 elements (x1, y1, x2, y2).
caption : String containing the text to draw.
"""
b = np.array(box).astype(int)
cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
def draw_boxes(image, boxes, color, thickness=2):
""" Draws boxes on an image with a given color.
# Arguments
image : The image to draw on.
boxes : A [N, 4] matrix (x1, y1, x2, y2).
color : The color of the boxes.
thickness : The thickness of the lines to draw boxes with.
"""
for b in boxes:
draw_box(image, b, color, thickness=thickness)
def draw_detections(image, boxes, scores, labels, color=None, label_to_name=None, score_threshold=0.5):
""" Draws detections in an image.
# Arguments
image : The image to draw on.
boxes : A [N, 4] matrix (x1, y1, x2, y2).
scores : A list of N classification scores.
labels : A list of N labels.
color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used.
label_to_name : (optional) Functor for mapping a label to a name.
score_threshold : Threshold used for determining what detections to draw.
"""
selection = np.where(scores > score_threshold)[0]
for i in selection:
c = color if color is not None else label_color(labels[i])
draw_box(image, boxes[i, :], color=c)
# draw labels
caption = (label_to_name(labels[i]) if label_to_name else labels[i]) + ': {0:.2f}'.format(scores[i])
draw_caption(image, boxes[i, :], caption)
def draw_annotations(image, annotations, color=(0, 255, 0), label_to_name=None):
""" Draws annotations in an image.
# Arguments
image : The image to draw on.
annotations : A [N, 5] matrix (x1, y1, x2, y2, label) or dictionary containing bboxes (shaped [N, 4]) and labels (shaped [N]).
color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used.
label_to_name : (optional) Functor for mapping a label to a name.
"""
if isinstance(annotations, np.ndarray):
annotations = {'bboxes': annotations[:, :4], 'labels': annotations[:, 4]}
assert('bboxes' in annotations)
assert('labels' in annotations)
assert(annotations['bboxes'].shape[0] == annotations['labels'].shape[0])
for i in range(annotations['bboxes'].shape[0]):
label = annotations['labels'][i]
c = color if color is not None else label_color(label)
caption = '{}'.format(label_to_name(label) if label_to_name else label)
draw_caption(image, annotations['bboxes'][i], caption)
draw_box(image, annotations['bboxes'][i], color=c)
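# Illustrative sketch (hypothetical values; relies only on the imports above):
def _example_draw_detections():
    """Draw detections above the score threshold on a blank image."""
    image = np.zeros((200, 200, 3), dtype=np.uint8)
    boxes = np.array([[10, 10, 80, 80], [50, 60, 120, 150]])
    scores = np.array([0.9, 0.3])
    labels = np.array([0, 1])
    draw_detections(image, boxes, scores, labels,
                    label_to_name=lambda label: 'class_{}'.format(label),
                    score_threshold=0.5)
    # Only the first box is drawn; the second score falls below the threshold.
    return image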
| 39.457944
| 136
| 0.639034
|
69b428edd2e7fa4ea051ebe8df33202b38a5e2e4
| 3,169
|
py
|
Python
|
Pkg_Template/scripts/ROSPY_TEMPLATE.py
|
jwatson-CO-edu/ROS1_Templates_Helpers
|
d2bc9f091c8bfdda01353df9afe70e312f7c7b54
|
[
"MIT"
] | null | null | null |
Pkg_Template/scripts/ROSPY_TEMPLATE.py
|
jwatson-CO-edu/ROS1_Templates_Helpers
|
d2bc9f091c8bfdda01353df9afe70e312f7c7b54
|
[
"MIT"
] | null | null | null |
Pkg_Template/scripts/ROSPY_TEMPLATE.py
|
jwatson-CO-edu/ROS1_Templates_Helpers
|
d2bc9f091c8bfdda01353df9afe70e312f7c7b54
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########## INIT ####################################################################################
##### Future First #####
from __future__ import division # Future imports must be called before everything else, including triple-quote docs!
"""
James Watson , Template Version: 2020-05-18
Built on Gedit for Python 2.7
Dependencies: numpy , rospy
"""
"""
~~~ Developmnent Plan ~~~
[ ] Find-Replace `CLASSNAME`
[ ] NEXT_STEP
"""
# === Init Environment =====================================================================================================================
# ~~~ Prepare Paths ~~~
import sys, os.path
SOURCEDIR = os.path.dirname( os.path.abspath( __file__ ) ) # URL, dir containing source file: http://stackoverflow.com/a/7783326
PARENTDIR = os.path.dirname( SOURCEDIR )
# ~~~ Imports ~~~
# ~~ Standard ~~
from math import pi , sqrt , sin , cos
# ~~ Special ~~
import numpy as np
import rospy , rospkg
# ~~ Local ~~
import rospy_helpers
from rospy_helpers import BasicROSNode
rospy_helpers.install_constants()
# ~~ Messages ~~
# ___ End Init _________________________________________________________________________________________________________
# === Main Application =================================================================================================
# ~~ Program Constants ~~
# == Program Classes ==
class CLASSNAME( BasicROSNode ):
""" A_ONE_LINE_DESCRIPTION_OF_NODE_PURPOSE """
def __init__( self , name = "CLASSNAME" , rate = 300 ):
""" A_ONE_LINE_DESCRIPTION_OF_INIT """
super( CLASSNAME , self ).__init__( nodeName=name , refreshRate=rate )
# ~~~ 3. Subscribers and Listeners ~~~
# rospy.Subscriber( "TOPIC_NAME" , MSG_TYPE , CALLBACK_FUNC )
# ~~~ 4. Publishers ~~~
# self.pub = rospy.Publisher( "TOPIC_NAME" , MSG_TYPE , queue_size = 10 )
def run( self ):
""" A_ONE_LINE_DESCRIPTION_OF_RUNTIME_ACTIVITY """
# 0. While ROS is running
while ( not rospy.is_shutdown() ):
# 1. FIXME: THINGS TO DO WHILE THE NODE IS RUNNING
# N-1: Wait until the node is supposed to fire next
self.idle.sleep()
# N. Post-shutdown activities
else:
self.obit() # Post-run uptime report
# __ End Class __
# ~~~ Start Node ~~~
if __name__ == "__main__":
termArgs = sys.argv[1:] # Terminal arguments , if they exist
try:
obj = CLASSNAME()
obj.run()
except rospy.ROSInterruptException:
obj.obit()
# ___ End Main _________________________________________________________________________________________________________
# === Spare Parts ======================================================================================================
# ___ End Spare ________________________________________________________________________________________________________ ____________________________________________________________________________________________________________________________
| 30.471154
| 245
| 0.575891
|
164a1bd27629991b95d85e7b9374333089e8923d
| 1,608
|
py
|
Python
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/contacts/test_import_contacts_menu_no_network.py
|
TheoChevalier/gaia
|
803d04e3829fd4fe9261211aa0ddca6b79d4e328
|
[
"Apache-2.0"
] | 3
|
2016-08-17T08:52:51.000Z
|
2020-03-29T04:56:45.000Z
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/contacts/test_import_contacts_menu_no_network.py
|
TheoChevalier/gaia
|
803d04e3829fd4fe9261211aa0ddca6b79d4e328
|
[
"Apache-2.0"
] | 1
|
2017-02-21T21:36:12.000Z
|
2017-02-21T21:36:30.000Z
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/contacts/test_import_contacts_menu_no_network.py
|
TheoChevalier/gaia
|
803d04e3829fd4fe9261211aa0ddca6b79d4e328
|
[
"Apache-2.0"
] | 3
|
2019-03-31T04:27:13.000Z
|
2020-04-12T17:58:15.000Z
|
# -*- coding: iso-8859-15 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.contacts.app import Contacts
class TestImportContactsMenuNoNetwork(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
self.connect_to_local_area_network()
def test_import_contacts_menu_no_network(self):
'''
https://moztrap.mozilla.org/manage/case/15183/
'''
contacts_app = Contacts(self.marionette)
contacts_app.launch()
contacts_settings = contacts_app.tap_settings()
contacts_settings.tap_import_contacts()
self.assertFalse(contacts_settings.is_gmail_import_service_in_error)
self.assertTrue(contacts_settings.is_gmail_import_enabled)
self.assertFalse(contacts_settings.is_outlook_import_service_in_error)
self.assertTrue(contacts_settings.is_outlook_import_enabled)
self.assertFalse(contacts_settings.is_error_message_displayed)
self.disable_all_network_connections()
self.apps.switch_to_displayed_app()
self.assertTrue(contacts_settings.is_gmail_import_service_in_error)
self.assertFalse(contacts_settings.is_gmail_import_enabled)
self.assertTrue(contacts_settings.is_outlook_import_service_in_error)
self.assertFalse(contacts_settings.is_outlook_import_enabled)
self.assertTrue(contacts_settings.is_error_message_displayed)
| 34.956522
| 78
| 0.755597
|
f1ccc9043ea96a35cdfbfaa0d3d6644d5f8b9c45
| 816
|
bzl
|
Python
|
recipes/llvm/config.bzl
|
curoky/rules_cc
|
943408c05e2204e1e603b70db05037217a53868d
|
[
"Apache-2.0"
] | null | null | null |
recipes/llvm/config.bzl
|
curoky/rules_cc
|
943408c05e2204e1e603b70db05037217a53868d
|
[
"Apache-2.0"
] | null | null | null |
recipes/llvm/config.bzl
|
curoky/rules_cc
|
943408c05e2204e1e603b70db05037217a53868d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 curoky(cccuroky@gmail.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
config = {
"type": "new_local_repository",
"build_file": "default/BUILD",
"used_version": "heads/master",
"path": "/home/linuxbrew/.linuxbrew/opt/llvm",
"versions": {
"heads/master": {},
},
}
| 34
| 74
| 0.708333
|
c27ba8e67d43d9795e02b1cf8dee88ec2e18184d
| 8,881
|
py
|
Python
|
tests/importer/test_importer.py
|
sheganinans/hy
|
d7c333e61c88fdc73414cf42c9ec33861e4a9ed9
|
[
"MIT"
] | null | null | null |
tests/importer/test_importer.py
|
sheganinans/hy
|
d7c333e61c88fdc73414cf42c9ec33861e4a9ed9
|
[
"MIT"
] | null | null | null |
tests/importer/test_importer.py
|
sheganinans/hy
|
d7c333e61c88fdc73414cf42c9ec33861e4a9ed9
|
[
"MIT"
] | null | null | null |
# Copyright 2021 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import os
import sys
import ast
import tempfile
import runpy
import importlib
from fractions import Fraction
from importlib import reload
import pytest
import hy
from hy.lex import hy_parse
from hy.errors import HyLanguageError
from hy.lex.exceptions import PrematureEndOfInput
from hy.compiler import hy_eval, hy_compile
from hy.importer import HyLoader
def test_basics():
"Make sure the basics of the importer work"
assert os.path.isfile('tests/resources/__init__.py')
resources_mod = importlib.import_module('tests.resources')
assert hasattr(resources_mod, 'kwtest')
assert os.path.isfile('tests/resources/bin/__init__.hy')
bin_mod = importlib.import_module('tests.resources.bin')
assert hasattr(bin_mod, '_null_fn_for_import_test')
def test_runpy():
# XXX: `runpy` won't update cached bytecode! Don't know if that's
# intentional or not.
basic_ns = runpy.run_path('tests/resources/importer/basic.hy')
assert 'square' in basic_ns
main_ns = runpy.run_path('tests/resources/bin')
assert main_ns['visited_main'] == 1
del main_ns
main_ns = runpy.run_module('tests.resources.bin')
assert main_ns['visited_main'] == 1
with pytest.raises(IOError):
runpy.run_path('tests/resources/foobarbaz.py')
def test_stringer():
_ast = hy_compile(hy_parse("(defn square [x] (* x x))"), __name__)
assert type(_ast.body[0]) == ast.FunctionDef
def test_imports():
path = os.getcwd() + "/tests/resources/importer/a.hy"
testLoader = HyLoader("tests.resources.importer.a", path)
def _import_test():
try:
return testLoader.load_module()
except:
return "Error"
assert _import_test() == "Error"
assert _import_test() is not None
def test_import_error_reporting():
"Make sure that (import) reports errors correctly."
with pytest.raises(HyLanguageError):
hy_compile(hy_parse("(import \"sys\")"), __name__)
def test_import_error_cleanup():
"Failed initial imports should not leave dead modules in `sys.modules`."
with pytest.raises(hy.errors.HyMacroExpansionError):
importlib.import_module('tests.resources.fails')
assert 'tests.resources.fails' not in sys.modules
@pytest.mark.skipif(sys.dont_write_bytecode,
reason="Bytecode generation is suppressed")
def test_import_autocompiles():
"Test that (import) byte-compiles the module."
with tempfile.NamedTemporaryFile(suffix='.hy', delete=True) as f:
f.write(b'(defn pyctest [s] (+ "X" s "Y"))')
f.flush()
pyc_path = importlib.util.cache_from_source(f.name)
try:
os.remove(pyc_path)
except (IOError, OSError):
pass
test_loader = HyLoader("mymodule", f.name).load_module()
assert hasattr(test_loader, 'pyctest')
assert os.path.exists(pyc_path)
os.remove(pyc_path)
def test_eval():
def eval_str(s):
return hy_eval(hy.read_str(s), filename='<string>', source=s)
assert eval_str('[1 2 3]') == [1, 2, 3]
assert eval_str('{"dog" "bark" "cat" "meow"}') == {
'dog': 'bark', 'cat': 'meow'}
assert eval_str('(, 1 2 3)') == (1, 2, 3)
assert eval_str('#{3 1 2}') == {1, 2, 3}
assert eval_str('1/2') == Fraction(1, 2)
assert eval_str('(.strip " fooooo ")') == 'fooooo'
assert eval_str(
'(if True "this is if true" "this is if false")') == "this is if true"
assert eval_str('(lfor num (range 100) :if (= (% num 2) 1) (pow num 2))') == [
pow(num, 2) for num in range(100) if num % 2 == 1]
def test_reload():
"""Generate a test module, confirm that it imports properly (and puts the
module in `sys.modules`), then modify the module so that it produces an
error when reloaded. Next, fix the error, reload, and check that the
module is updated and working fine. Rinse, repeat.
This test is adapted from CPython's `test_import.py`.
"""
def unlink(filename):
os.unlink(source)
bytecode = importlib.util.cache_from_source(source)
if os.path.isfile(bytecode):
os.unlink(bytecode)
TESTFN = 'testfn'
source = TESTFN + os.extsep + "hy"
with open(source, "w") as f:
f.write("(setv a 1)")
f.write("(setv b 2)")
sys.path.insert(0, os.curdir)
try:
mod = importlib.import_module(TESTFN)
assert TESTFN in sys.modules
assert mod.a == 1
assert mod.b == 2
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
unlink(source)
# Now damage the module.
with open(source, "w") as f:
f.write("(setv a 10)")
f.write("(setv b (// 20 0))")
with pytest.raises(ZeroDivisionError):
reload(mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
assert mod is not None
# We should have replaced a w/ 10, but the old b value should
# stick.
assert mod.a == 10
assert mod.b == 2
# Now fix the issue and reload the module.
unlink(source)
with open(source, "w") as f:
f.write("(setv a 11)")
f.write("(setv b (// 20 1))")
reload(mod)
mod = sys.modules.get(TESTFN)
assert mod is not None
assert mod.a == 11
assert mod.b == 20
# Now cause a syntax error
unlink(source)
with open(source, "w") as f:
# Missing paren...
f.write("(setv a 11")
f.write("(setv b (// 20 1))")
with pytest.raises(PrematureEndOfInput):
reload(mod)
mod = sys.modules.get(TESTFN)
assert mod is not None
assert mod.a == 11
assert mod.b == 20
# Fix it and retry
unlink(source)
with open(source, "w") as f:
f.write("(setv a 12)")
f.write("(setv b (// 10 1))")
reload(mod)
mod = sys.modules.get(TESTFN)
assert mod is not None
assert mod.a == 12
assert mod.b == 10
finally:
del sys.path[0]
if TESTFN in sys.modules:
del sys.modules[TESTFN]
unlink(source)
def test_reload_reexecute(capsys):
"""A module is re-executed when it's reloaded, even if it's
unchanged.
https://github.com/hylang/hy/issues/712"""
import tests.resources.hello_world
assert capsys.readouterr().out == 'hello world\n'
assert capsys.readouterr().out == ''
reload(tests.resources.hello_world)
assert capsys.readouterr().out == 'hello world\n'
def test_circular():
"""Test circular imports by creating a temporary file/module that calls a
function that imports itself."""
sys.path.insert(0, os.path.abspath('tests/resources/importer'))
try:
mod = runpy.run_module('circular')
assert mod['f']() == 1
finally:
sys.path.pop(0)
def test_shadowed_basename():
"""Make sure Hy loads `.hy` files instead of their `.py` counterparts (.e.g
`__init__.py` and `__init__.hy`).
"""
sys.path.insert(0, os.path.realpath('tests/resources/importer'))
try:
assert os.path.isfile('tests/resources/importer/foo/__init__.hy')
assert os.path.isfile('tests/resources/importer/foo/__init__.py')
assert os.path.isfile('tests/resources/importer/foo/some_mod.hy')
assert os.path.isfile('tests/resources/importer/foo/some_mod.py')
foo = importlib.import_module('foo')
assert foo.__file__.endswith('foo\\__init__.hy' if os.sys.platform == "win32" else 'foo/__init__.hy')
assert foo.ext == 'hy'
some_mod = importlib.import_module('foo.some_mod')
assert some_mod.__file__.endswith('foo\\some_mod.hy' if os.sys.platform == "win32" else 'foo/some_mod.hy')
assert some_mod.ext == 'hy'
finally:
sys.path.pop(0)
def test_docstring():
"""Make sure a module's docstring is loaded."""
sys.path.insert(0, os.path.realpath('tests/resources/importer'))
try:
mod = importlib.import_module('docstring')
expected_doc = ("This module has a docstring.\n\n"
"It covers multiple lines, too!\n")
assert mod.__doc__ == expected_doc
assert mod.a == 1
finally:
sys.path.pop(0)
def test_hy_python_require():
# https://github.com/hylang/hy/issues/1911
test = "(do (require [tests.resources.macros [test-macro]]) (test-macro) blah)"
assert hy.eval(hy.read_str(test)) == 1
| 29.802013
| 114
| 0.623466
|
75d52fe3e538b606d2aa9faadf36f31eeda8edd2
| 40,972
|
py
|
Python
|
src/natcap/invest/sdr.py
|
hkotaro1215/invest
|
1ba08bd746977bfa8a4600ad8c821fc43598c421
|
[
"BSD-3-Clause"
] | null | null | null |
src/natcap/invest/sdr.py
|
hkotaro1215/invest
|
1ba08bd746977bfa8a4600ad8c821fc43598c421
|
[
"BSD-3-Clause"
] | null | null | null |
src/natcap/invest/sdr.py
|
hkotaro1215/invest
|
1ba08bd746977bfa8a4600ad8c821fc43598c421
|
[
"BSD-3-Clause"
] | null | null | null |
"""InVEST Sediment Delivery Ratio (SDR) module.
The SDR method in this model is based on:
Winchell, M. F., et al. "Extension and validation of a geographic
information system-based method for calculating the Revised Universal
Soil Loss Equation length-slope factor for erosion risk assessments in
large watersheds." Journal of Soil and Water Conservation 63.3 (2008):
105-111.
"""
from __future__ import absolute_import
import os
import logging
from osgeo import gdal
from osgeo import ogr
import numpy
import pygeoprocessing
import natcap.invest.pygeoprocessing_0_3_3.routing
import natcap.invest.pygeoprocessing_0_3_3.routing.routing_core
from . import utils
from . import validation
LOGGER = logging.getLogger('natcap.invest.sdr')
_OUTPUT_BASE_FILES = {
'rkls_path': 'rkls.tif',
'sed_export_path': 'sed_export.tif',
'stream_path': 'stream.tif',
'usle_path': 'usle.tif',
'sed_retention_index_path': 'sed_retention_index.tif',
'sed_retention_path': 'sed_retention.tif',
'watershed_results_sdr_path': 'watershed_results_sdr.shp',
'stream_and_drainage_path': 'stream_and_drainage.tif',
}
_INTERMEDIATE_BASE_FILES = {
'dem_offset_path': 'dem_offset.tif',
'slope_path': 'slope.tif',
'thresholded_slope_path': 'thresholded_slope.tif',
'flow_direction_path': 'flow_direction.tif',
'flow_accumulation_path': 'flow_accumulation.tif',
'ls_path': 'ls.tif',
'w_bar_path': 'w_bar.tif',
's_bar_path': 's_bar.tif',
'd_up_path': 'd_up.tif',
'd_dn_path': 'd_dn.tif',
'd_dn_bare_soil_path': 'd_dn_bare_soil.tif',
'd_up_bare_soil_path': 'd_up_bare_soil.tif',
'ic_bare_soil_path': 'ic_bare_soil.tif',
'sdr_bare_soil_path': 'sdr_bare_soil.tif',
'ws_factor_path': 'ws_factor.tif',
'ic_path': 'ic.tif',
'sdr_path': 'sdr_factor.tif',
'w_path': 'w.tif',
}
_TMP_BASE_FILES = {
'cp_factor_path': 'cp.tif',
'aligned_dem_path': 'aligned_dem.tif',
'aligned_lulc_path': 'aligned_lulc.tif',
'aligned_erosivity_path': 'aligned_erosivity.tif',
'aligned_erodibility_path': 'aligned_erodibility.tif',
'aligned_drainage_path': 'aligned_drainage.tif',
'zero_absorption_source_path': 'zero_absorption_source.tif',
'loss_path': 'loss.tif',
'w_accumulation_path': 'w_accumulation.tif',
's_accumulation_path': 's_accumulation.tif',
'thresholded_w_path': 'w_threshold.tif',
'ws_inverse_path': 'ws_inverse.tif',
's_inverse_path': 's_inverse.tif',
}
# Target nodata is for general rasters that are positive, and _IC_NODATA are
# for rasters that are any range
_TARGET_NODATA = -1.0
_IC_NODATA = numpy.finfo('float32').min
def execute(args):
"""Sediment Delivery Ratio.
This function calculates the sediment export and retention of a landscape
using the sediment delivery ratio model described in the InVEST user's
guide.
Parameters:
args['workspace_dir'] (string): output directory for intermediate,
temporary, and final files
args['results_suffix'] (string): (optional) string to append to any
output file names
args['dem_path'] (string): path to a digital elevation raster
args['erosivity_path'] (string): path to rainfall erosivity index
raster
args['erodibility_path'] (string): a path to soil erodibility raster
args['lulc_path'] (string): path to land use/land cover raster
args['watersheds_path'] (string): path to vector of the watersheds
args['biophysical_table_path'] (string): path to CSV file with
biophysical information of each land use classes. contain the
fields 'usle_c' and 'usle_p'
args['threshold_flow_accumulation'] (number): number of upstream pixels
on the dem to threshold to a stream.
args['k_param'] (number): k calibration parameter
args['sdr_max'] (number): max value the SDR
args['ic_0_param'] (number): ic_0 calibration parameter
args['drainage_path'] (string): (optional) path to drainage raster that
is used to add additional drainage areas to the internally
calculated stream layer
Returns:
None.
"""
file_suffix = utils.make_suffix_string(args, 'results_suffix')
biophysical_table = utils.build_lookup_from_csv(
args['biophysical_table_path'], 'lucode')
# Test to see if c or p values are outside of 0..1
for table_key in ['usle_c', 'usle_p']:
for (lulc_code, table) in biophysical_table.iteritems():
try:
float_value = float(table[table_key])
if float_value < 0 or float_value > 1:
raise ValueError(
'Value should be within range 0..1 offending value '
'table %s, lulc_code %s, value %s' % (
table_key, str(lulc_code), str(float_value)))
except ValueError:
raise ValueError(
'Value is not a floating point value within range 0..1 '
'offending value table %s, lulc_code %s, value %s' % (
table_key, str(lulc_code), table[table_key]))
intermediate_output_dir = os.path.join(
args['workspace_dir'], 'intermediate_outputs')
output_dir = os.path.join(args['workspace_dir'])
utils.make_directories([output_dir, intermediate_output_dir])
f_reg = utils.build_file_registry(
[(_OUTPUT_BASE_FILES, output_dir),
(_INTERMEDIATE_BASE_FILES, intermediate_output_dir),
(_TMP_BASE_FILES, output_dir)], file_suffix)
base_list = []
aligned_list = []
for file_key in ['dem', 'lulc', 'erosivity', 'erodibility']:
base_list.append(args[file_key + "_path"])
aligned_list.append(f_reg["aligned_" + file_key + "_path"])
drainage_present = False
if 'drainage_path' in args and args['drainage_path'] != '':
drainage_present = True
base_list.append(args['drainage_path'])
aligned_list.append(f_reg['aligned_drainage_path'])
dem_pixel_size = pygeoprocessing.get_raster_info(
args['dem_path'])['pixel_size']
pygeoprocessing.align_and_resize_raster_stack(
base_list, aligned_list, ['nearest'] * len(base_list),
dem_pixel_size, 'intersection',
base_vector_path_list=[args['watersheds_path']],
raster_align_index=0)
LOGGER.info("calculating slope")
natcap.invest.pygeoprocessing_0_3_3.calculate_slope(
f_reg['aligned_dem_path'], f_reg['slope_path'])
_threshold_slope(f_reg['slope_path'], f_reg['thresholded_slope_path'])
LOGGER.info("calculating flow direction")
natcap.invest.pygeoprocessing_0_3_3.routing.flow_direction_d_inf(
f_reg['aligned_dem_path'], f_reg['flow_direction_path'])
LOGGER.info("calculating flow accumulation")
natcap.invest.pygeoprocessing_0_3_3.routing.flow_accumulation(
f_reg['flow_direction_path'], f_reg['aligned_dem_path'],
f_reg['flow_accumulation_path'])
LOGGER.info('calculate ls term')
_calculate_ls_factor(
f_reg['flow_accumulation_path'], f_reg['slope_path'],
f_reg['flow_direction_path'], f_reg['ls_path'])
LOGGER.info("classifying streams from flow accumulation raster")
natcap.invest.pygeoprocessing_0_3_3.routing.stream_threshold(
f_reg['flow_accumulation_path'],
float(args['threshold_flow_accumulation']),
f_reg['stream_path'])
if drainage_present:
_add_drainage(
f_reg['stream_path'],
f_reg['aligned_drainage_path'],
f_reg['stream_and_drainage_path'])
f_reg['drainage_raster_path'] = (
f_reg['stream_and_drainage_path'])
else:
f_reg['drainage_raster_path'] = (
f_reg['stream_path'])
LOGGER.info('calculate per pixel W')
_calculate_w(
biophysical_table, f_reg['aligned_lulc_path'], f_reg['w_path'],
f_reg['thresholded_w_path'])
LOGGER.info('calculate CP raster')
_calculate_cp(
biophysical_table, f_reg['aligned_lulc_path'],
f_reg['cp_factor_path'])
LOGGER.info('calculating RKLS')
_calculate_rkls(*[f_reg[key] for key in [
'ls_path', 'aligned_erosivity_path', 'aligned_erodibility_path',
'drainage_raster_path', 'rkls_path']])
LOGGER.info('calculating USLE')
_calculate_usle(*[f_reg[key] for key in [
'rkls_path', 'cp_factor_path', 'drainage_raster_path', 'usle_path']])
LOGGER.info('calculating w_bar')
for factor_path, accumulation_path, out_bar_path in [
(f_reg['thresholded_w_path'], f_reg['w_accumulation_path'],
f_reg['w_bar_path']),
(f_reg['thresholded_slope_path'], f_reg['s_accumulation_path'],
f_reg['s_bar_path'])]:
_calculate_bar_factor(
f_reg['aligned_dem_path'], factor_path,
f_reg['flow_accumulation_path'], f_reg['flow_direction_path'],
f_reg['zero_absorption_source_path'], f_reg['loss_path'],
accumulation_path, out_bar_path)
LOGGER.info('calculating d_up')
_calculate_d_up(
*[f_reg[key] for key in [
'w_bar_path', 's_bar_path', 'flow_accumulation_path',
'd_up_path']])
LOGGER.info('calculate WS factor')
_calculate_inverse_ws_factor(
f_reg['thresholded_slope_path'], f_reg['thresholded_w_path'],
f_reg['ws_inverse_path'])
LOGGER.info('calculating d_dn')
natcap.invest.pygeoprocessing_0_3_3.routing.routing_core.distance_to_stream(
f_reg['flow_direction_path'], f_reg['drainage_raster_path'],
f_reg['d_dn_path'], factor_uri=f_reg['ws_inverse_path'])
LOGGER.info('calculate ic')
_calculate_ic(
f_reg['d_up_path'], f_reg['d_dn_path'], f_reg['ic_path'])
LOGGER.info('calculate sdr')
_calculate_sdr(
float(args['k_param']), float(args['ic_0_param']),
float(args['sdr_max']), f_reg['ic_path'],
f_reg['drainage_raster_path'], f_reg['sdr_path'])
LOGGER.info('calculate sed export')
_calculate_sed_export(
f_reg['usle_path'], f_reg['sdr_path'], f_reg['sed_export_path'])
LOGGER.info('calculate sediment retention index')
_calculate_sed_retention_index(
f_reg['rkls_path'], f_reg['usle_path'], f_reg['sdr_path'],
float(args['sdr_max']), f_reg['sed_retention_index_path'])
LOGGER.info('calculate sediment retention')
LOGGER.info('calculate S factor')
_calculate_inverse_s_factor(
f_reg['thresholded_slope_path'], f_reg['s_inverse_path'])
LOGGER.info('calculating d_dn bare soil')
natcap.invest.pygeoprocessing_0_3_3.routing.routing_core.distance_to_stream(
f_reg['flow_direction_path'], f_reg['drainage_raster_path'],
f_reg['d_dn_bare_soil_path'], factor_uri=f_reg['s_inverse_path'])
LOGGER.info('calculating d_up bare soil')
_calculate_d_up_bare(
f_reg['s_bar_path'], f_reg['flow_accumulation_path'],
f_reg['d_up_bare_soil_path'])
LOGGER.info('calculate ic')
_calculate_ic(
f_reg['d_up_bare_soil_path'], f_reg['d_dn_bare_soil_path'],
f_reg['ic_bare_soil_path'])
_calculate_sdr(
float(args['k_param']), float(args['ic_0_param']),
float(args['sdr_max']), f_reg['ic_bare_soil_path'],
f_reg['drainage_raster_path'], f_reg['sdr_bare_soil_path'])
_calculate_sed_retention(
f_reg['rkls_path'], f_reg['usle_path'], f_reg['drainage_raster_path'],
f_reg['sdr_path'], f_reg['sdr_bare_soil_path'],
f_reg['sed_retention_path'])
LOGGER.info('generating report')
_generate_report(
args['watersheds_path'], f_reg['usle_path'],
f_reg['sed_export_path'], f_reg['sed_retention_path'],
f_reg['watershed_results_sdr_path'])
for tmp_filename_key in _TMP_BASE_FILES:
if os.path.exists(f_reg[tmp_filename_key]):
os.remove(f_reg[tmp_filename_key])
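# Example `args` for `execute` (illustrative only; every path and number below
# is a made-up placeholder, not shipped sample data):
#
#     execute({
#         'workspace_dir': 'sdr_workspace',
#         'results_suffix': 'run1',
#         'dem_path': 'dem.tif',
#         'erosivity_path': 'erosivity.tif',
#         'erodibility_path': 'erodibility.tif',
#         'lulc_path': 'lulc.tif',
#         'watersheds_path': 'watersheds.shp',
#         'biophysical_table_path': 'biophysical.csv',
#         'threshold_flow_accumulation': 1000,
#         'k_param': 2.0,
#         'sdr_max': 0.8,
#         'ic_0_param': 0.5,
#     })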
def _calculate_ls_factor(
flow_accumulation_path, slope_path, aspect_path, out_ls_factor_path):
"""Calculate LS factor.
LS factor as Equation 3 from "Extension and validation
of a geographic information system-based method for calculating the
Revised Universal Soil Loss Equation length-slope factor for erosion
risk assessments in large watersheds"
Parameters:
flow_accumulation_path (string): path to raster, pixel values are the
contributing upstream area at that cell
slope_path (string): path to slope raster as a percent
aspect_path string): path to raster flow direction raster in radians
out_ls_factor_path (string): path to output ls_factor raster
Returns:
None
"""
slope_nodata = pygeoprocessing.get_raster_info(slope_path)['nodata'][0]
aspect_nodata = pygeoprocessing.get_raster_info(aspect_path)['nodata'][0]
flow_accumulation_info = pygeoprocessing.get_raster_info(
flow_accumulation_path)
flow_accumulation_nodata = flow_accumulation_info['nodata'][0]
cell_size = flow_accumulation_info['mean_pixel_size']
cell_area = cell_size ** 2
def ls_factor_function(aspect_angle, percent_slope, flow_accumulation):
"""Calculate the LS factor.
Parameters:
aspect_angle (numpy.ndarray): flow direction in radians
percent_slope (numpy.ndarray): slope in percent
flow_accumulation (numpy.ndarray): upstream pixels
Returns:
ls_factor
"""
valid_mask = (
(aspect_angle != aspect_nodata) &
(percent_slope != slope_nodata) &
(flow_accumulation != flow_accumulation_nodata))
result = numpy.empty(valid_mask.shape, dtype=numpy.float32)
result[:] = _TARGET_NODATA
# Determine the length of the flow path on the pixel
xij = (numpy.abs(numpy.sin(aspect_angle[valid_mask])) +
numpy.abs(numpy.cos(aspect_angle[valid_mask])))
contributing_area = (flow_accumulation[valid_mask]-1) * cell_area
slope_in_radians = numpy.arctan(percent_slope[valid_mask] / 100.0)
# From Equation 4 in "Extension and validation of a geographic
# information system ..."
slope_factor = numpy.where(
percent_slope[valid_mask] < 9.0,
10.8 * numpy.sin(slope_in_radians) + 0.03,
16.8 * numpy.sin(slope_in_radians) - 0.5)
beta = (
(numpy.sin(slope_in_radians) / 0.0986) /
(3 * numpy.sin(slope_in_radians)**0.8 + 0.56))
# Set m value via lookup table: Table 1 in
# InVEST Sediment Model_modifications_10-01-2012_RS.docx
# note slope_table in percent
slope_table = numpy.array([1., 3.5, 5., 9.])
m_table = numpy.array([0.2, 0.3, 0.4, 0.5])
# mask where slopes are larger than lookup table
big_slope_mask = percent_slope[valid_mask] > slope_table[-1]
m_indexes = numpy.digitize(
percent_slope[valid_mask][~big_slope_mask], slope_table,
right=True)
m_exp = numpy.empty(big_slope_mask.shape, dtype=numpy.float32)
m_exp[big_slope_mask] = (
beta[big_slope_mask] / (1 + beta[big_slope_mask]))
m_exp[~big_slope_mask] = m_table[m_indexes]
l_factor = (
((contributing_area + cell_area)**(m_exp+1) -
contributing_area ** (m_exp+1)) /
((cell_size ** (m_exp + 2)) * (xij**m_exp) * (22.13**m_exp)))
# from McCool paper: "as a final check against excessively long slope
# length calculations ... cap of 333m"
l_factor[l_factor > 333] = 333
result[valid_mask] = l_factor * slope_factor
return result
# call vectorize datasets to calculate the ls_factor
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
aspect_path, slope_path, flow_accumulation_path]],
ls_factor_function, out_ls_factor_path, gdal.GDT_Float32,
_TARGET_NODATA)
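# Restating the L term implemented in ls_factor_function above (not an
# independent source): with A the upstream contributing area, D the pixel
# size, x = |sin(aspect)| + |cos(aspect)|, and m the slope-dependent exponent,
#     L = ((A + D**2)**(m + 1) - A**(m + 1)) / (D**(m + 2) * x**m * 22.13**m)
# L is capped at 333, and the final LS factor is L multiplied by the slope
# factor S computed from Equation 4.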
def _calculate_rkls(
ls_factor_path, erosivity_path, erodibility_path, stream_path,
rkls_path):
"""Calculate per-pixel potential soil loss using the RKLS.
(revised universal soil loss equation with no C or P).
Parameters:
ls_factor_path (string): path to LS raster
erosivity_path (string): path to per pixel erosivity raster
erodibility_path (string): path to erodibility raster
stream_path (string): path to drainage raster
(1 is drainage, 0 is not)
rkls_path (string): path to RKLS raster
Returns:
None
"""
erosivity_nodata = pygeoprocessing.get_raster_info(
erosivity_path)['nodata'][0]
erodibility_nodata = pygeoprocessing.get_raster_info(
erodibility_path)['nodata'][0]
stream_nodata = pygeoprocessing.get_raster_info(
stream_path)['nodata'][0]
cell_size = pygeoprocessing.get_raster_info(
ls_factor_path)['mean_pixel_size']
cell_area_ha = cell_size ** 2 / 10000.0
def rkls_function(ls_factor, erosivity, erodibility, stream):
"""Calculate the RKLS equation.
Parameters:
ls_factor (numpy.ndarray): length/slope factor
erosivity (numpy.ndarray): related to peak rainfall events
erodibility (numpy.ndarray): related to the potential for soil to
erode
stream (numpy.ndarray): stream mask (1 stream, 0 no stream)
Returns:
            ls_factor * erosivity * erodibility * cell_area_ha, or nodata if
any values are nodata themselves.
"""
rkls = numpy.empty(ls_factor.shape, dtype=numpy.float32)
nodata_mask = (
(ls_factor != _TARGET_NODATA) &
(erosivity != erosivity_nodata) &
(erodibility != erodibility_nodata) & (stream != stream_nodata))
valid_mask = nodata_mask & (stream == 0)
rkls[:] = _TARGET_NODATA
rkls[valid_mask] = (
ls_factor[valid_mask] * erosivity[valid_mask] *
erodibility[valid_mask] * cell_area_ha)
# rkls is 1 on the stream
rkls[nodata_mask & (stream == 1)] = 1
return rkls
# aligning with index 3 that's the stream and the most likely to be
# aligned with LULCs
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
ls_factor_path, erosivity_path, erodibility_path, stream_path]],
rkls_function, rkls_path, gdal.GDT_Float32, _TARGET_NODATA)
def _threshold_slope(slope_path, out_thresholded_slope_path):
"""Threshold the slope between 0.005 and 1.0.
Parameters:
slope_path (string): path to a raster of slope in percent
out_thresholded_slope_path (string): path to output raster of
thresholded slope between 0.005 and 1.0
Returns:
None
"""
slope_nodata = pygeoprocessing.get_raster_info(slope_path)['nodata'][0]
def threshold_slope(slope):
"""Convert slope to m/m and clamp at 0.005 and 1.0.
As described in Cavalli et al., 2013.
"""
valid_slope = slope != slope_nodata
slope_m = slope[valid_slope] / 100.0
slope_m[slope_m < 0.005] = 0.005
slope_m[slope_m > 1.0] = 1.0
result = numpy.empty(valid_slope.shape)
result[:] = slope_nodata
result[valid_slope] = slope_m
return result
pygeoprocessing.raster_calculator(
[(slope_path, 1)], threshold_slope, out_thresholded_slope_path,
gdal.GDT_Float32, slope_nodata)
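# --- Illustrative sketch (not part of the original SDR module) ---
# numpy.clip expresses the same m/m clamp as threshold_slope above,
# ignoring the nodata handling.
def _example_threshold_slope(percent_slope_array):
    import numpy
    return numpy.clip(percent_slope_array / 100.0, 0.005, 1.0)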
def _add_drainage(stream_path, drainage_path, out_stream_and_drainage_path):
"""Combine stream and drainage masks into one raster mask.
Parameters:
stream_path (string): path to stream raster mask where 1 indicates
a stream, and 0 is a valid landscape pixel but not a stream.
drainage_path (string): path to 1/0 mask of drainage areas.
1 indicates any water reaching that pixel drains to a stream.
out_stream_and_drainage_path (string): output raster of a logical
OR of stream and drainage inputs
Returns:
None
"""
def add_drainage_op(stream, drainage):
"""Add drainage mask to stream layer."""
return numpy.where(drainage == 1, 1, stream)
stream_nodata = pygeoprocessing.get_raster_info(stream_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [stream_path, drainage_path]], add_drainage_op,
out_stream_and_drainage_path, gdal.GDT_Byte, stream_nodata)
def _calculate_w(
biophysical_table, lulc_path, w_factor_path,
out_thresholded_w_factor_path):
"""W factor: map C values from LULC and lower threshold to 0.001.
W is a factor in calculating d_up accumulation for SDR.
Parameters:
biophysical_table (dict): map of LULC codes to dictionaries that
contain at least a 'usle_c' field
lulc_path (string): path to LULC raster
w_factor_path (string): path to the output raw W factor
out_thresholded_w_factor_path (string): W factor from `w_factor_path`
thresholded to be no less than 0.001.
Returns:
None
"""
lulc_to_c = dict(
[(lulc_code, float(table['usle_c'])) for
(lulc_code, table) in biophysical_table.items()])
pygeoprocessing.reclassify_raster(
(lulc_path, 1), lulc_to_c, w_factor_path, gdal.GDT_Float32,
_TARGET_NODATA, values_required=True)
def threshold_w(w_val):
"""Threshold w to 0.001."""
w_val_copy = w_val.copy()
nodata_mask = w_val == _TARGET_NODATA
w_val_copy[w_val < 0.001] = 0.001
w_val_copy[nodata_mask] = _TARGET_NODATA
return w_val_copy
pygeoprocessing.raster_calculator(
[(w_factor_path, 1)], threshold_w, out_thresholded_w_factor_path,
gdal.GDT_Float32, _TARGET_NODATA)
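# --- Illustrative sketch (not part of the original SDR module) ---
# The same LULC -> usle_c lookup and 0.001 floor as _calculate_w, but for a
# single code in an in-memory biophysical table instead of rasters.
def _example_w_factor(lulc_code, biophysical_table):
    usle_c = float(biophysical_table[lulc_code]['usle_c'])
    return max(usle_c, 0.001)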
def _calculate_cp(biophysical_table, lulc_path, cp_factor_path):
"""Map LULC to C*P value.
Parameters:
biophysical_table (dict): map of lulc codes to dictionaries that
contain at least the entries 'usle_c' and 'usle_p' corresponding to
those USLE components.
lulc_path (string): path to LULC raster
cp_factor_path (string): path to output raster of LULC mapped to C*P
values
Returns:
None
"""
lulc_to_cp = dict(
[(lulc_code, float(table['usle_c']) * float(table['usle_p'])) for
(lulc_code, table) in biophysical_table.items()])
pygeoprocessing.reclassify_raster(
(lulc_path, 1), lulc_to_cp, cp_factor_path, gdal.GDT_Float32,
_TARGET_NODATA, values_required=True)
def _calculate_usle(
rkls_path, cp_factor_path, drainage_raster_path, out_usle_path):
"""Calculate USLE, multiply RKLS by CP and set to 1 on drains."""
def usle_op(rkls, cp_factor, drainage):
"""Calculate USLE."""
result = numpy.empty(rkls.shape)
result[:] = _TARGET_NODATA
valid_mask = (rkls != _TARGET_NODATA) & (cp_factor != _TARGET_NODATA)
result[valid_mask] = rkls[valid_mask] * cp_factor[valid_mask] * (
1 - drainage[valid_mask])
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
rkls_path, cp_factor_path, drainage_raster_path]], usle_op,
out_usle_path, gdal.GDT_Float32, _TARGET_NODATA)
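# --- Illustrative sketch (not part of the original SDR module) ---
# Scalar version of usle_op above: USLE = RKLS * C * P, zeroed on drainage
# pixels (drainage is 1 where water reaching the pixel drains directly).
def _example_usle_point(rkls, usle_c, usle_p, drainage):
    return rkls * usle_c * usle_p * (1 - drainage)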
def _calculate_bar_factor(
dem_path, factor_path, flow_accumulation_path, flow_direction_path,
zero_absorption_source_path, loss_path, accumulation_path,
out_bar_path):
"""Route user defined source across DEM.
Used for calculating S and W bar in the SDR operation.
Parameters:
dem_path (string): path to DEM raster
factor_path (string): path to arbitrary factor raster
flow_accumulation_path (string): path to flow accumulation raster
flow_direction_path (string): path to flow direction path (in radians)
zero_absorption_source_path (string): path to a raster that is all
0s and same size as `dem_path`. Temporary file.
loss_path (string): path to a raster that can save the loss raster
from routing. Temporary file.
accumulation_path (string): path to a raster that can be used to
save the accumulation of the factor. Temporary file.
out_bar_path (string): path to output raster that is the result of
the factor accumulation raster divided by the flow accumulation
raster.
Returns:
None.
"""
pygeoprocessing.new_raster_from_base(
dem_path, zero_absorption_source_path, gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0.0])
flow_accumulation_nodata = pygeoprocessing.get_raster_info(
flow_accumulation_path)['nodata'][0]
natcap.invest.pygeoprocessing_0_3_3.routing.route_flux(
flow_direction_path, dem_path, factor_path,
zero_absorption_source_path, loss_path, accumulation_path,
'flux_only')
def bar_op(base_accumulation, flow_accumulation):
"""Aggregate accumulation from base divided by the flow accum."""
result = numpy.empty(base_accumulation.shape)
valid_mask = (
(base_accumulation != _TARGET_NODATA) &
(flow_accumulation != flow_accumulation_nodata))
result[:] = _TARGET_NODATA
result[valid_mask] = (
base_accumulation[valid_mask] / flow_accumulation[valid_mask])
return result
pygeoprocessing.raster_calculator(
[(accumulation_path, 1), (flow_accumulation_path, 1)], bar_op,
out_bar_path, gdal.GDT_Float32, _TARGET_NODATA)
def _calculate_d_up(
w_bar_path, s_bar_path, flow_accumulation_path, out_d_up_path):
"""Calculate w_bar * s_bar * sqrt(flow accumulation * cell area)."""
cell_area = pygeoprocessing.get_raster_info(
w_bar_path)['mean_pixel_size'] ** 2
flow_accumulation_nodata = pygeoprocessing.get_raster_info(
flow_accumulation_path)['nodata'][0]
def d_up_op(w_bar, s_bar, flow_accumulation):
"""Calculate the d_up index.
w_bar * s_bar * sqrt(upstream area)
"""
valid_mask = (
(w_bar != _TARGET_NODATA) & (s_bar != _TARGET_NODATA) &
(flow_accumulation != flow_accumulation_nodata))
d_up_array = numpy.empty(valid_mask.shape)
d_up_array[:] = _TARGET_NODATA
d_up_array[valid_mask] = (
w_bar[valid_mask] * s_bar[valid_mask] * numpy.sqrt(
flow_accumulation[valid_mask] * cell_area))
return d_up_array
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
w_bar_path, s_bar_path, flow_accumulation_path]], d_up_op,
out_d_up_path, gdal.GDT_Float32, _TARGET_NODATA)
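# --- Illustrative sketch (not part of the original SDR module) ---
# Scalar version of d_up_op above, assuming a square pixel of `cell_size`
# meters and flow_accumulation expressed as a count of upstream pixels.
def _example_d_up_point(w_bar, s_bar, flow_accumulation, cell_size=30.0):
    import math
    cell_area = cell_size ** 2
    return w_bar * s_bar * math.sqrt(flow_accumulation * cell_area)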
def _calculate_d_up_bare(
s_bar_path, flow_accumulation_path, out_d_up_bare_path):
"""Calculate s_bar * sqrt(flow accumulation * cell area)."""
cell_area = pygeoprocessing.get_raster_info(
s_bar_path)['mean_pixel_size'] ** 2
flow_accumulation_nodata = pygeoprocessing.get_raster_info(
flow_accumulation_path)['nodata'][0]
def d_up_op(s_bar, flow_accumulation):
"""Calculate the bare d_up index.
s_bar * sqrt(upstream area)
"""
valid_mask = (
(flow_accumulation != flow_accumulation_nodata) &
(s_bar != _TARGET_NODATA))
d_up_array = numpy.empty(valid_mask.shape, dtype=numpy.float32)
d_up_array[:] = _TARGET_NODATA
d_up_array[valid_mask] = (
numpy.sqrt(flow_accumulation[valid_mask] * cell_area) *
s_bar[valid_mask])
return d_up_array
pygeoprocessing.raster_calculator(
[(s_bar_path, 1), (flow_accumulation_path, 1)], d_up_op,
out_d_up_bare_path, gdal.GDT_Float32, _TARGET_NODATA)
def _calculate_inverse_ws_factor(
thresholded_slope_path, thresholded_w_factor_path,
out_ws_factor_inverse_path):
"""Calculate 1/(w*s)."""
slope_nodata = pygeoprocessing.get_raster_info(
thresholded_slope_path)['nodata'][0]
def ws_op(w_factor, s_factor):
"""Calculate the inverse ws factor."""
valid_mask = (w_factor != _TARGET_NODATA) & (s_factor != slope_nodata)
result = numpy.empty(valid_mask.shape, dtype=numpy.float32)
result[:] = _TARGET_NODATA
result[valid_mask] = (
1.0 / (w_factor[valid_mask] * s_factor[valid_mask]))
return result
pygeoprocessing.raster_calculator(
[(thresholded_w_factor_path, 1), (thresholded_slope_path, 1)], ws_op,
out_ws_factor_inverse_path, gdal.GDT_Float32, _TARGET_NODATA)
def _calculate_inverse_s_factor(
thresholded_slope_path, out_s_factor_inverse_path):
"""Calculate 1/s."""
slope_nodata = pygeoprocessing.get_raster_info(
thresholded_slope_path)['nodata'][0]
def s_op(s_factor):
"""Calculate the inverse s factor."""
valid_mask = (s_factor != slope_nodata)
result = numpy.empty(valid_mask.shape, dtype=numpy.float32)
result[:] = _TARGET_NODATA
result[valid_mask] = 1.0 / s_factor[valid_mask]
return result
pygeoprocessing.raster_calculator(
[(thresholded_slope_path, 1)], s_op,
out_s_factor_inverse_path, gdal.GDT_Float32, _TARGET_NODATA)
def _calculate_ic(d_up_path, d_dn_path, out_ic_factor_path):
"""Calculate log10(d_up/d_dn)."""
# ic can be positive or negative, so float.min is a reasonable nodata value
d_dn_nodata = pygeoprocessing.get_raster_info(d_dn_path)['nodata'][0]
def ic_op(d_up, d_dn):
"""Calculate IC factor."""
valid_mask = (
(d_up != _TARGET_NODATA) & (d_dn != d_dn_nodata) & (d_dn != 0) &
(d_up != 0))
ic_array = numpy.empty(valid_mask.shape)
ic_array[:] = _IC_NODATA
ic_array[valid_mask] = numpy.log10(
d_up[valid_mask] / d_dn[valid_mask])
return ic_array
pygeoprocessing.raster_calculator(
[(d_up_path, 1), (d_dn_path, 1)], ic_op, out_ic_factor_path,
gdal.GDT_Float32, _IC_NODATA)
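# --- Illustrative sketch (not part of the original SDR module) ---
# Scalar version of ic_op above: the index of connectivity is the log ratio
# of the upslope (d_up) and downslope (d_dn) components.
def _example_ic_point(d_up, d_dn):
    import math
    return math.log10(d_up / d_dn)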
def _calculate_sdr(
k_factor, ic_0, sdr_max, ic_path, stream_path, out_sdr_path):
"""Derive SDR from k, ic0, ic; 0 on the stream and clamped to sdr_max."""
def sdr_op(ic_factor, stream):
"""Calculate SDR factor."""
valid_mask = (
(ic_factor != _IC_NODATA) & (stream != 1))
result = numpy.empty(valid_mask.shape, dtype=numpy.float32)
result[:] = _TARGET_NODATA
result[valid_mask] = (
sdr_max / (1+numpy.exp((ic_0-ic_factor[valid_mask])/k_factor)))
result[stream == 1] = 0.0
return result
pygeoprocessing.raster_calculator(
[(ic_path, 1), (stream_path, 1)], sdr_op, out_sdr_path,
gdal.GDT_Float32, _TARGET_NODATA)
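# --- Illustrative sketch (not part of the original SDR module) ---
# The same sigmoid as sdr_op above for a single pixel; the values in the
# docstring are placeholders, not model defaults.
def _example_sdr_point(ic, k_factor, ic_0, sdr_max):
    """e.g. _example_sdr_point(ic=0.5, k_factor=2.0, ic_0=0.5, sdr_max=0.8) -> 0.4"""
    import math
    return sdr_max / (1 + math.exp((ic_0 - ic) / k_factor))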
def _calculate_sed_export(usle_path, sdr_path, out_sed_export_path):
"""Calculate USLE * SDR."""
def sed_export_op(usle, sdr):
"""Sediment export."""
valid_mask = (usle != _TARGET_NODATA) & (sdr != _TARGET_NODATA)
result = numpy.empty(valid_mask.shape, dtype=numpy.float32)
result[:] = _TARGET_NODATA
result[valid_mask] = usle[valid_mask] * sdr[valid_mask]
return result
pygeoprocessing.raster_calculator(
[(usle_path, 1), (sdr_path, 1)], sed_export_op, out_sed_export_path,
gdal.GDT_Float32, _TARGET_NODATA)
def _calculate_sed_retention_index(
rkls_path, usle_path, sdr_path, sdr_max,
out_sed_retention_index_path):
"""Calculate (rkls-usle) * sdr / sdr_max."""
def sediment_index_op(rkls, usle, sdr_factor):
"""Calculate sediment retention index."""
valid_mask = (
(rkls != _TARGET_NODATA) & (usle != _TARGET_NODATA) &
(sdr_factor != _TARGET_NODATA))
result = numpy.empty(valid_mask.shape, dtype=numpy.float32)
result[:] = _TARGET_NODATA
result[valid_mask] = (
(rkls[valid_mask] - usle[valid_mask]) *
sdr_factor[valid_mask] / sdr_max)
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [rkls_path, usle_path, sdr_path]],
sediment_index_op, out_sed_retention_index_path, gdal.GDT_Float32,
_TARGET_NODATA)
def _calculate_sed_retention(
rkls_path, usle_path, stream_path, sdr_path, sdr_bare_soil_path,
out_sed_ret_bare_soil_path):
"""Difference in exported sediments on basic and bare watershed.
Calculates the difference of sediment export on the real landscape and
a bare soil landscape given that SDR has been calculated for bare soil.
Essentially:
RKLS * SDR_bare - USLE * SDR
Parameters:
rkls_path (string): path to RKLS raster
usle_path (string): path to USLE raster
stream_path (string): path to stream/drainage mask
sdr_path (string): path to SDR raster
sdr_bare_soil_path (string): path to SDR raster calculated for a bare
watershed
out_sed_ret_bare_soil_path (string): path to output raster indicating
where sediment is retained
Returns:
None
"""
stream_nodata = pygeoprocessing.get_raster_info(stream_path)['nodata'][0]
def sediment_retention_bare_soil_op(
rkls, usle, stream_factor, sdr_factor, sdr_factor_bare_soil):
"""Subtract bare soil export from real landcover."""
valid_mask = (
(rkls != _TARGET_NODATA) & (usle != _TARGET_NODATA) &
(stream_factor != stream_nodata) &
(sdr_factor != _TARGET_NODATA) &
(sdr_factor_bare_soil != _TARGET_NODATA))
result = numpy.empty(valid_mask.shape, dtype=numpy.float32)
result[:] = _TARGET_NODATA
result[valid_mask] = (
rkls[valid_mask] * sdr_factor_bare_soil[valid_mask] -
usle[valid_mask] * sdr_factor[valid_mask]) * (
1 - stream_factor[valid_mask])
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
rkls_path, usle_path, stream_path, sdr_path, sdr_bare_soil_path]],
sediment_retention_bare_soil_op, out_sed_ret_bare_soil_path,
gdal.GDT_Float32, _TARGET_NODATA)
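# --- Illustrative sketch (not part of the original SDR module) ---
# Scalar version of sediment_retention_bare_soil_op above: export avoided
# relative to a bare-soil landscape, zero on stream pixels.
def _example_sed_retention_point(rkls, usle, sdr, sdr_bare, on_stream):
    if on_stream:
        return 0.0
    return rkls * sdr_bare - usle * sdr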
def _generate_report(
watersheds_path, usle_path, sed_export_path, sed_retention_path,
watershed_results_sdr_path):
"""Create shapefile with USLE, sed export, and sed retention fields."""
field_summaries = {
'usle_tot': pygeoprocessing.zonal_statistics(
(usle_path, 1), watersheds_path, 'ws_id'),
'sed_export': pygeoprocessing.zonal_statistics(
(sed_export_path, 1), watersheds_path, 'ws_id'),
'sed_retent': pygeoprocessing.zonal_statistics(
(sed_retention_path, 1), watersheds_path, 'ws_id'),
}
original_datasource = gdal.OpenEx(watersheds_path, gdal.OF_VECTOR)
# Delete if existing shapefile with the same name and path
if os.path.isfile(watershed_results_sdr_path):
os.remove(watershed_results_sdr_path)
driver = gdal.GetDriverByName('ESRI Shapefile')
datasource_copy = driver.CreateCopy(
watershed_results_sdr_path, original_datasource)
layer = datasource_copy.GetLayer()
for field_name in field_summaries:
field_def = ogr.FieldDefn(field_name, ogr.OFTReal)
field_def.SetWidth(24)
field_def.SetPrecision(11)
layer.CreateField(field_def)
# write the zonal statistic sums into each watershed feature
for feature_id in range(layer.GetFeatureCount()):
feature = layer.GetFeature(feature_id)
for field_name in field_summaries:
ws_id = feature.GetFieldAsInteger('ws_id')
feature.SetField(
field_name, float(field_summaries[field_name][ws_id]['sum']))
layer.SetFeature(feature)
@validation.invest_validator
def validate(args, limit_to=None):
"""Validate args to ensure they conform to `execute`'s contract.
Parameters:
args (dict): dictionary of key(str)/value pairs where keys and
values are specified in `execute` docstring.
limit_to (str): (optional) if not None indicates that validation
should only occur on the args[limit_to] value. The intent is that
individual key validation could be significantly less expensive
than validating the entire `args` dictionary.
Returns:
list of ([invalid key_a, invalid_keyb, ...], 'warning/error message')
tuples. Where an entry indicates that the invalid keys caused
the error message in the second part of the tuple. This should
be an empty list if validation succeeds.
"""
missing_key_list = []
no_value_list = []
validation_error_list = []
required_keys = [
'workspace_dir',
'dem_path',
'erosivity_path',
'erodibility_path',
'lulc_path',
'watersheds_path',
'biophysical_table_path',
'threshold_flow_accumulation',
'k_param',
'ic_0_param',
'sdr_max']
for key in required_keys:
if limit_to is None or limit_to == key:
if key not in args:
missing_key_list.append(key)
elif args[key] in ['', None]:
no_value_list.append(key)
if missing_key_list:
# if there are missing keys, we have to raise a KeyError to stop hard
raise KeyError(
"The following keys were expected in `args` but were missing " +
', '.join(missing_key_list))
if no_value_list:
validation_error_list.append(
(no_value_list, 'parameter has no value'))
file_type_list = [
('dem_path', 'raster'),
('erosivity_path', 'raster'),
('erodibility_path', 'raster'),
('lulc_path', 'raster'),
('watersheds_path', 'vector'),
('biophysical_table_path', 'table')]
if limit_to in ['drainage_path', None] and (
'drainage_path' in args and
args['drainage_path'] not in ['', None]):
file_type_list.append(('drainage_path', 'raster'))
# check that existing/optional files are the correct types
with utils.capture_gdal_logging():
for key, key_type in file_type_list:
if (limit_to is None or limit_to == key) and key in args:
if not os.path.exists(args[key]):
validation_error_list.append(
([key], 'not found on disk'))
continue
if key_type == 'raster':
raster = gdal.OpenEx(args[key])
if raster is None:
validation_error_list.append(
([key], 'not a raster'))
del raster
if limit_to in ['watersheds_path', None]:
# checks that watersheds are a vector, that they have 'ws_id' and
# that all their fields are defined
if os.path.exists(args['watersheds_path']):
try:
watersheds_vector = gdal.OpenEx(
args['watersheds_path'], gdal.OF_VECTOR)
if watersheds_vector is None:
validation_error_list.append(
(['watersheds_path'], 'not a vector'))
else:
watersheds_layer = watersheds_vector.GetLayer()
watersheds_defn = watersheds_layer.GetLayerDefn()
if watersheds_defn.GetFieldIndex('ws_id') == -1:
validation_error_list.append((
['watersheds_path'],
'does not have a `ws_id` field defined.'))
else:
for feature in watersheds_layer:
try:
value = feature.GetFieldAsString('ws_id')
_ = int(value) # value should be an integer
except ValueError:
validation_error_list.append((
['watersheds_path'],
'feature %s has an invalid value of '
'"%s" in \'ws_id\' column, it should '
'be an integer value' % (
str(feature.GetFID()), value)))
finally:
feature = None
watersheds_defn = None
watersheds_layer = None
watersheds_vector = None
return validation_error_list
| 39.170172
| 80
| 0.649102
|
67d5bd9141aa22cec0330a06f5fc9f671cc6c2f6
| 5,173
|
py
|
Python
|
main.py
|
PeterParser/BarcodeAnalyzer
|
f7a3771fea66f21c47d885d7bb3bbe203143a5ff
|
[
"MIT"
] | 1
|
2021-01-21T14:44:21.000Z
|
2021-01-21T14:44:21.000Z
|
main.py
|
PeterParser/BarcodeAnalyzer
|
f7a3771fea66f21c47d885d7bb3bbe203143a5ff
|
[
"MIT"
] | null | null | null |
main.py
|
PeterParser/BarcodeAnalyzer
|
f7a3771fea66f21c47d885d7bb3bbe203143a5ff
|
[
"MIT"
] | null | null | null |
import cv2
import glob
import matplotlib.pyplot as plt
from colorama import Fore, Style
from barcode import Barcode
title = r"""██████╗ █████╗ ██████╗ ██████╗ ██████╗ ██████╗ ███████╗ █████╗ ███╗ ██╗ █████╗ ██╗ ██╗ ██╗███████╗███████╗██████╗
██╔══██╗██╔══██╗██╔══██╗██╔════╝██╔═══██╗██╔══██╗██╔════╝ ██╔══██╗████╗ ██║██╔══██╗██║ ╚██╗ ██╔╝╚══███╔╝██╔════╝██╔══██╗
██████╔╝███████║██████╔╝██║ ██║ ██║██║ ██║█████╗ ███████║██╔██╗ ██║███████║██║ ╚████╔╝ ███╔╝ █████╗ ██████╔╝
██╔══██╗██╔══██║██╔══██╗██║ ██║ ██║██║ ██║██╔══╝ ██╔══██║██║╚██╗██║██╔══██║██║ ╚██╔╝ ███╔╝ ██╔══╝ ██╔══██╗
██████╔╝██║ ██║██║ ██║╚██████╗╚██████╔╝██████╔╝███████╗ ██║ ██║██║ ╚████║██║ ██║███████╗██║ ███████╗███████╗██║ ██║
╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═╝╚══════╝╚═╝ ╚══════╝╚══════╝╚═╝ ╚═╝
"""
def print_title():
print(Fore.YELLOW + title)
print(Style.RESET_ALL)
def main(barcode_directory):
column_names = ["IMAGE NAME", "BARCODE GRADE", "X-DIMENSION", "HEIGHT", "BOUNDING BOX", "ROTATION ANGLE",
"SCANLINE-1","SCANLINE-2", "SCANLINE-3", "SCANLINE-4", "SCANLINE-5",
"SCANLINE-6", "SCANLINE-7", "SCANLINE-8", "SCANLINE-9", "SCANLINE-10", "SIZE BARS-SPACES\n"]
header = ','.join(column_names)
f = open('barcode_features.csv', 'w')
f.write(header)
#Cycle over the images in the user-provided directory
for file in glob.glob(barcode_directory + "/*.*"):
try:
#print()
#print('|' * 80)
#print()
#Print the image_name and the image itself
barcode = Barcode()
barcode.image_name = file
#print('image_name: ', barcode.image_name)
plt.title('ORIGINAL IMAGE', pad=10)
barcode.image = cv2.imread(barcode.image_name, cv2.IMREAD_GRAYSCALE)
plt.imshow(barcode.image, cmap = 'gray', vmin = 0, vmax = 255)
plt.show(block = True)
#Show the roi and print its coordinates
barcode.bin_image = cv2.adaptiveThreshold(barcode.image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 41, 11)
barcode.set_roi()
plt.title('ORIGINAL ROI', pad=10)
plt.imshow(barcode.roi, cmap = 'gray', vmin = 0, vmax = 255)
plt.show(block = True)
#print('coord_roi: ', barcode.coord_roi)
#Show the roi after the verticalization
plt.title('VERTICALIZED ROI', pad=10)
barcode.verticalize_roi()
plt.imshow(barcode.roi, cmap = 'gray', vmin = 0, vmax = 255)
plt.show(block = True)
#Show the highlighted bars in red and print the minimum width of a bar and the minimum_height of a bar
barcode.bin_otsu_roi = cv2.threshold(barcode.roi, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
barcode.set_minimum_width_height_bars()
#print('min_width: %3.2f' % barcode.min_width)
#print('min_height: ', barcode.min_height)
#Show the roi resized with the following rule: bounding box enclosing the barcode plus a surrounding background area
#(quiet zone) of size: X dimension above and below the code, 10*X before the first bar, 10*X following the last bar
plt.title('RESIZED ROI', pad=10)
barcode.resize_roi()
#Show the roi resized with the following rule
plt.imshow(barcode.roi, cmap = 'gray', vmin = 0, vmax = 255)
plt.show(block = True)
#Numbers and text elements are removed so as to create a smaller box that does not include them.
plt.title('CUT ROI', pad=10)
barcode.cut_roi()
plt.imshow(barcode.roi, cmap = 'gray', vmin = 0, vmax = 255)
plt.show(block = True)
#Set the scanlines we have to analyse and plot them
barcode.set_scanlines()
#Methods which sets the scanlines' parameters we have to find
barcode.set_scanlines_min_reflectance()
barcode.set_scanlines_max_reflectance()
barcode.set_scanlines_symbol_contrast()
barcode.set_scanlines_edges()
barcode.set_scalines_minimum_edge_contrast()
barcode.set_scanline_defects()
barcode.set_scanline_modulation()
barcode.set_left_to_right_widths()
#Set the grade of each scanline
barcode.grade_scanlines()
#Set the grade of the barcode
barcode.grade_barcode()
row = ','.join(barcode.write_row_lists())
except:
print("The analysis didn't succeded")
continue
f.write(row)
f.close()
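# --- Illustrative sketch (not part of the original script) ---
# One possible way to read back the barcode_features.csv written by main();
# the column names match the header defined above.
def read_report(path='barcode_features.csv'):
    import csv
    with open(path, newline='') as handle:
        return list(csv.DictReader(handle))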
if __name__ == "__main__":
print_title()
barcode_directory = input('Insert the name of the directory you want to analyse:\t')
main(barcode_directory)
print("\nEnd of analysis")
| 42.401639
| 142
| 0.500483
|
b914824273d8dfa469ac27c8f4635ed3cab42e9e
| 90,070
|
py
|
Python
|
buzzer_music/example.py
|
100askTeam/pico-micropython
|
adbf63c24be0142237577d996dfd5a6211c39f1f
|
[
"BSD-3-Clause"
] | null | null | null |
buzzer_music/example.py
|
100askTeam/pico-micropython
|
adbf63c24be0142237577d996dfd5a6211c39f1f
|
[
"BSD-3-Clause"
] | null | null | null |
buzzer_music/example.py
|
100askTeam/pico-micropython
|
adbf63c24be0142237577d996dfd5a6211c39f1f
|
[
"BSD-3-Clause"
] | 1
|
2021-03-23T12:12:24.000Z
|
2021-03-23T12:12:24.000Z
|
from buzzer_music import music
from time import sleep
#Example songs
# Test - Me
#song = '0 D4 8 0;0 D5 8 0;0 G4 8 0;8 C5 2 0;10 B4 2 0;12 G4 2 0;14 F4 1 0;15 G4 17 0;16 D4 8 0;24 C4 8 0'
# https://onlinesequencer.net/195547
#song = '0 A#4 1 1;2 F5 1 1;4 D#5 1 1;8 D5 1 1;11 D5 1 1;6 A#4 1 1;14 D#5 1 1;18 A#4 1 1;20 D#5 1 1;22 A#4 1 1;24 D5 1 1;27 D5 1 1;30 D#5 1 1;32 A#4 1 1;34 F5 1 1;36 D#5 1 1;38 A#4 1 1;40 D5 1 1;43 D5 1 1;46 D#5 1 1;50 A#4 1 1;52 D#5 1 1;54 G5 1 1;56 F5 1 1;59 D#5 1 1;62 F5 1 1;64 A#4 1 1;66 F5 1 1;68 D#5 1 1;70 A#4 1 1;72 D5 1 1;75 D5 1 1;78 D#5 1 1;82 A#4 1 1;84 D#5 1 1;86 A#4 1 1;88 D5 1 1;91 D5 1 1;94 D#5 1 1;96 A#4 1 1;100 D#5 1 1;102 A#4 1 1;104 D5 1 1;107 D5 1 1;110 D#5 1 1;114 A#4 1 1;116 D#5 1 1;118 G5 1 1;120 F5 1 1;123 D#5 1 1;126 F5 1 1;98 F5 1 1'
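# --- Illustrative note (an assumption from the data above, not from library docs) ---
# Each song string appears to be a ';'-separated list of notes, where every
# note has four space-separated fields: start time, pitch, duration, and
# instrument index. A tiny parsing sketch:
def _example_parse_song(song):
    notes = []
    for chunk in song.split(';'):
        start, pitch, duration, instrument = chunk.split()
        notes.append((int(start), pitch, int(duration), int(instrument)))
    return notes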
# https://onlinesequencer.net/1864273
#song = '0 D5 4 14;4 A5 4 14;8 C6 4 14;12 B5 4 14;16 G5 2 14;18 F5 2 14;20 E5 2 14;22 F5 2 14;24 G5 8 14;4 E5 8 16;4 C5 8 16;4 F4 8 16;12 D5 8 16;12 B4 8 16;12 E4 8 16;20 C5 8 16;20 A4 8 16;20 D4 8 16;0 E4 4 16;0 B4 4 16;28 E4 4 16;28 B4 4 16'
# https://onlinesequencer.net/1864291 - Message In A Bottle
#song = '0 C#4 1 32;1 G#4 1 32;2 D#5 1 32;3 A3 1 32;5 E4 1 32;6 B4 1 32;10 C#5 1 32;7 B3 1 32;9 F#4 1 32;11 F#3 1 32;13 C#4 1 32;14 G#4 1 32;15 A4 1 32;16 C#4 1 32;17 G#4 1 32;18 D#5 1 32;19 A3 1 32;21 E4 1 32;22 B4 1 32;23 B3 1 32;25 F#4 1 32;27 F#3 1 32;29 C#4 1 32;30 G#4 1 32;31 A4 1 32;31 C#3 1 5;32 C#4 1 32;33 G#4 1 32;34 D#5 1 32;35 A3 1 32;37 E4 1 32;38 B4 1 32;39 B3 1 32;41 F#4 1 32;45 C#4 1 32;46 G#4 1 32;47 A4 1 32;48 C#4 1 32;49 G#4 1 32;50 D#5 1 32;51 A3 1 32;53 E4 1 32;54 B4 1 32;55 B3 1 32;57 F#4 1 32;59 F#3 1 32;61 C#4 1 32;62 G#4 1 32;63 A4 1 32;32 C#3 1 5;35 A2 1 5;39 B2 1 5;40 B2 1 5;47 C#3 1 5;48 C#3 1 5;51 A2 1 5;55 B2 1 5;56 B2 1 5;59 F#3 1 5;63 C#3 1 5;43 F#3 1 5;26 C#5 1 32;42 C#5 1 32;58 C#5 1 32;31 C#3 1 5;32 C#3 1 5;35 A2 1 5;39 B2 1 5;40 B2 1 5;47 C#3 1 5;48 C#3 1 5;51 A2 1 5;55 B2 1 5;56 B2 1 5;59 F#3 1 5;63 C#3 1 5;43 F#3 1 5'
# https://onlinesequencer.net/1864297 - Tetris
#song = '0 E3 1 0;2 E4 1 0;4 E3 1 0;6 E4 1 0;8 E3 1 0;10 E4 1 0;12 E3 1 0;14 E4 1 0;16 A3 1 0;18 A4 1 0;20 A3 1 0;22 A4 1 0;24 A3 1 0;26 A4 1 0;28 A3 1 0;30 A4 1 0;32 G#3 1 0;34 G#4 1 0;36 G#3 1 0;38 G#4 1 0;40 E3 1 0;42 E4 1 0;44 E3 1 0;46 E4 1 0;48 A3 1 0;50 A4 1 0;52 A3 1 0;54 A4 1 0;56 A3 1 0;58 B3 1 0;60 C4 1 0;62 D4 1 0;64 D3 1 0;66 D4 1 0;68 D3 1 0;70 D4 1 0;72 D3 1 0;74 D4 1 0;76 D3 1 0;78 D4 1 0;80 C3 1 0;82 C4 1 0;84 C3 1 0;86 C4 1 0;88 C3 1 0;90 C4 1 0;92 C3 1 0;94 C4 1 0;96 G2 1 0;98 G3 1 0;100 G2 1 0;102 G3 1 0;104 E3 1 0;106 E4 1 0;108 E3 1 0;110 E4 1 0;114 A4 1 0;112 A3 1 0;116 A3 1 0;118 A4 1 0;120 A3 1 0;122 A4 1 0;124 A3 1 0;0 E6 1 1;4 B5 1 1;6 C6 1 1;8 D6 1 1;10 E6 1 1;11 D6 1 1;12 C6 1 1;14 B5 1 1;0 E5 1 6;4 B4 1 6;6 C5 1 6;8 D5 1 6;10 E5 1 6;11 D5 1 6;12 C5 1 6;14 B4 1 6;16 A5 1 1;20 A5 1 1;22 C6 1 1;24 E6 1 1;28 D6 1 1;30 C6 1 1;32 B5 1 1;36 B5 1 1;36 B5 1 1;37 B5 1 1;38 C6 1 1;40 D6 1 1;44 E6 1 1;48 C6 1 1;52 A5 1 1;56 A5 1 1;20 A4 1 6;16 A4 1 6;22 C5 1 6;24 E5 1 6;28 D5 1 6;30 C5 1 6;32 B4 1 6;36 B4 1 6;37 B4 1 6;38 C5 1 6;40 D5 1 6;44 E5 1 6;48 C5 1 6;52 A4 1 6;56 A4 1 6;64 D5 1 6;64 D6 1 1;68 D6 1 1;70 F6 1 1;72 A6 1 1;76 G6 1 1;78 F6 1 1;80 E6 1 1;84 E6 1 1;86 C6 1 1;88 E6 1 1;92 D6 1 1;94 C6 1 1;96 B5 1 1;100 B5 1 1;101 B5 1 1;102 C6 1 1;104 D6 1 1;108 E6 1 1;112 C6 1 1;116 A5 1 1;120 A5 1 1;72 A5 1 6;80 E5 1 6;68 D5 1 7;70 F5 1 7;76 G5 1 7;84 E5 1 7;78 F5 1 7;86 C5 1 7;88 E5 1 6;96 B4 1 6;104 D5 1 6;112 C5 1 6;120 A4 1 6;92 D5 1 7;94 C5 1 7;100 B4 1 7;101 B4 1 7;102 C5 1 7;108 E5 1 7;116 A4 1 7'
# Return Home - Me
#song = '0 D5 1 25;6 F#5 1 25;8 A#5 1 25;12 B5 1 25;16 C#5 1 25;19 F#5 1 25;26 C#5 1 25;28 F#5 1 25;30 G5 1 25;20 A5 1 25;32 B4 1 25;38 E5 1 25;40 F#5 1 25;44 G5 1 25;28 F#5 1 25;40 F#5 1 25;48 A#4 1 25;51 D5 1 25;52 F#5 1 25;6 F#5 1 25;16 C#5 1 25;48 A#4 1 25;0 D4 1 14;2 D4 1 14;4 D4 1 14;6 D4 1 14;8 D4 1 14;10 D4 1 14;12 D4 1 14;14 D4 1 14;16 C#4 1 14;18 C#4 1 14;20 C#4 1 14;22 C#4 1 14;24 C#4 1 14;26 C#4 1 14;28 C#4 1 14;30 C#4 1 14;32 B3 1 14;34 B3 1 14;36 B3 1 14;38 B3 1 14;40 B3 1 14;42 B3 1 14;44 B3 1 14;46 B3 1 14;48 D4 1 14;50 D4 1 14;52 D4 1 14;54 D4 1 14;56 G4 1 14;58 G4 1 14;60 G4 1 14;62 F#4 1 14;56 A#4 1 25;58 E5 1 25;60 G5 1 25;62 F#5 1 25;64 D5 1 25;70 F#5 1 25;72 A#5 1 25;76 B5 1 25;80 C#5 1 25;83 F#5 1 25;90 C#5 1 25;92 F#5 1 25;94 G5 1 25;84 A5 1 25;96 B4 1 25;102 E5 1 25;104 F#5 1 25;108 G5 1 25;92 F#5 1 25;104 F#5 1 25;112 A#4 1 25;115 D5 1 25;116 F#5 1 25;70 F#5 1 25;80 C#5 1 25;112 A#4 1 25;64 D4 1 14;66 D4 1 14;68 D4 1 14;70 D4 1 14;72 D4 1 14;74 D4 1 14;76 D4 1 14;78 D4 1 14;80 C#4 1 14;82 C#4 1 14;84 C#4 1 14;86 C#4 1 14;88 C#4 1 14;90 C#4 1 14;92 C#4 1 14;94 C#4 1 14;96 B3 1 14;98 B3 1 14;100 B3 1 14;102 B3 1 14;104 B3 1 14;106 B3 1 14;108 B3 1 14;110 B3 1 14;112 D4 1 14;114 D4 1 14;116 D4 1 14;118 D4 1 14;120 G4 1 14;122 G4 1 14;124 G4 1 14;126 F#4 1 14;120 A#4 1 25;122 E5 1 25;124 G5 1 25;126 F#5 1 25;64 D6 10 23;72 E6 4 23;76 F#6 4 23;80 G6 2 23;82 F#6 4 23;86 E6 4 23;90 D6 3 23;94 C#6 1 23;96 B5 6 23;102 C#6 5 23;107 E6 3 23;110 F#6 1 23;112 G6 1 23;113 F#6 1 23;114 E6 1 23;115 C#6 1 23;124 G6 1 23;125 F#6 1 23;126 E6 1 23;127 B5 1 23;116 G6 1 23;117 F#6 1 23;118 E6 1 23;119 C#6 1 23;120 G6 1 23;121 F#6 1 23;122 E6 1 23;123 C#6 1 23;128 D5 1 25;134 F#5 1 25;136 A#5 1 25;140 B5 1 25;144 C#5 1 25;147 F#5 1 25;154 C#5 1 25;156 F#5 1 25;158 G5 1 25;148 A5 1 25;160 B4 1 25;166 E5 1 25;168 F#5 1 25;172 G5 1 25;156 F#5 1 25;168 F#5 1 25;176 A#4 1 25;179 D5 1 25;180 F#5 1 25;134 F#5 1 25;144 C#5 1 25;176 A#4 1 25;128 D4 1 14;130 D4 1 14;132 D4 1 14;134 D4 1 14;136 D4 1 14;138 D4 1 14;140 D4 1 14;142 D4 1 14;144 C#4 1 14;146 C#4 1 14;148 C#4 1 14;150 C#4 1 14;152 C#4 1 14;154 C#4 1 14;156 C#4 1 14;158 C#4 1 14;160 B3 1 14;162 B3 1 14;164 B3 1 14;166 B3 1 14;168 B3 1 14;170 B3 1 14;172 B3 1 14;174 B3 1 14;176 D4 1 14;178 D4 1 14;180 D4 1 14;182 D4 1 14;184 G4 1 14;186 G4 1 14;188 G4 1 14;190 F#4 1 14;184 A#4 1 25;186 E5 1 25;188 G5 1 25;190 F#5 1 25;128 D6 10 23;136 E6 4 23;140 F#6 4 23;144 G6 2 23;146 F#6 4 23;150 A6 4 23;154 F#6 4 23;158 C#6 1 23;160 B5 6 23;166 G6 5 23;171 F#6 3 23;174 E6 1 23;176 G6 1 23;177 F#6 1 23;178 E6 1 23;179 C#6 1 23;188 G6 1 23;189 F#6 1 23;190 E6 1 23;191 B5 1 23;180 G6 1 23;181 F#6 1 23;182 E6 1 23;183 C#6 1 23;184 G6 1 23;185 F#6 1 23;186 E6 1 23;187 C#6 1 23;0 D5 12 15'
# https://onlinesequencer.net/1210132
#song = '4 C#5 1 0;8 D#5 1 0;12 C5 1 0;16 C#5 1 0;18 C5 1 0;20 A#4 1 0;22 C5 1 0;24 G#4 1 0;30 G#4 1 0;31 A4 1 0;32 A#4 1 0;36 C#5 1 0;40 A#4 1 0;42 A#5 1 0;44 G#5 1 0;46 F#5 1 0;48 F5 1 0;50 F#5 1 0;52 G#5 1 0;54 F5 1 0;56 D#5 1 0;0 F5 1 0;68 C#5 1 0;72 D#5 1 0;76 C5 1 0;80 C#5 1 0;82 C5 1 0;84 A#4 1 0;86 C5 1 0;88 G#4 1 0;94 G#4 1 0;95 A4 1 0;96 A#4 1 0;100 C#5 1 0;104 A#4 1 0;106 A#5 1 0;108 G#5 1 0;110 F#5 1 0;64 F5 1 0;112 G#5 1 0;114 A#5 1 0;116 C6 1 0;118 C#6 1 0;120 D#6 1 0;128 C#6 1 0;134 F6 1 0;140 C#6 1 0;144 C6 1 0;150 D#6 1 0;156 C6 1 0;158 G#5 1 0;159 A5 1 0;160 A#5 1 0;166 C#6 1 0;172 A#5 1 0;176 C6 1 0;180 D#6 1 0;182 G#5 1 0;184 A#5 1 0;186 B5 1 0;188 C6 1 0;192 C#6 1 0;198 F6 1 0;204 C#6 1 0;208 C6 1 0;214 D#6 1 0;220 C6 1 0;222 G#5 1 0;223 A5 1 0;224 A#5 1 0;230 C#6 1 0;236 A#5 1 0;240 C6 1 0;244 D#6 1 0;246 F6 1 0;248 D#6 1 0;250 C#6 1 0;252 C6 1 0;254 G#5 1 0'
# https://onlinesequencer.net/1327293
#song = '2 A5 1 4;1 B4 1 4;3 A5 1 4;4 G5 1 4;5 G5 1 4;10 F#5 1 4;7 F#5 1 4;8 A4 1 4;9 A4 1 4;11 A4 1 4;13 C#6 1 4;14 B5 1 4;12 C#6 1 4;6 F#5 1 4;15 B4 2 4;18 A5 1 4;17 B4 1 4;19 A5 1 4;20 G5 1 4;21 G5 1 4;26 F#5 1 4;23 F#5 1 4;24 A4 1 4;25 A4 1 4;27 A4 1 4;29 C#6 1 4;30 B5 1 4;28 C#6 1 4;22 F#5 1 4;31 B4 2 4;34 A5 1 4;33 B4 1 4;35 A5 1 4;36 G5 1 4;37 G5 1 4;42 F#5 1 4;39 F#5 1 4;40 A4 1 4;41 A4 1 4;43 A4 1 4;45 C#6 1 4;46 B5 1 4;44 C#6 1 4;38 F#5 1 4;47 B4 2 4;50 A5 1 4;49 B4 1 4;51 A5 1 4;52 G5 1 4;53 G5 1 4;58 F#5 1 4;55 F#5 1 4;56 A4 1 4;57 A4 1 4;59 A4 1 4;61 C#6 1 4;62 B5 1 4;60 C#6 1 4;54 F#5 1 4;63 B4 2 4;98 A5 1 4;97 B4 1 4;99 A5 1 4;100 G5 1 4;101 G5 1 4;106 F#5 1 4;103 F#5 1 4;104 A4 1 4;105 A4 1 4;107 A4 1 4;109 C#6 1 4;110 B5 1 4;108 C#6 1 4;102 F#5 1 4;111 B4 2 4;114 A5 1 4;113 B4 1 4;115 A5 1 4;116 G5 1 4;117 G5 1 4;122 F#5 1 4;119 F#5 1 4;120 A4 1 4;121 A4 1 4;123 A4 1 4;125 C#6 1 4;126 B5 1 4;124 C#6 1 4;118 F#5 1 4;127 B4 2 4;66 A5 1 4;65 B4 1 4;67 A5 1 4;68 G5 1 4;69 G5 1 4;74 F#5 1 4;71 F#5 1 4;72 A4 1 4;73 A4 1 4;75 A4 1 4;77 C#6 1 4;78 B5 1 4;76 C#6 1 4;70 F#5 1 4;79 B4 2 4;82 A5 1 4;81 B4 1 4;83 A5 1 4;84 G5 1 4;85 G5 1 4;90 F#5 1 4;87 F#5 1 4;88 A4 1 4;89 A4 1 4;91 A4 1 4;93 C#6 1 4;94 B5 1 4;92 C#6 1 4;86 F#5 1 4;95 B4 2 4;129 B4 1 4;130 B4 1 4;131 B4 1 4;132 B4 1 4;135 B4 1 4;136 B4 1 4;137 B4 1 4;138 B4 1 4;145 D5 1 4;146 D5 1 4;147 D5 1 4;148 D5 1 4;151 E5 1 4;152 E5 1 4;153 E5 1 4;154 E5 1 4;161 B4 1 4;162 B4 1 4;163 B4 1 4;164 B4 1 4;167 B4 1 4;168 B4 1 4;169 B4 1 4;170 B4 1 4;177 D5 1 4;178 D5 1 4;179 D5 1 4;180 D5 1 4;183 E5 1 4;184 E5 1 4;185 E5 1 4;186 E5 1 4;193 B4 1 4;194 B4 1 4;195 B4 1 4;196 B4 1 4;199 B4 1 4;200 B4 1 4;201 B4 1 4;202 B4 1 4;209 D5 1 4;210 D5 1 4;211 D5 1 4;212 D5 1 4;215 E5 1 4;216 E5 1 4;217 E5 1 4;218 E5 1 4;194 D5 1 4;196 F#5 1 4;197 D5 1 4;198 B4 1 4;199 D5 1 4;200 A5 2 4;202 F#5 2 4;204 D5 1 4;205 E5 1 4;206 D5 1 4;207 D5 2 4;0 B4 1 4;209 B4 1 4;211 B4 1 4;212 F#5 1 4;213 D5 1 4;214 B4 1 4;215 D5 1 4;216 G5 2 4;218 F#5 2 4;220 A5 1 4;221 C#6 1 4;222 A5 1 4;223 F#5 2 4;225 B4 1 4;226 B4 1 4;227 B4 1 4;228 B4 1 4;231 B4 1 4;232 B4 1 4;233 B4 1 4;234 B4 1 4;241 D5 1 4;242 D5 1 4;243 D5 1 4;244 D5 1 4;226 D5 1 4;228 F#5 1 4;229 D5 1 4;230 B4 1 4;231 D5 1 4;232 A5 2 4;234 F#5 2 4;236 D5 1 4;237 E5 1 4;238 D5 1 4;239 D5 2 4;241 B4 1 4;243 B4 1 4;244 F#5 1 4;245 D5 1 4;246 B4 1 4;247 D5 1 4;248 G5 2 4;250 F#5 2 4;252 D5 2 4;254 E5 2 4;247 E5 1 4;248 E5 1 4;249 E5 1 4;250 E5 1 4;65 B2 1 4;66 B2 1 4;67 B2 1 4;68 B2 1 4;71 B2 1 4;72 B2 1 4;73 B2 1 4;74 B2 1 4;81 D3 1 4;82 D3 1 4;83 D3 1 4;84 D3 1 4;87 E3 1 4;88 E3 1 4;89 E3 1 4;90 E3 1 4;97 B2 1 4;98 B2 1 4;99 B2 1 4;100 B2 1 4;103 B2 1 4;104 B2 1 4;105 B2 1 4;106 B2 1 4;113 D3 1 4;114 D3 1 4;115 D3 1 4;116 D3 1 4;119 E3 1 4;120 E3 1 4;121 E3 1 4;122 E3 1 4;129 B2 1 4;130 B2 1 4;131 B2 1 4;132 B2 1 4;135 B2 1 4;136 B2 1 4;137 B2 1 4;138 B2 1 4;145 D3 1 4;146 D3 1 4;147 D3 1 4;148 D3 1 4;151 E3 1 4;152 E3 1 4;153 E3 1 4;154 E3 1 4;161 B2 1 4;162 B2 1 4;163 B2 1 4;164 B2 1 4;167 B2 1 4;168 B2 1 4;169 B2 1 4;170 B2 1 4;177 D3 1 4;178 D3 1 4;179 D3 1 4;180 D3 1 4;183 E3 1 4;184 E3 1 4;185 E3 1 4;186 E3 1 4;193 B2 1 4;194 B2 1 4;195 B2 1 4;196 B2 1 4;199 B2 1 4;200 B2 1 4;201 B2 1 4;202 B2 1 4;209 D3 1 4;210 D3 1 4;211 D3 1 4;212 D3 1 4;215 E3 1 4;216 E3 1 4;217 E3 1 4;218 E3 1 4;225 B2 1 4;226 B2 1 4;227 B2 1 4;228 B2 1 4;231 B2 1 4;232 B2 1 4;233 B2 1 4;241 D3 1 4;242 D3 1 4;243 D3 1 4;244 D3 1 4;247 E3 1 4;248 E3 1 4;249 E3 1 4;250 E3 1 4;256 E4 1 4;257 E4 1 4;260 E4 1 4;261 E4 1 4;264 D4 1 
4;265 D4 1 4;268 D4 1 4;269 D4 1 4;272 E4 1 4;273 E4 1 4;276 E4 1 4;277 E4 1 4;262 F#5 1 4;266 D5 1 4;270 G5 1 4;274 E5 1 4;278 F#5 1 4;140 F#5 1 4;141 F#5 1 4;143 B4 1 4;156 G5 1 4;158 F#5 1 4;171 B4 1 4;172 F#5 1 4;173 B4 1 4;174 D5 1 4;187 B4 1 4;188 E5 1 4;190 B4 1 4;258 E5 1 4;280 B4 1 4;282 B4 1 4;288 E4 1 4;289 E4 1 4;292 D4 1 4;293 D4 1 4;296 D4 1 4;297 D4 1 4;300 E4 1 4;301 E4 1 4;304 E4 1 4;305 E4 1 4;290 F#5 1 4;294 D5 1 4;298 G5 1 4;302 E5 1 4;306 F#5 1 4;286 E5 1 4;308 B4 1 4;310 B4 1 4;256 F#2 1 4;257 F#2 1 4;258 F#2 1 4;259 F#2 1 4;260 F#2 1 4;261 F#2 1 4;262 F#2 1 4;263 F#2 1 4;264 B2 1 4;265 B2 1 4;266 B2 1 4;267 B2 1 4;268 B2 1 4;269 B2 1 4;270 B2 1 4;271 B2 1 4;272 F#2 1 4;273 F#2 1 4;274 F#2 1 4;275 F#2 1 4;276 F#2 1 4;277 F#2 1 4;278 F#2 1 4;279 F#2 1 4;280 B2 2 4;282 B2 2 4;286 F#2 1 4;287 F#2 1 4;288 F#2 1 4;289 F#2 1 4;290 F#2 1 4;291 F#2 1 4;292 B2 1 4;293 B2 1 4;294 B2 1 4;295 B2 1 4;296 B2 1 4;297 B2 1 4;298 B2 1 4;299 B2 1 4;300 D3 1 4;301 D3 1 4;302 D3 1 4;303 D3 1 4;304 D3 1 4;305 D3 1 4;306 D3 1 4;307 D3 1 4;308 D3 1 4;309 D3 1 4;310 D3 1 4;311 D3 1 4;312 D3 1 4;313 D3 1 4;314 D3 1 4;315 D3 1 4;300 G5 1 4;302 F#5 1 4;304 D5 1 4;300 G5 1 4;306 A4 1 4;308 G5 1 4;310 F#5 1 4;312 C#5 1 4;313 D5 1 4;314 E5 2 4;310 F#5 1 4;316 F#5 1 4;316 E4 1 4;317 E4 1 4;320 E4 1 4;321 E4 1 4;324 D4 1 4;325 D4 1 4;328 D4 1 4;329 D4 1 4;332 E4 1 4;333 E4 1 4;336 E4 1 4;337 E4 1 4;322 F#5 1 4;326 D5 1 4;330 G5 1 4;334 E5 1 4;338 F#5 1 4;318 E5 1 4;340 B4 1 4;342 B4 1 4;348 E4 1 4;349 E4 1 4;352 D4 1 4;353 D4 1 4;356 D4 1 4;357 D4 1 4;360 E4 1 4;361 E4 1 4;364 E4 1 4;365 E4 1 4;350 F#5 1 4;354 D5 1 4;358 G5 1 4;362 E5 1 4;346 E5 1 4;368 B4 1 4;370 B4 1 4;316 F#2 1 4;317 F#2 1 4;318 F#2 1 4;319 F#2 1 4;320 F#2 1 4;321 F#2 1 4;322 F#2 1 4;323 F#2 1 4;324 B2 1 4;325 B2 1 4;326 B2 1 4;327 B2 1 4;328 B2 1 4;329 B2 1 4;330 B2 1 4;331 B2 1 4;332 F#2 1 4;333 F#2 1 4;334 F#2 1 4;335 F#2 1 4;336 F#2 1 4;337 F#2 1 4;338 F#2 1 4;339 F#2 1 4;340 B2 2 4;342 B2 2 4;346 F#2 1 4;347 F#2 1 4;348 F#2 1 4;349 F#2 1 4;350 F#2 1 4;351 F#2 1 4;352 B2 1 4;353 B2 1 4;354 B2 1 4;355 B2 1 4;356 B2 1 4;357 B2 1 4;358 B2 1 4;359 B2 1 4;360 D3 1 4;361 D3 1 4;362 D3 1 4;363 D3 1 4;364 D3 1 4;365 D3 1 4;366 D3 1 4;367 D3 1 4;368 D3 1 4;369 D3 1 4;370 D3 1 4;371 D3 1 4;372 D3 1 4;373 D3 1 4;374 D3 1 4;375 D3 1 4;360 G5 1 4;362 F#5 1 4;364 D5 1 4;360 G5 1 4;366 A4 1 4;368 G5 1 4;372 C#5 1 4;373 D5 1 4;374 E5 2 4;376 F#5 1 4;361 G5 1 4;363 F#5 1 4;369 G5 1 4;366 F#5 1 4;371 B4 1 4;372 G5 1 4;373 G5 1 4;374 G5 1 4;375 G5 1 4;376 G5 1 4;377 G5 1 4;378 F#5 6 4;448 G5 1 4;449 F#5 5 4;456 G5 1 4;457 F#5 1 4;459 E5 1 4;460 D5 1 4;466 A4 1 4;467 A4 1 4;468 G5 1 4;469 G5 1 4;470 G5 1 4;471 G5 1 4;472 G5 1 4;473 G5 1 4;474 F#5 1 4;480 A5 1 4;482 G5 1 4;484 F#5 1 4;486 E5 1 4;489 F#5 1 4;490 D5 1 4;376 B3 1 4;377 B3 1 4;378 B3 1 4;379 B3 1 4;380 B3 1 4;381 B3 1 4;382 B3 1 4;383 B3 1 4;384 A3 1 4;385 A3 1 4;386 A3 1 4;387 A3 1 4;388 A3 1 4;389 A3 1 4;390 A3 1 4;391 A3 1 4;392 G3 1 4;393 G3 1 4;394 G3 1 4;395 G3 1 4;396 G3 1 4;397 G3 1 4;398 G3 1 4;399 G3 1 4;400 D3 1 4;401 D3 1 4;402 C#3 1 4;403 C#3 1 4;404 G3 1 4;405 G3 1 4;406 F#3 1 4;407 F#3 1 4;408 B3 1 4;409 B3 1 4;410 B3 1 4;411 B3 1 4;412 B3 1 4;413 B3 1 4;414 B3 1 4;415 B3 1 4;416 A3 1 4;417 A3 1 4;418 A3 1 4;419 A3 1 4;420 A3 1 4;421 A3 1 4;422 A3 1 4;423 A3 1 4;424 G3 1 4;425 G3 1 4;426 G3 1 4;427 G3 1 4;428 G3 1 4;429 G3 1 4;430 G3 1 4;431 G3 1 4;432 D3 1 4;433 D3 1 4;434 C#3 1 4;435 C#3 1 4;436 G3 1 4;437 G3 1 4;438 F#3 1 4;439 F#3 1 4;440 B3 1 4;441 B3 1 4;442 B3 1 
4;443 B3 1 4;444 B3 1 4;445 B3 1 4;446 B3 1 4;447 B3 1 4;448 A3 1 4;449 A3 1 4;450 A3 1 4;451 A3 1 4;452 A3 1 4;453 A3 1 4;454 A3 1 4;455 A3 1 4;456 G3 1 4;457 G3 1 4;458 G3 1 4;459 G3 1 4;460 G3 1 4;461 G3 1 4;462 G3 1 4;463 G3 1 4;464 D3 1 4;465 D3 1 4;466 C#3 1 4;467 C#3 1 4;468 G3 1 4;469 G3 1 4;470 F#3 1 4;471 F#3 1 4;472 B3 1 4;473 B3 1 4;474 B3 1 4;475 B3 1 4;476 B3 1 4;477 B3 1 4;478 B3 1 4;479 B3 1 4;480 A3 1 4;481 A3 1 4;482 A3 1 4;483 A3 1 4;484 A3 1 4;485 A3 1 4;486 A3 1 4;487 A3 1 4;488 G3 1 4;489 G3 1 4;490 G3 1 4;491 G3 1 4;492 G3 1 4;493 G3 1 4;494 G3 1 4;495 G3 1 4;496 D3 1 4;497 D3 1 4;498 C#3 1 4;499 C#3 1 4;500 G3 1 4;501 G3 1 4;502 F#3 1 4;503 F#3 1 4;448 C#4 1 4;449 C#4 1 4;450 C#4 1 4;451 C#4 1 4;452 C#4 1 4;453 C#4 1 4;454 C#4 1 4;455 C#4 1 4;456 D4 1 4;457 D4 1 4;458 D4 1 4;459 D4 1 4;460 D4 1 4;461 D4 1 4;462 D4 1 4;463 D4 1 4;480 C#4 1 4;481 C#4 1 4;482 C#4 1 4;483 C#4 1 4;484 C#4 1 4;485 C#4 1 4;486 C#4 1 4;487 C#4 1 4;488 D4 1 4;489 D4 1 4;490 D4 1 4;491 D4 1 4;492 D4 1 4;493 D4 1 4;494 D4 1 4;495 D4 1 4;234 B2 1 4;522 E3 1 4;378 E5 1 4;382 F#5 1 4;382 E5 1 4;386 F#5 1 4;386 E5 1 4;390 C5 1 4;390 D5 1 4'
# https://onlinesequencer.net/1536953
#song = '0 B5 1 0;4 A5 1 0;14 E5 1 0;10 G#5 1 0;18 C#5 1 0;26 F#5 1 0;28 G#5 1 0;30 A5 1 0;32 B5 1 0;36 A5 1 0;0 E5 1 0;0 F#5 1 0;0 C#5 1 0;4 E5 1 0;4 F#5 1 0;4 C#5 1 0;0 F#3 1 0;14 F#3 1 0;18 F#4 1 0;20 A4 1 0;22 C#5 1 0;24 F#4 1 0;26 A4 1 0;28 C#5 1 0;30 A4 1 0;32 F#5 1 0;36 F#5 1 0;32 D5 1 0;36 D5 1 0;42 G#5 1 0;50 C#6 1 0;46 E5 1 0;32 D3 1 0;46 D3 1 0;56 B5 1 0;60 C#6 1 0;56 B6 1 0;60 C#7 1 0;60 C#7 1 0;50 D4 1 0;52 F#4 1 0;54 C#5 1 0;56 D4 1 0;58 F#4 1 0;60 C#5 1 0;62 D4 1 0;64 B6 1 0;68 A6 1 0;64 B6 1 0;68 A6 1 0;64 C#6 1 0;68 C#6 1 0;64 E6 1 0;64 F#6 1 0;68 E6 1 0;68 F#6 1 0;64 F#3 1 0;64 F#2 1 0;68 F#4 1 0;68 A4 1 0;68 C#5 1 0;68 E5 1 0;74 G#6 1 0;78 E6 1 0;82 C#6 1 0;74 G#6 1 0;78 E6 1 0;82 C#6 1 0;72 F#4 1 0;74 A4 1 0;74 C#5 1 0;74 E5 1 0;78 F#3 1 0;78 F#2 1 0;86 F#4 1 0;86 A4 1 0;86 C#5 1 0;86 F#5 1 0;92 E4 1 0;92 A4 1 0;92 C#5 1 0;90 F#6 1 0;92 G#6 1 0;94 A6 1 0;96 B6 1 0;100 A6 1 0;90 F#6 1 0;92 G#6 1 0;94 A6 1 0;96 B6 1 0;100 A6 1 0;96 D3 1 0;96 D2 1 0;100 F#4 1 0;100 A4 1 0;100 C#5 1 0;100 F#5 1 0;96 B5 1 0;96 D6 1 0;96 F#6 1 0;100 D6 1 0;100 F#6 1 0;100 A5 1 0;106 G#6 1 0;110 E6 1 0;114 C#7 1 0;106 G#5 1 0;110 E5 1 0;114 C#6 1 0;106 G#6 1 0;104 C#5 1 0;106 A4 1 0;110 D3 1 0;110 D2 1 0;116 D4 1 0;116 F#4 1 0;116 A4 1 0;116 C#5 1 0;134 G#5 1 0;132 A5 1 0;140 B5 1 0;142 A5 1 0;138 G#5 1 0;128 F#4 1 0;128 G#4 1 0;128 A4 1 0;128 C#5 1 0;140 F#4 1 0;142 A4 1 0;148 A5 1 0;150 G#5 1 0;156 B5 1 0;154 A5 1 0;144 F#4 1 0;144 A4 1 0;144 C#5 1 0;144 E5 1 0;154 E5 1 0;156 C#5 1 0;158 A4 1 0;160 F#4 1 0;160 D4 1 0;160 A4 1 0;160 C#5 1 0;164 A5 1 0;166 G#5 1 0;170 G#5 1 0;172 B5 1 0;174 A5 1 0;176 D4 1 0;176 E4 1 0;176 F#4 1 0;176 A4 1 0;178 B5 1 0;182 C#6 1 0;188 C#4 1 0;192 B3 1 0;192 D4 1 0;192 F#4 1 0;192 A4 1 0;196 A5 1 0;198 G#5 1 0;202 G#5 1 0;204 B5 1 0;206 A5 1 0;202 B3 1 0;204 D4 1 0;206 F#4 1 0;208 E4 1 0;208 C#4 1 0;208 G#4 1 0;208 B4 1 0;210 B5 1 0;214 C#6 1 0;218 E5 1 0;222 F#5 1 0;218 B4 1 0;220 G#4 1 0;222 E4 1 0;224 D4 1 0;224 F#4 1 0;224 A4 1 0;224 C#5 1 0;228 D4 1 0;230 F#4 1 0;232 A4 1 0;234 C#5 1 0;236 D5 1 0;238 F#5 1 0;240 G#5 1 0;244 A5 1 0;246 G#5 1 0;240 E4 1 0;240 F#4 1 0;240 G#4 1 0;240 B4 1 0;246 E5 1 0;252 B4 1 0;256 F#4 1 0;256 G#4 1 0;256 A4 1 0;256 C#5 1 0;260 F#4 1 0;262 F#3 1 0;262 F#3 1 0;260 A5 1 0;262 G#5 1 0;262 G#5 1 0;268 B5 1 0;266 G#5 1 0;270 A5 1 0;266 A3 1 0;268 C#4 1 0;270 F#4 1 0;268 B5 1 0;272 A4 1 0;272 C#5 1 0;272 E5 1 0;276 A5 1 0;278 G#5 1 0;282 A5 1 0;284 B5 1 0;276 F#4 1 0;276 F#3 1 0;282 E4 1 0;282 E3 1 0;288 D4 1 0;288 D3 1 0;292 A5 1 0;294 G#5 1 0;298 G#5 1 0;300 B5 1 0;302 A5 1 0;292 D4 1 0;292 F#4 1 0;292 A4 1 0;292 C#5 1 0;298 D4 1 0;300 F#4 1 0;300 A4 1 0;300 C#5 1 0;302 D4 1 0;306 B5 1 0;310 C#6 1 0;314 E5 1 0;304 F#4 1 0;304 C#5 1 0;308 D4 1 0;308 E4 1 0;308 F#4 1 0;308 A4 1 0;312 D4 1 0;314 C#4 1 0;312 D3 1 0;314 C#3 1 0;320 B3 1 0;320 B2 1 0;320 F#5 1 0;326 F#5 1 0;332 B5 1 0;334 G#5 1 0;326 B3 1 0;326 D4 1 0;326 F#4 1 0;326 A4 1 0;332 C#4 1 0;332 C#3 1 0;338 C#4 1 0;338 E4 1 0;338 G#4 1 0;338 B4 1 0;344 D4 1 0;344 D3 1 0;348 B3 1 0;348 D4 1 0;348 F#4 1 0;348 A4 1 0;348 F#5 1 0;344 D5 1 0;344 A4 1 0;344 F4 1 0;352 C#4 1 0;352 C#3 1 0;352 G#5 1 0;358 G#5 1 0;362 G#5 1 0;364 A5 1 0;366 B5 1 0;358 C#4 1 0;358 E4 1 0;358 G#4 1 0;358 B4 1 0;364 D4 1 0;364 D3 1 0;370 C6 1 0;374 D6 1 0;376 E6 1 0;380 B5 1 0;384 C#6 1 0;370 D4 1 0;370 F4 1 0;370 A4 1 0;370 C5 1 0;376 E4 1 0;376 E3 1 0;384 C#7 1 0;384 F#2 1 0;384 F#3 1 0;388 A4 1 0;388 C#5 1 0;388 E5 1 0;390 F#4 1 0;392 G#4 1 0;392 A4 1 0;392 C#5 1 0;394 F#4 1 0;394 G#4 1 0;394 A4 1 
0;394 C#5 1 0;398 F#3 1 0;400 F#3 1 0;400 F#2 1 0;404 C#5 1 0;406 C#5 1 0;404 A4 1 0;404 G#4 1 0;405 F#4 1 0;406 A4 1 0;406 G#4 1 0;407 F#4 1 0;408 F#3 1 0;408 F#2 1 0;410 C#6 1 0;412 E6 1 0;414 C#6 1 0;410 C#5 1 0;412 E5 1 0;414 C#5 1 0;410 C#6 1 0;412 E6 1 0;414 C#6 1 0;416 G#6 1 0;418 F#6 1 0;416 G#5 1 0;418 F#5 1 0;416 G#6 1 0;418 F#6 1 0;420 F#6 1 0;422 E6 1 0;426 F#6 1 0;430 G#6 1 0;434 A6 1 0;440 A6 1 0;442 G#6 1 0;444 B5 1 0;446 B5 1 0;440 E6 1 0;440 E6 1 0;416 F#3 1 0;416 F#2 1 0;416 F#2 1 0;420 F#4 1 0;422 A4 1 0;422 C#5 1 0;422 E5 1 0;426 A4 1 0;426 C#5 1 0;426 E5 1 0;428 F#4 1 0;430 F#3 1 0;430 F#2 1 0;430 F#2 1 0;434 F#4 1 0;436 A4 1 0;436 C#5 1 0;436 E5 1 0;440 A4 1 0;440 C#5 1 0;440 E5 1 0;442 F#4 1 0;444 G#4 1 0;444 A4 1 0;444 C#5 1 0;450 C#6 1 0;452 C#6 1 0;452 F#6 1 0;454 E6 1 0;458 F#6 1 0;462 G#6 1 0;466 A6 1 0;470 B6 1 0;472 C#7 1 0;474 A6 1 0;476 B6 1 0;478 F#6 1 0;448 F#3 1 0;448 F#2 1 0;448 F#2 1 0;452 F#4 1 0;454 A4 1 0;454 C#5 1 0;454 E5 1 0;458 E5 1 0;458 C#5 1 0;458 A4 1 0;460 F#4 1 0;462 F#3 1 0;462 F#2 1 0;462 F#2 1 0;466 F#4 1 0;468 A4 1 0;468 C#5 1 0;468 E5 1 0;472 A4 1 0;472 C#5 1 0;472 E5 1 0;476 F#4 1 0;476 G#4 1 0;476 A4 1 0;476 C#5 1 0;474 C#6 1 0;476 E6 1 0;478 C#6 1 0;480 B6 1 0;482 A6 1 0;480 B5 1 0;482 A5 1 0;480 B6 1 0;482 A6 1 0;484 F#6 1 0;486 E6 1 0;490 F#6 1 0;494 G#6 1 0;498 A6 1 0;506 F#6 1 0;508 B6 1 0;510 A6 1 0;508 B5 1 0;510 A5 1 0;506 F#5 1 0;506 F#6 1 0;508 B6 1 0;510 A6 1 0;500 A4 1 0;500 C#5 1 0;504 A4 1 0;504 C#5 1 0;480 D3 1 0;480 D2 1 0;480 D2 1 0;494 D3 1 0;494 D2 1 0;494 D2 1 0;484 D4 1 0;486 F#4 1 0;486 A4 1 0;486 C#5 1 0;490 F#4 1 0;490 A4 1 0;490 C#5 1 0;492 D4 1 0;498 D4 1 0;500 F#4 1 0;504 F#4 1 0;506 D4 1 0;508 E4 1 0;508 F#4 1 0;508 A4 1 0;512 D3 1 0;512 D2 1 0;512 D2 1 0;516 D4 1 0;518 F#4 1 0;518 A4 1 0;518 C#5 1 0;522 F#4 1 0;522 A4 1 0;522 C#5 1 0;524 D4 1 0;526 D3 1 0;526 D2 1 0;526 D2 1 0;530 D4 1 0;532 F#4 1 0;532 A4 1 0;532 C#5 1 0;536 F#4 1 0;536 A4 1 0;536 C#5 1 0;540 D4 1 0;540 E4 1 0;540 F#4 1 0;540 A4 1 0;516 F#6 1 0;518 E6 1 0;522 F#6 1 0;526 G#6 1 0;530 A6 1 0;534 B6 1 0;536 C#7 1 0;538 A6 1 0;540 B6 1 0;542 F#6 1 0;538 C#6 1 0;540 E6 1 0;542 C#6 1 0;544 G#6 1 0;546 F#6 1 0;544 G#6 1 0;546 F#6 1 0;544 G#5 1 0;546 F#5 1 0;548 F#6 1 0;550 E6 1 0;554 F#6 1 0;558 G#6 1 0;562 A6 1 0;568 A6 1 0;570 G#6 1 0;572 B5 1 0;574 B5 1 0;568 E6 1 0;568 E6 1 0;544 F#3 1 0;544 F#2 1 0;544 F#2 1 0;548 F#4 1 0;550 A4 1 0;550 C#5 1 0;550 E5 1 0;554 A4 1 0;554 C#5 1 0;554 E5 1 0;556 F#4 1 0;558 F#3 1 0;558 F#2 1 0;558 F#2 1 0;562 F#4 1 0;564 A4 1 0;564 C#5 1 0;564 E5 1 0;568 A4 1 0;568 C#5 1 0;568 E5 1 0;570 F#4 1 0;572 G#4 1 0;572 A4 1 0;572 C#5 1 0;578 C#6 1 0;580 C#6 1 0;598 B6 1 0;600 C#7 1 0;602 A6 1 0;604 B6 1 0;606 F#6 1 0;580 F#6 1 0;582 E6 1 0;586 F#6 1 0;590 G#6 1 0;594 A6 1 0;576 F#3 1 0;576 F#2 1 0;576 F#2 1 0;580 F#4 1 0;582 A4 1 0;582 C#5 1 0;582 E5 1 0;586 E5 1 0;586 C#5 1 0;586 A4 1 0;588 F#4 1 0;590 F#3 1 0;590 F#2 1 0;590 F#2 1 0;580 C#6 1 0;602 C#6 1 0;604 E6 1 0;606 C#6 1 0;594 F#4 1 0;596 A4 1 0;596 C#5 1 0;596 E5 1 0;600 A4 1 0;600 C#5 1 0;600 E5 1 0;604 F#4 1 0;604 G#4 1 0;604 A4 1 0;604 C#5 1 0;608 B6 1 0;610 A6 1 0;608 B6 1 0;610 A6 1 0;608 B5 1 0;610 A5 1 0;608 D3 1 0;608 D2 1 0;608 D2 1 0;612 D4 1 0;614 F#4 1 0;614 A4 1 0;614 C#5 1 0;618 F#4 1 0;618 A4 1 0;618 C#5 1 0;620 D4 1 0;622 D3 1 0;622 D2 1 0;622 D2 1 0;612 F#6 1 0;614 E6 1 0;618 F#6 1 0;622 G#6 1 0;626 A6 1 0;634 F#5 1 0;636 B5 1 0;638 A5 1 0;634 F#6 1 0;636 B6 1 0;638 A6 1 0;634 F#6 1 0;636 B6 1 0;638 A6 1 0;626 D4 1 0;628 F#4 1 0;628 A4 1 
0;628 C#5 1 0;632 F#4 1 0;632 A4 1 0;632 C#5 1 0;634 D4 1 0;636 E4 1 0;636 F#4 1 0;636 A4 1 0;640 D3 1 0;640 D2 1 0;640 D2 1 0;644 D4 1 0;646 F#4 1 0;646 A4 1 0;646 C#5 1 0;650 F#4 1 0;650 A4 1 0;650 C#5 1 0;652 D4 1 0;654 D3 1 0;654 D2 1 0;654 D2 1 0;644 F#6 1 0;646 E6 1 0;650 F#6 1 0;654 G#6 1 0;658 A6 1 0;662 B6 1 0;664 C#7 1 0;666 A6 1 0;668 B6 1 0;670 F#6 1 0;658 D4 1 0;660 F#4 1 0;660 A4 1 0;660 C#5 1 0;664 D4 1 0;664 F#4 1 0;664 A4 1 0;664 C#5 1 0;668 E3 1 0;668 E2 1 0;668 E2 1 0;680 F#3 1 0;672 A6 1 0;676 A6 1 0;680 A6 1 0;672 A6 1 0;676 A6 1 0;680 A6 1 0;672 A5 1 0;676 A5 1 0;680 A5 1 0;672 C#6 1 0;672 E6 1 0;676 C#6 1 0;676 E6 1 0;680 C#6 1 0;680 E6 1 0;672 F#3 1 0;676 F#3 1 0;672 F#2 1 0;674 F#2 1 0;676 F#2 1 0;678 F#2 1 0;680 F#2 1 0;672 F#2 1 0;676 F#2 1 0;680 F#2 1 0;686 A5 1 0;686 C#6 1 0;686 E6 1 0;686 A6 1 0;686 A6 1 0;690 A5 1 0;690 C#6 1 0;690 E6 1 0;690 A6 1 0;694 A5 1 0;694 C#6 1 0;694 E6 1 0;694 A6 1 0;696 G#5 1 0;700 B5 1 0;696 G#6 1 0;700 B6 1 0;696 G#6 1 0;700 B6 1 0;682 A2 1 0;682 A3 1 0;684 C#4 1 0;684 C#3 1 0;684 C#3 1 0;686 F#3 1 0;686 F#4 1 0;696 C#6 1 0;696 E6 1 0;700 C#6 1 0;700 E6 1 0;690 F#3 1 0;692 F#3 1 0;694 F#3 1 0;696 E3 1 0;698 E3 1 0;700 E3 1 0;702 E3 1 0;692 F#3 1 0;692 F#4 1 0;696 E4 1 0;696 E3 1 0;700 E3 1 0;700 E4 1 0;686 F#3 1 0;704 A5 1 0;704 A6 1 0;704 A6 1 0;712 A6 1 0;718 A6 1 0;718 A6 1 0;712 A6 1 0;708 A6 1 0;708 A6 1 0;708 A5 1 0;712 A5 1 0;718 A5 1 0;704 C#6 1 0;704 E6 1 0;708 C#6 1 0;708 E6 1 0;712 C#6 1 0;712 E6 1 0;718 C#6 1 0;718 E6 1 0;704 D4 1 0;708 D4 1 0;712 D4 1 0;704 D3 1 0;704 D3 1 0;706 D3 1 0;708 D3 1 0;708 D3 1 0;710 D3 1 0;712 D3 1 0;712 D3 1 0;714 A3 1 0;714 A2 1 0;716 F#3 1 0;716 F#2 1 0;716 F#2 1 0;718 D2 1 0;718 D2 1 0;718 D3 1 0;722 D2 1 0;724 D2 1 0;724 D2 1 0;726 D2 1 0;728 E2 1 0;728 E2 1 0;730 E2 1 0;732 E2 1 0;732 E2 1 0;734 E2 1 0;722 D3 1 0;726 D3 1 0;728 E3 1 0;732 E3 1 0;722 A6 1 0;726 A6 1 0;728 G#6 1 0;732 B6 1 0;722 A5 1 0;726 A5 1 0;728 G#5 1 0;732 B5 1 0;722 C#6 1 0;722 E6 1 0;726 C#6 1 0;726 E6 1 0;728 C#6 1 0;728 E6 1 0;732 C#6 1 0;732 E6 1 0;736 F#2 1 0;736 F#2 1 0;738 F#2 1 0;740 F#2 1 0;740 F#2 1 0;742 F#2 1 0;744 F#2 1 0;744 F#2 1 0;736 F#3 1 0;740 F#3 1 0;744 F#3 1 0;746 A2 1 0;748 C#3 1 0;746 A3 1 0;748 C#4 1 0;748 C#3 1 0;750 E2 1 0;750 E3 1 0;750 E2 1 0;736 A6 1 0;740 A6 1 0;744 A6 1 0;750 A6 1 0;750 A6 1 0;744 A6 1 0;740 A6 1 0;736 A6 1 0;736 A5 1 0;740 A5 1 0;744 A5 1 0;750 A5 1 0;736 C#6 1 0;736 E6 1 0;740 C#6 1 0;740 E6 1 0;744 C#6 1 0;744 E6 1 0;750 C#6 1 0;750 E6 1 0;754 A6 1 0;758 A6 1 0;760 G#6 1 0;764 B6 1 0;768 A6 1 0;774 F#6 1 0;780 C#7 1 0;754 A6 1 0;758 A6 1 0;760 G#6 1 0;764 B6 1 0;768 A6 1 0;774 F#6 1 0;780 C#7 1 0;754 A5 1 0;758 A5 1 0;760 G#5 1 0;764 B5 1 0;768 A5 1 0;774 F#5 1 0;780 C#6 1 0;754 C#6 1 0;754 E6 1 0;758 C#6 1 0;758 E6 1 0;760 C#6 1 0;760 E6 1 0;764 C#6 1 0;764 E6 1 0;754 E2 1 0;756 E2 1 0;758 E2 1 0;760 E2 1 0;762 E2 1 0;756 E3 1 0;760 E3 1 0;756 E2 1 0;760 E2 1 0;764 A3 1 0;766 C#4 1 0;764 A2 1 0;766 C#3 1 0;764 A2 1 0;768 D3 1 0;768 D4 1 0;768 D3 1 0;768 D4 1 0;774 D4 1 0;774 D3 1 0;774 D3 1 0;774 D4 1 0;780 D3 1 0;780 D2 1 0;780 D2 1 0;780 D3 1 0;768 C#6 1 0;768 E6 1 0;774 D6 1 0;774 B5 1 0;780 F#6 1 0;780 A6 1 0;784 C2 1 8'
# https://onlinesequencer.net/1087370 - Battle Music
#song = '0 F4 1 0;0 G#4 1 0;0 C4 1 0;0 F3 1 0;0 F2 1 0;12 A#4 1 0;18 G4 1 0;12 G4 1 0;18 D#4 1 0;24 A#4 1 0;30 C5 1 0;24 F4 1 0;24 C#4 1 0;12 F3 1 0;18 F3 1 0;12 F2 1 0;18 F2 1 0;24 F2 1 0;24 F3 1 0;36 F3 1 0;36 F2 1 0;36 D#5 1 0;36 D#4 1 0;36 G4 1 0;36 A#4 1 0;48 G#4 1 0;48 C5 1 0;48 F4 1 0;48 C4 1 0;48 F3 1 0;48 F2 1 0;60 F2 1 0;66 F2 1 0;60 F3 1 0;66 F3 1 0;60 G#4 1 0;60 F4 1 0;66 D#4 1 0;66 G4 1 0;72 A#4 1 0;72 F3 1 0;72 F2 1 0;72 C#4 1 0;72 F4 1 0;84 F2 1 0;84 F3 1 0;84 E4 1 0;84 E5 1 0;90 G5 1 0;90 G4 1 0;84 A#4 1 0;90 A#4 1 0;96 F5 1 0;96 F6 1 0;96 G#5 1 0;96 C6 1 0;96 F2 1 0;96 F3 1 0;98 C4 1 0;98 G#4 1 0;99 F4 1 0;100 G#4 1 0;102 G#4 1 0;101 F4 1 0;103 F4 1 0;100 C4 1 0;102 C4 1 0;104 G#4 1 0;106 G#4 1 0;105 F4 1 0;107 F4 1 0;104 C4 1 0;106 C4 1 0;108 A#4 1 0;110 A#4 1 0;112 A#4 1 0;114 A#4 1 0;116 A#4 1 0;118 A#4 1 0;109 F4 1 0;111 F4 1 0;113 F4 1 0;115 F4 1 0;117 F4 1 0;119 F4 1 0;108 C#4 1 0;110 C#4 1 0;112 C#4 1 0;114 C#4 1 0;116 C#4 1 0;118 C#4 1 0;114 E5 1 0;114 E6 1 0;114 E6 1 0;120 G6 1 0;120 G5 1 0;120 A#5 1 0;120 C#6 1 0;120 C#5 1 0;122 C#5 1 0;124 C#5 1 0;126 C#5 1 0;128 C#5 1 0;130 C#5 1 0;120 F4 1 0;122 F4 1 0;124 F4 1 0;126 F4 1 0;128 F4 1 0;130 F4 1 0;121 A#4 1 0;123 A#4 1 0;125 A#4 1 0;127 A#4 1 0;129 A#4 1 0;131 A#4 1 0;132 C5 1 0;134 C5 1 0;136 C5 1 0;138 C5 1 0;140 C5 1 0;142 C5 1 0;132 F4 1 0;134 F4 1 0;136 F4 1 0;138 F4 1 0;140 F4 1 0;142 F4 1 0;133 G#4 1 0;135 G#4 1 0;137 G#4 1 0;139 G#4 1 0;141 G#4 1 0;143 G#4 1 0;138 F5 1 0;138 F6 1 0;144 G#6 1 0;144 F3 1 0;145 C4 1 0;146 G#4 1 0;146 C4 1 0;147 F4 1 0;148 G#4 1 0;149 F4 1 0;148 C4 1 0;150 G#4 1 0;152 G#4 1 0;154 G#4 1 0;151 F4 1 0;153 F4 1 0;155 F4 1 0;150 C4 1 0;152 C4 1 0;154 C4 1 0;144 G#5 1 0;144 C6 1 0;144 F6 1 0;156 C#4 1 0;156 A#4 1 0;158 A#4 1 0;160 A#4 1 0;162 A#4 1 0;164 A#4 1 0;166 A#4 1 0;157 F4 1 0;159 F4 1 0;161 F4 1 0;163 F4 1 0;165 F4 1 0;167 F4 1 0;158 C#4 1 0;160 C#4 1 0;162 C#4 1 0;164 C#4 1 0;166 C#4 1 0;156 G6 1 0;156 G5 1 0;162 G#6 1 0;162 G#5 1 0;168 A#6 1 0;168 A#5 1 0;168 C#5 1 0;170 C#5 1 0;172 C#5 1 0;174 C#5 1 0;176 C#5 1 0;178 C#5 1 0;168 F4 1 0;170 F4 1 0;172 F4 1 0;174 F4 1 0;176 F4 1 0;178 F4 1 0;169 A#4 1 0;171 A#4 1 0;173 A#4 1 0;175 A#4 1 0;177 A#4 1 0;179 A#4 1 0;180 C5 1 0;182 C5 1 0;184 C5 1 0;186 C5 1 0;188 C5 1 0;190 C5 1 0;180 F4 1 0;182 F4 1 0;184 F4 1 0;186 F4 1 0;188 F4 1 0;190 F4 1 0;181 G#4 1 0;183 G#4 1 0;185 G#4 1 0;187 G#4 1 0;189 G#4 1 0;191 G#4 1 0;168 C#6 1 0;168 F6 1 0;180 G5 1 0;180 G6 1 0;178 G#6 1 0;178 G#5 1 0;192 F3 1 0;192 F2 1 0;192 F2 1 0;198 C4 1 0;200 C4 1 0;202 C4 1 0;204 C#4 1 0;206 C#4 1 0;208 C#4 1 0;210 C#4 1 0;212 C#4 1 0;214 C#4 1 0;198 F4 1 0;200 F4 1 0;202 F4 1 0;204 F4 1 0;206 F4 1 0;208 F4 1 0;210 F4 1 0;212 F4 1 0;214 F4 1 0;198 G#4 1 0;200 G#4 1 0;202 G#4 1 0;204 A#4 1 0;206 A#4 1 0;208 A#4 1 0;210 A#4 1 0;212 A#4 1 0;214 A#4 1 0;192 F6 1 0;192 F5 1 0;192 G#5 1 0;192 C6 1 0;216 F3 1 0;216 F2 1 0;216 F2 1 0;198 F5 1 0;204 E5 1 0;210 G5 1 0;198 C5 1 0;204 C#5 1 0;210 C#5 1 0;210 E5 1 0;222 G#4 1 0;224 G#4 1 0;226 G#4 1 0;228 A#4 1 0;230 A#4 1 0;232 A#4 1 0;234 A#4 1 0;236 A#4 1 0;238 A#4 1 0;222 F4 1 0;224 F4 1 0;226 F4 1 0;228 F4 1 0;230 F4 1 0;232 F4 1 0;234 F4 1 0;236 F4 1 0;238 F4 1 0;222 C4 1 0;224 C4 1 0;226 C4 1 0;228 C#4 1 0;230 C#4 1 0;232 C#4 1 0;234 C#4 1 0;236 C#4 1 0;238 C#4 1 0;222 F5 1 0;222 C5 1 0;228 C#5 1 0;228 E5 1 0;228 G5 1 0;234 A#5 1 0;234 G5 1 0;234 E5 1 0;234 C#5 1 0;240 F3 1 0;240 F2 1 0;240 F2 1 0;246 C4 1 0;248 C4 1 0;250 C4 1 0;252 C#4 1 0;254 C#4 1 0;256 C#4 1 0;258 C#4 1 0;260 C#4 1 0;262 C#4 1 0;246 F4 
1 0;248 F4 1 0;250 F4 1 0;252 F4 1 0;254 F4 1 0;256 F4 1 0;258 F4 1 0;260 F4 1 0;262 F4 1 0;246 G#4 1 0;248 G#4 1 0;250 G#4 1 0;252 A#4 1 0;254 A#4 1 0;256 A#4 1 0;258 A#4 1 0;260 A#4 1 0;262 A#4 1 0;246 F6 1 0;252 E6 1 0;258 G6 1 0;246 F5 1 0;252 E5 1 0;258 G5 1 0;246 G#5 1 0;246 C6 1 0;252 G5 1 0;252 A#5 1 0;258 A#5 1 0;258 C#6 1 0;264 F3 1 0;264 F2 1 0;264 F2 1 0;270 G#4 1 0;272 G#4 1 0;274 G#4 1 0;276 A#4 1 0;278 A#4 1 0;280 A#4 1 0;282 A#4 1 0;284 A#4 1 0;286 A#4 1 0;270 F4 1 0;272 F4 1 0;274 F4 1 0;276 F4 1 0;278 F4 1 0;280 F4 1 0;282 F4 1 0;284 F4 1 0;286 F4 1 0;270 C4 1 0;272 C4 1 0;274 C4 1 0;276 C#4 1 0;278 C#4 1 0;280 C#4 1 0;282 C#4 1 0;284 C#4 1 0;286 C#4 1 0;270 C7 1 0;270 C6 1 0;270 F6 1 0;270 G#6 1 0;276 A#6 1 0;282 G6 1 0;276 A#5 1 0;282 G5 1 0;276 C#6 1 0;276 F6 1 0;282 A#5 1 0;282 C#6 1 0;288 F5 1 0;288 F6 1 0;288 G#5 1 0;288 C6 1 0;288 F3 1 0;288 F2 1 0;290 G#4 1 0;291 F4 1 0;290 C4 1 0;292 C4 1 0;294 C4 1 0;296 C4 1 0;298 C4 1 0;293 F4 1 0;295 F4 1 0;297 F4 1 0;299 F4 1 0;292 G#4 1 0;294 G#4 1 0;296 G#4 1 0;298 G#4 1 0;300 G#4 1 0;302 G#4 1 0;304 G#4 1 0;306 G#4 1 0;308 G#4 1 0;310 G#4 1 0;301 F4 1 0;303 F4 1 0;305 F4 1 0;307 F4 1 0;309 F4 1 0;311 F4 1 0;300 C4 1 0;302 C4 1 0;304 C4 1 0;306 C4 1 0;308 C4 1 0;310 C4 1 0;312 C#4 1 0;312 A#4 1 0;314 A#4 1 0;316 A#4 1 0;318 A#4 1 0;320 A#4 1 0;322 A#4 1 0;324 A#4 1 0;326 A#4 1 0;328 A#4 1 0;330 A#4 1 0;332 A#4 1 0;334 A#4 1 0;313 F4 1 0;315 F4 1 0;317 F4 1 0;319 F4 1 0;321 F4 1 0;323 F4 1 0;325 F4 1 0;327 F4 1 0;329 F4 1 0;331 F4 1 0;333 F4 1 0;335 F4 1 0;314 C#4 1 0;316 C#4 1 0;318 C#4 1 0;320 C#4 1 0;322 C#4 1 0;324 C#4 1 0;326 C#4 1 0;328 C#4 1 0;330 C#4 1 0;332 C#4 1 0;334 C#4 1 0;312 G6 1 0;312 G5 1 0;312 A#5 1 0;312 C#6 1 0;324 A#6 1 0;324 A#5 1 0;324 C#6 1 0;324 F6 1 0;336 G#6 1 0;336 G#5 1 0;336 F3 1 0;337 C4 1 0;338 G#4 1 0;338 C4 1 0;339 F4 1 0;340 G#4 1 0;341 F4 1 0;340 C4 1 0;342 G#4 1 0;344 G#4 1 0;346 G#4 1 0;343 F4 1 0;345 F4 1 0;347 F4 1 0;342 C4 1 0;344 C4 1 0;346 C4 1 0;348 G#4 1 0;350 G#4 1 0;352 G#4 1 0;354 G#4 1 0;356 G#4 1 0;358 G#4 1 0;348 C4 1 0;350 C4 1 0;352 C4 1 0;354 C4 1 0;356 C4 1 0;358 C4 1 0;349 F4 1 0;351 F4 1 0;353 F4 1 0;355 F4 1 0;357 F4 1 0;359 F4 1 0;336 C6 1 0;336 F6 1 0;348 G6 1 0;354 G#6 1 0;348 G5 1 0;354 G#5 1 0;360 A#5 1 0;360 A#6 1 0;360 A#4 1 0;362 A#4 1 0;364 A#4 1 0;366 A#4 1 0;368 A#4 1 0;370 A#4 1 0;372 A#4 1 0;374 A#4 1 0;376 A#4 1 0;378 A#4 1 0;380 A#4 1 0;382 A#4 1 0;361 F4 1 0;363 F4 1 0;365 F4 1 0;367 F4 1 0;369 F4 1 0;371 F4 1 0;373 F4 1 0;375 F4 1 0;377 F4 1 0;379 F4 1 0;381 F4 1 0;383 F4 1 0;360 C#4 1 0;362 C#4 1 0;364 C#4 1 0;366 C#4 1 0;368 C#4 1 0;370 C#4 1 0;372 C#4 1 0;374 C#4 1 0;376 C#4 1 0;378 C#4 1 0;380 C#4 1 0;382 C#4 1 0;360 C#6 1 0;360 F6 1 0;372 G5 1 0;372 G6 1 0;378 E6 1 0;378 E5 1 0;384 F6 1 0;384 F5 1 0;384 G#5 1 0;384 C6 1 0;384 F2 1 0;384 F3 1 0;386 A#3 1 0;387 C4 1 0;388 A#3 1 0;390 F4 1 0;392 A#3 1 0;394 C4 1 0;396 F3 1 0;396 F2 1 0;398 A#3 1 0;399 C4 1 0;400 A#3 1 0;401 C4 1 0;402 F4 1 0;403 A#3 1 0;404 C4 1 0;405 F4 1 0;406 A#3 1 0;407 C4 1 0;408 G5 1 0;408 G6 1 0;408 A#5 1 0;408 C#6 1 0;408 F3 1 0;408 F2 1 0;410 A#3 1 0;411 C#4 1 0;412 A#3 1 0;413 C#4 1 0;414 G4 1 0;415 A#3 1 0;416 C#4 1 0;417 G4 1 0;418 A#3 1 0;419 C#4 1 0;420 G4 1 0;423 G4 1 0;426 G4 1 0;429 G4 1 0;421 A#3 1 0;422 C#4 1 0;424 A#3 1 0;425 C#4 1 0;427 A#3 1 0;428 C#4 1 0;430 A#3 1 0;431 C#4 1 0;420 A#5 1 0;420 A#6 1 0;426 C#6 1 0;426 C#7 1 0;420 C#6 1 0;420 F6 1 0;426 F6 1 0;426 A#6 1 0;384 F3 1 0;396 F3 1 0;408 F3 1 0;432 C7 1 0;432 C6 1 0;432 F6 1 0;432 G#6 1 0;432 F3 1 
0;432 F2 1 0;434 A#3 1 0;435 C4 1 0;436 A#3 1 0;438 F4 1 0;440 A#3 1 0;442 C4 1 0;432 F3 1 0;444 F3 1 0;444 F3 1 0;444 F2 1 0;446 A#3 1 0;447 C4 1 0;448 A#3 1 0;449 C4 1 0;450 F4 1 0;451 A#3 1 0;452 C4 1 0;453 F4 1 0;454 A#3 1 0;455 C4 1 0;444 G6 1 0;444 G5 1 0;450 A#5 1 0;450 A#6 1 0;456 C6 1 0;456 C7 1 0;456 A#6 1 0;456 F6 1 0;456 F3 1 0;456 F2 1 0;456 F3 1 0;458 A#3 1 0;459 C#4 1 0;460 A#3 1 0;461 C#4 1 0;462 G4 1 0;463 A#3 1 0;465 G4 1 0;466 A#3 1 0;464 C#4 1 0;467 C#4 1 0;468 G4 1 0;471 G4 1 0;474 G4 1 0;477 G4 1 0;469 A#3 1 0;470 C#4 1 0;472 A#3 1 0;473 C#4 1 0;475 A#3 1 0;476 C#4 1 0;478 A#3 1 0;479 C#4 1 0;468 C#7 1 0;468 C#6 1 0;468 F6 1 0;468 A#6 1 0;474 C7 1 0;474 A#6 1 0;474 F6 1 0;474 C6 1 0;480 F3 1 0;480 F2 1 0;480 F5 1 0;480 F4 1 0;480 C5 1 0;488 G5 1 0;490 G#5 1 0;492 F5 1 0;492 F4 1 0;492 C#5 1 0;492 C#3 1 0;492 C#4 1 0;488 G4 1 0;490 G#4 1 0;500 G5 1 0;502 G#5 1 0;504 F5 1 0;504 F4 1 0;500 G4 1 0;502 G#4 1 0;504 B4 1 0;504 B3 1 0;504 B2 1 0;512 G5 1 0;514 G#5 1 0;516 F5 1 0;522 E5 1 0;512 G4 1 0;514 G#4 1 0;516 F4 1 0;522 E4 1 0;516 C5 1 0;516 C4 1 0;516 C3 1 0;522 C5 1 0;528 A#3 1 0;528 A#2 1 0;528 F4 1 0;528 F5 1 0;528 A#4 1 0;536 G5 1 0;538 G#5 1 0;540 F5 1 0;536 G4 1 0;538 G#4 1 0;540 F4 1 0;540 G#4 1 0;540 G#3 1 0;540 G#2 1 0;548 G4 1 0;550 G#4 1 0;552 F4 1 0;548 G5 1 0;550 G#5 1 0;552 F5 1 0;552 B4 1 0;552 B3 1 0;552 B2 1 0;560 G5 1 0;562 G#5 1 0;564 F5 1 0;570 E5 1 0;560 G4 1 0;562 G#4 1 0;564 F4 1 0;570 E4 1 0;564 C5 1 0;564 C4 1 0;564 C3 1 0;570 C5 1 0;576 F2 1 0;576 F3 1 0;578 F3 1 0;580 F3 1 0;582 F3 1 0;584 F3 1 0;586 F3 1 0;588 F3 1 0;590 F3 1 0;592 F3 1 0;594 F3 1 0;596 F3 1 0;598 F3 1 0;578 C4 1 0;580 C4 1 0;582 C4 1 0;584 C4 1 0;586 C4 1 0;588 C4 1 0;590 C4 1 0;592 C4 1 0;594 C4 1 0;596 C4 1 0;598 C4 1 0;576 C5 1 0;582 C#5 1 0;588 C5 1 0;576 F4 1 0;582 F4 1 0;588 F4 1 0;594 B4 1 0;594 F4 1 0;600 C5 1 0;606 C#5 1 0;612 C5 1 0;618 B4 1 0;600 F4 1 0;606 F4 1 0;612 F4 1 0;618 F4 1 0;602 C4 1 0;604 C4 1 0;606 C4 1 0;608 C4 1 0;610 C4 1 0;612 C4 1 0;614 C4 1 0;616 C4 1 0;618 C4 1 0;620 C4 1 0;622 C4 1 0;600 F3 1 0;602 F3 1 0;604 F3 1 0;606 F3 1 0;608 F3 1 0;610 F3 1 0;612 F3 1 0;614 F3 1 0;616 F3 1 0;618 F3 1 0;620 F3 1 0;622 F3 1 0;600 F2 1 0;624 F3 1 0;624 F2 1 0;626 C5 1 0;627 C#5 1 0;628 C5 1 0;626 F4 1 0;632 C5 1 0;633 C#5 1 0;634 C5 1 0;632 F4 1 0;630 F3 1 0;630 F2 1 0;648 F3 1 0;654 F3 1 0;648 F2 1 0;654 F2 1 0;650 C5 1 0;651 C#5 1 0;652 C5 1 0;650 F4 1 0;656 C5 1 0;657 C#5 1 0;658 C5 1 0;656 F4 1 0;672 C2 1 17'
# https://onlinesequencer.net/49771 - Pokemon HGSS Dark Cave / Ice Path
#song = '200 C7 8 0;208 D7 2 0;210 C7 2 0;212 A#6 2 0;214 G6 4 0;218 A#6 2 0;220 D7 4 0;224 C#7 16 0;240 C7 16 0;264 C7 8 0;272 D7 2 0;274 C7 2 0;276 A#6 2 0;278 G6 4 0;282 A#6 2 0;284 D7 4 0;288 C#7 12 0;300 C7 2 0;302 C#7 2 0;304 D#7 16 0;320 F6 4 0;324 D#6 2 0;326 D6 2 0;328 D#6 4 0;332 D6 2 0;334 C6 2 0;336 D6 2 0;338 C6 2 0;340 A#5 2 0;342 C6 2 0;344 D6 4 0;348 C6 2 0;350 A#5 2 0;352 C#6 8 0;360 A#5 8 0;368 D#6 2 0;370 D6 2 0;372 C6 2 0;374 A#5 2 0;376 C6 8 0;384 F6 4 0;388 D#6 2 0;390 D6 2 0;392 D#6 4 0;396 D6 2 0;398 C6 2 0;400 D6 2 0;402 C6 2 0;404 A#5 2 0;406 C6 2 0;408 D6 4 0;412 C6 2 0;414 A#5 2 0;416 C#6 4 0;420 C6 2 0;422 C#6 2 0;424 C6 2 0;426 C#6 2 0;428 A#5 2 0;430 C#6 2 0;432 C6 16 0;456 C7 8 0;464 D7 2 0;466 C7 2 0;468 A#6 2 0;470 G6 4 0;474 A#6 2 0;476 D7 4 0;480 C#7 16 0;496 C7 16 0;520 C7 8 0;528 D7 2 0;530 C7 2 0;532 A#6 2 0;534 G6 4 0;538 A#6 2 0;540 D7 4 0;544 C#7 16 0;560 C7 16 0;600 A#5 2 0;602 C6 2 0;604 C#6 4 0;608 C6 16 0;632 C#6 2 0;634 C6 2 0;636 C#6 4 0;640 C6 16 0;656 D#6 2 0;664 A#5 2 0;666 C6 2 0;668 C#6 4 0;672 C6 16 0;688 A#5 2 0;696 C#6 2 0;698 C6 2 0;700 C#6 4 0;768 D#7 16 0;784 D7 16 0;800 F7 16 0;816 D#7 16 0;904 C7 8 0;912 D7 2 0;914 C7 2 0;916 A#6 2 0;918 G6 4 0;922 A#6 2 0;924 D7 4 0;928 C#7 16 0;944 C7 16 0;968 C7 8 0;976 D7 2 0;978 C7 2 0;980 A#6 2 0;982 G6 4 0;986 A#6 2 0;988 D7 4 0;992 C#7 12 0;1004 C7 2 0;1006 C#7 2 0;1008 D#7 16 0;1024 F6 4 0;1028 D#6 2 0;1030 D6 2 0;1032 D#6 4 0;1036 D6 2 0;1038 C6 2 0;1040 D6 2 0;1042 C6 2 0;1044 A#5 2 0;1046 C6 2 0;1048 D6 4 0;1052 C6 2 0;1054 A#5 2 0;1056 C#6 8 0;1064 A#5 8 0;1072 D#6 2 0;1074 D6 2 0;1076 C6 2 0;1078 A#5 2 0;1080 C6 8 0;1088 F6 4 0;1092 D#6 2 0;1094 D6 2 0;1096 D#6 4 0;1100 D6 2 0;1102 C6 2 0;1104 D6 2 0;1106 C6 2 0;1108 A#5 2 0;1110 C6 2 0;1112 D6 4 0;1116 C6 2 0;1118 A#5 2 0;1120 C#6 4 0;1124 C6 2 0;1126 C#6 2 0;1128 C6 2 0;1130 C#6 2 0;1132 A#5 2 0;1134 C#6 2 0;1136 C6 16 0;1160 C7 8 0;1168 D7 2 0;1170 C7 2 0;1172 A#6 2 0;1174 G6 4 0;1178 A#6 2 0;1180 D7 4 0;1184 C#7 16 0;1200 C7 16 0;1224 C7 8 0;1232 D7 2 0;1234 C7 2 0;1236 A#6 2 0;1238 G6 4 0;1242 A#6 2 0;1244 D7 4 0;1248 C#7 16 0;1264 C7 16 0;1304 A#5 2 0;1306 C6 2 0;1308 C#6 4 0;1312 C6 16 0;1336 C#6 2 0;1338 C6 2 0;1340 C#6 4 0;1344 C6 16 0;1360 D#6 2 0;1368 A#5 2 0;1370 C6 2 0;1372 C#6 4 0;1376 C6 16 0;1392 A#5 2 0;1400 C#6 2 0;1402 C6 2 0;1404 C#6 4 0;1472 D#7 16 0;1488 D7 16 0;1504 F7 16 0;1520 D#7 16 0;64 G5 12 0;64 D#5 12 0;80 A#5 12 0;80 D5 12 0;96 F5 12 0;96 C#5 12 0;112 G5 12 0;112 C5 12 0;128 D#6 2 0;130 G#5 2 0;132 C6 2 0;134 D#6 2 0;136 G#5 2 0;138 C6 2 0;140 D#6 2 0;142 C6 2 0;144 D6 2 0;146 G5 2 0;148 A#5 2 0;150 D6 2 0;152 G5 2 0;154 A#5 2 0;156 D6 2 0;158 A#5 2 0;160 C#6 2 0;162 F5 2 0;164 A#5 2 0;166 C#6 2 0;168 F5 2 0;170 A#5 2 0;172 C#6 2 0;174 A#5 2 0;176 C6 2 0;178 G5 2 0;180 A#5 2 0;182 C6 2 0;184 G5 2 0;186 A#5 2 0;188 C6 2 0;190 A#5 2 0;192 D#6 2 0;194 G#5 2 0;196 C6 2 0;198 D#6 2 0;200 G#5 2 0;202 C6 2 0;204 D#6 2 0;206 C6 2 0;208 D6 2 0;210 G5 2 0;212 A#5 2 0;214 D6 2 0;216 G5 2 0;218 A#5 2 0;220 D6 2 0;222 A#5 2 0;224 C#6 2 0;226 F5 2 0;228 A#5 2 0;230 C#6 2 0;232 F5 2 0;234 A#5 2 0;236 C#6 2 0;238 A#5 2 0;240 C6 2 0;242 G5 2 0;244 A#5 2 0;246 C6 2 0;248 G5 2 0;250 A#5 2 0;252 C6 2 0;254 A#5 2 0;256 D#6 2 0;258 G#5 2 0;260 C6 2 0;262 D#6 2 0;264 G#5 2 0;266 C6 2 0;268 D#6 2 0;270 C6 2 0;272 D6 2 0;274 G5 2 0;276 A#5 2 0;278 D6 2 0;280 G5 2 0;282 A#5 2 0;284 D6 2 0;286 A#5 2 0;288 C#6 2 0;290 F5 2 0;292 A#5 2 0;294 C#6 2 0;296 F5 2 0;298 A#5 2 0;300 C#6 2 0;302 A#5 2 0;304 C6 2 0;306 G5 2 0;308 A#5 2 
0;310 C6 2 0;312 G5 2 0;314 A#5 2 0;316 C6 2 0;318 A#5 2 0;320 G#5 8 0;328 D#5 8 0;336 G5 8 0;344 D5 8 0;352 F5 8 0;360 C#5 8 0;368 D#5 8 0;376 G5 8 0;384 G#5 8 0;392 D#5 8 0;400 G5 8 0;408 D5 8 0;416 F5 8 0;424 C#5 8 0;432 D#5 8 0;440 C5 8 0;448 D#6 2 0;450 G#5 2 0;452 C6 2 0;454 D#6 2 0;456 G#5 2 0;458 C6 2 0;460 D#6 2 0;462 C6 2 0;464 D6 2 0;466 G5 2 0;468 A#5 2 0;470 D6 2 0;472 G5 2 0;474 A#5 2 0;476 D6 2 0;478 A#5 2 0;480 C#6 2 0;482 F5 2 0;484 A#5 2 0;486 C#6 2 0;488 F5 2 0;490 A#5 2 0;492 C#6 2 0;494 A#5 2 0;496 C6 2 0;498 G5 2 0;500 A#5 2 0;502 C6 2 0;504 G5 2 0;506 A#5 2 0;508 C6 2 0;510 A#5 2 0;512 D#6 2 0;514 G#5 2 0;516 C6 2 0;518 D#6 2 0;520 G#5 2 0;522 C6 2 0;524 D#6 2 0;526 C6 2 0;528 D6 2 0;530 G5 2 0;532 A#5 2 0;534 D6 2 0;536 G5 2 0;538 A#5 2 0;540 D6 2 0;542 A#5 2 0;544 C#6 2 0;546 F5 2 0;548 A#5 2 0;550 C#6 2 0;552 F5 2 0;554 A#5 2 0;556 C#6 2 0;558 A#5 2 0;560 C6 2 0;562 G5 2 0;564 A#5 2 0;566 C6 2 0;568 G5 2 0;570 A#5 2 0;572 C6 2 0;574 A#5 2 0;576 C5 4 0;580 G5 2 0;582 D#5 4 0;586 G5 2 0;588 F5 4 0;592 D#5 2 0;596 A#4 12 0;608 C5 4 0;612 G5 2 0;614 D#5 4 0;618 G5 2 0;620 D#5 4 0;624 F5 2 0;628 A#4 12 0;640 D#5 16 0;656 G5 2 0;664 F5 2 0;666 G5 2 0;668 G#5 4 0;672 G5 16 0;696 G#5 2 0;698 G5 2 0;700 G#5 4 0;704 D#6 2 0;706 G#5 2 0;708 C6 2 0;710 D#6 2 0;712 G#5 2 0;714 C6 2 0;716 D#6 2 0;718 C6 2 0;720 D6 2 0;722 G5 2 0;724 A#5 2 0;726 D6 2 0;728 G5 2 0;730 A#5 2 0;732 D6 2 0;734 A#5 2 0;736 C#6 2 0;738 F5 2 0;740 A#5 2 0;742 C#6 2 0;744 F5 2 0;746 A#5 2 0;748 C#6 2 0;750 A#5 2 0;752 C6 2 0;754 G5 2 0;756 A#5 2 0;758 C6 2 0;760 G5 2 0;762 A#5 2 0;764 C6 2 0;766 A#5 2 0;768 D#6 2 0;770 G#5 2 0;772 C6 2 0;774 D#6 2 0;776 G#5 2 0;778 C6 2 0;780 D#6 2 0;782 C6 2 0;784 D6 2 0;786 G5 2 0;788 A#5 2 0;790 D6 2 0;792 G5 2 0;794 A#5 2 0;796 D6 2 0;798 A#5 2 0;800 C#6 2 0;802 F5 2 0;804 A#5 2 0;806 C#6 2 0;808 F5 2 0;810 A#5 2 0;812 C#6 2 0;814 A#5 2 0;816 C6 2 0;818 G5 2 0;820 A#5 2 0;822 C6 2 0;824 G5 2 0;826 A#5 2 0;828 C6 2 0;830 A#5 2 0;832 D#6 2 0;834 G#5 2 0;836 C6 2 0;838 D#6 2 0;840 G#5 2 0;842 C6 2 0;844 D#6 2 0;846 C6 2 0;848 D6 2 0;850 G5 2 0;852 A#5 2 0;854 D6 2 0;856 G5 2 0;858 A#5 2 0;860 D6 2 0;862 A#5 2 0;864 C#6 2 0;866 F5 2 0;868 A#5 2 0;870 C#6 2 0;872 F5 2 0;874 A#5 2 0;876 C#6 2 0;878 A#5 2 0;880 C6 2 0;882 G5 2 0;884 A#5 2 0;886 C6 2 0;888 G5 2 0;890 A#5 2 0;892 C6 2 0;894 A#5 2 0;896 D#6 2 0;898 G#5 2 0;900 C6 2 0;902 D#6 2 0;904 G#5 2 0;906 C6 2 0;908 D#6 2 0;910 C6 2 0;912 D6 2 0;914 G5 2 0;916 A#5 2 0;918 D6 2 0;920 G5 2 0;922 A#5 2 0;924 D6 2 0;926 A#5 2 0;928 C#6 2 0;930 F5 2 0;932 A#5 2 0;934 C#6 2 0;936 F5 2 0;938 A#5 2 0;940 C#6 2 0;942 A#5 2 0;944 C6 2 0;946 G5 2 0;948 A#5 2 0;950 C6 2 0;952 G5 2 0;954 A#5 2 0;956 C6 2 0;958 A#5 2 0;960 D#6 2 0;962 G#5 2 0;964 C6 2 0;966 D#6 2 0;968 G#5 2 0;970 C6 2 0;972 D#6 2 0;974 C6 2 0;976 D6 2 0;978 G5 2 0;980 A#5 2 0;982 D6 2 0;984 G5 2 0;986 A#5 2 0;988 D6 2 0;990 A#5 2 0;992 C#6 2 0;994 F5 2 0;996 A#5 2 0;998 C#6 2 0;1000 F5 2 0;1002 A#5 2 0;1004 C#6 2 0;1006 A#5 2 0;1008 C6 2 0;1010 G5 2 0;1012 A#5 2 0;1014 C6 2 0;1016 G5 2 0;1018 A#5 2 0;1020 C6 2 0;1022 A#5 2 0;1024 G#5 8 0;1032 D#5 8 0;1040 G5 8 0;1048 D5 8 0;1056 F5 8 0;1064 C#5 8 0;1072 D#5 8 0;1080 G5 8 0;1088 G#5 8 0;1096 D#5 8 0;1104 G5 8 0;1112 D5 8 0;1120 F5 8 0;1128 C#5 8 0;1136 D#5 8 0;1144 C5 8 0;1152 D#6 2 0;1154 G#5 2 0;1156 C6 2 0;1158 D#6 2 0;1160 G#5 2 0;1162 C6 2 0;1164 D#6 2 0;1166 C6 2 0;1168 D6 2 0;1170 G5 2 0;1172 A#5 2 0;1174 D6 2 0;1176 G5 2 0;1178 A#5 2 0;1180 D6 2 0;1182 A#5 2 0;1184 C#6 2 0;1186 F5 2 0;1188 A#5 2 0;1190 C#6 2 
0;1192 F5 2 0;1194 A#5 2 0;1196 C#6 2 0;1198 A#5 2 0;1200 C6 2 0;1202 G5 2 0;1204 A#5 2 0;1206 C6 2 0;1208 G5 2 0;1210 A#5 2 0;1212 C6 2 0;1214 A#5 2 0;1216 D#6 2 0;1218 G#5 2 0;1220 C6 2 0;1222 D#6 2 0;1224 G#5 2 0;1226 C6 2 0;1228 D#6 2 0;1230 C6 2 0;1232 D6 2 0;1234 G5 2 0;1236 A#5 2 0;1238 D6 2 0;1240 G5 2 0;1242 A#5 2 0;1244 D6 2 0;1246 A#5 2 0;1248 C#6 2 0;1250 F5 2 0;1252 A#5 2 0;1254 C#6 2 0;1256 F5 2 0;1258 A#5 2 0;1260 C#6 2 0;1262 A#5 2 0;1264 C6 2 0;1266 G5 2 0;1268 A#5 2 0;1270 C6 2 0;1272 G5 2 0;1274 A#5 2 0;1276 C6 2 0;1278 A#5 2 0;1280 C5 4 0;1284 G5 2 0;1286 D#5 4 0;1290 G5 2 0;1292 F5 4 0;1296 D#5 2 0;1300 A#4 12 0;1312 C5 4 0;1316 G5 2 0;1318 D#5 4 0;1322 G5 2 0;1324 D#5 4 0;1328 F5 2 0;1332 A#4 12 0;1344 D#5 16 0;1360 G5 2 0;1368 F5 2 0;1370 G5 2 0;1372 G#5 4 0;1376 G5 16 0;1400 G#5 2 0;1402 G5 2 0;1404 G#5 4 0;1408 D#6 2 0;1410 G#5 2 0;1412 C6 2 0;1414 D#6 2 0;1416 G#5 2 0;1418 C6 2 0;1420 D#6 2 0;1422 C6 2 0;1424 D6 2 0;1426 G5 2 0;1428 A#5 2 0;1430 D6 2 0;1432 G5 2 0;1434 A#5 2 0;1436 D6 2 0;1438 A#5 2 0;1440 C#6 2 0;1442 F5 2 0;1444 A#5 2 0;1446 C#6 2 0;1448 F5 2 0;1450 A#5 2 0;1452 C#6 2 0;1454 A#5 2 0;1456 C6 2 0;1458 G5 2 0;1460 A#5 2 0;1462 C6 2 0;1464 G5 2 0;1466 A#5 2 0;1468 C6 2 0;1470 A#5 2 0;1472 D#6 2 0;1474 G#5 2 0;1476 C6 2 0;1478 D#6 2 0;1480 G#5 2 0;1482 C6 2 0;1484 D#6 2 0;1486 C6 2 0;1488 D6 2 0;1490 G5 2 0;1492 A#5 2 0;1494 D6 2 0;1496 G5 2 0;1498 A#5 2 0;1500 D6 2 0;1502 A#5 2 0;1504 C#6 2 0;1506 F5 2 0;1508 A#5 2 0;1510 C#6 2 0;1512 F5 2 0;1514 A#5 2 0;1516 C#6 2 0;1518 A#5 2 0;1520 C6 2 0;1522 G5 2 0;1524 A#5 2 0;1526 C6 2 0;1528 G5 2 0;1530 A#5 2 0;1532 C6 2 0;1534 A#5 2 0;0 C4 4 0;4 G4 2 0;6 D#4 4 0;10 G4 2 0;12 F4 4 0;16 D#4 2 0;20 A#3 6 0;26 C4 2 0;28 C#4 4 0;32 C4 4 0;36 G4 2 0;38 D#4 4 0;42 G4 2 0;44 D#4 4 0;48 F4 2 0;52 A#3 4 0;56 C#4 2 0;58 C4 2 0;60 C#4 4 0;64 C4 4 0;68 G4 2 0;70 D#4 4 0;74 G4 2 0;76 F4 4 0;80 D#4 2 0;84 A#3 6 0;90 C4 2 0;92 C#4 4 0;96 C4 4 0;100 G4 2 0;102 D#4 4 0;106 G4 2 0;108 D#4 4 0;112 F4 2 0;116 A#3 4 0;120 C#4 2 0;122 C4 2 0;124 C#4 4 0;128 C4 4 0;132 G4 2 0;134 D#4 4 0;138 G4 2 0;140 F4 4 0;144 D#4 2 0;148 A#3 6 0;154 C4 2 0;156 C#4 4 0;160 C4 4 0;164 G4 2 0;166 D#4 4 0;170 G4 2 0;172 D#4 4 0;176 F4 2 0;180 A#3 4 0;184 C#4 2 0;186 C4 2 0;188 C#4 4 0;192 C4 4 0;196 G4 2 0;198 D#4 4 0;202 G4 2 0;204 F4 4 0;208 D#4 2 0;212 A#3 6 0;218 C4 2 0;220 C#4 4 0;224 C4 4 0;228 G4 2 0;230 D#4 4 0;234 G4 2 0;236 D#4 4 0;240 F4 2 0;244 A#3 4 0;248 C#4 2 0;250 C4 2 0;252 C#4 4 0;256 C4 4 0;260 G4 2 0;262 D#4 4 0;266 G4 2 0;268 F4 4 0;272 D#4 2 0;276 A#3 6 0;282 C4 2 0;284 C#4 4 0;288 C4 4 0;292 G4 2 0;294 D#4 4 0;298 G4 2 0;300 D#4 4 0;304 F4 2 0;308 A#3 4 0;312 C#4 2 0;314 C4 2 0;316 C#4 4 0;320 C4 4 0;324 G4 2 0;326 D#4 4 0;330 G4 2 0;332 F4 4 0;336 D#4 2 0;340 A#3 6 0;346 C4 2 0;348 C#4 4 0;352 C4 4 0;356 G4 2 0;358 D#4 4 0;362 G4 2 0;364 D#4 4 0;368 F4 2 0;372 A#3 4 0;376 C#4 2 0;378 C4 2 0;380 C#4 4 0;384 C4 4 0;388 G4 2 0;390 D#4 4 0;394 G4 2 0;396 F4 4 0;400 D#4 2 0;404 A#3 6 0;410 C4 2 0;412 C#4 4 0;416 C4 4 0;420 G4 2 0;422 D#4 4 0;426 G4 2 0;428 D#4 4 0;432 F4 2 0;436 A#3 4 0;440 C#4 2 0;442 C4 2 0;444 C#4 4 0;448 C4 4 0;452 G4 2 0;454 D#4 4 0;458 G4 2 0;460 F4 4 0;464 D#4 2 0;468 A#3 6 0;474 C4 2 0;476 C#4 4 0;480 C4 4 0;484 G4 2 0;486 D#4 4 0;490 G4 2 0;492 D#4 4 0;496 F4 2 0;500 A#3 4 0;504 C#4 2 0;506 C4 2 0;508 C#4 4 0;512 C4 4 0;516 G4 2 0;518 D#4 4 0;522 G4 2 0;524 F4 4 0;528 D#4 2 0;532 A#3 6 0;538 C4 2 0;540 C#4 4 0;544 C4 4 0;548 G4 2 0;550 D#4 4 0;554 G4 2 0;556 D#4 4 0;560 F4 2 0;564 A#3 4 0;568 C#4 2 
0;570 C4 2 0;572 C#4 4 0;600 F4 2 0;602 G4 2 0;604 G#4 4 0;608 D#5 16 0;632 G#4 2 0;634 G4 2 0;636 G#4 4 0;640 C5 4 0;644 G5 2 0;646 D#5 4 0;650 G5 2 0;652 F5 4 0;656 D#5 2 0;660 A#4 12 0;672 C5 4 0;676 G5 2 0;678 D#5 4 0;682 G5 2 0;684 D#5 4 0;688 F5 2 0;692 A#4 4 0;696 C#5 2 0;698 C5 2 0;700 C#5 4 0;704 C5 4 0;708 G5 2 0;710 D#5 4 0;714 G5 2 0;716 F5 4 0;720 D#5 2 0;724 A#4 6 0;730 C5 2 0;732 C#5 4 0;736 C5 4 0;740 G5 2 0;742 D#5 4 0;746 G5 2 0;748 D#5 4 0;752 F5 2 0;756 A#4 4 0;760 C#5 2 0;762 C5 2 0;764 C#5 4 0;768 C5 4 0;772 G5 2 0;774 D#5 4 0;778 G5 2 0;780 F5 4 0;784 D#5 2 0;788 A#4 6 0;794 C5 2 0;796 C#5 4 0;800 C5 4 0;804 G5 2 0;806 D#5 4 0;810 G5 2 0;812 D#5 4 0;816 F5 2 0;820 A#4 4 0;824 C#5 2 0;826 C5 2 0;828 C#5 4 0;832 C4 4 0;836 G4 2 0;838 D#4 4 0;842 G4 2 0;844 F4 4 0;848 D#4 2 0;852 A#3 6 0;858 C4 2 0;860 C#4 4 0;864 C4 4 0;868 G4 2 0;870 D#4 4 0;874 G4 2 0;876 D#4 4 0;880 F4 2 0;884 A#3 4 0;888 C#4 2 0;890 C4 2 0;892 C#4 4 0;896 C4 4 0;900 G4 2 0;902 D#4 4 0;906 G4 2 0;908 F4 4 0;912 D#4 2 0;916 A#3 6 0;922 C4 2 0;924 C#4 4 0;928 C4 4 0;932 G4 2 0;934 D#4 4 0;938 G4 2 0;940 D#4 4 0;944 F4 2 0;948 A#3 4 0;952 C#4 2 0;954 C4 2 0;956 C#4 4 0;960 C4 4 0;964 G4 2 0;966 D#4 4 0;970 G4 2 0;972 F4 4 0;976 D#4 2 0;980 A#3 6 0;986 C4 2 0;988 C#4 4 0;992 C4 4 0;996 G4 2 0;998 D#4 4 0;1002 G4 2 0;1004 D#4 4 0;1008 F4 2 0;1012 A#3 4 0;1016 C#4 2 0;1018 C4 2 0;1020 C#4 4 0;1024 C4 4 0;1028 G4 2 0;1030 D#4 4 0;1034 G4 2 0;1036 F4 4 0;1040 D#4 2 0;1044 A#3 6 0;1050 C4 2 0;1052 C#4 4 0;1056 C4 4 0;1060 G4 2 0;1062 D#4 4 0;1066 G4 2 0;1068 D#4 4 0;1072 F4 2 0;1076 A#3 4 0;1080 C#4 2 0;1082 C4 2 0;1084 C#4 4 0;1088 C4 4 0;1092 G4 2 0;1094 D#4 4 0;1098 G4 2 0;1100 F4 4 0;1104 D#4 2 0;1108 A#3 6 0;1114 C4 2 0;1116 C#4 4 0;1120 C4 4 0;1124 G4 2 0;1126 D#4 4 0;1130 G4 2 0;1132 D#4 4 0;1136 F4 2 0;1140 A#3 4 0;1144 C#4 2 0;1146 C4 2 0;1148 C#4 4 0;1152 C4 4 0;1156 G4 2 0;1158 D#4 4 0;1162 G4 2 0;1164 F4 4 0;1168 D#4 2 0;1172 A#3 6 0;1178 C4 2 0;1180 C#4 4 0;1184 C4 4 0;1188 G4 2 0;1190 D#4 4 0;1194 G4 2 0;1196 D#4 4 0;1200 F4 2 0;1204 A#3 4 0;1208 C#4 2 0;1210 C4 2 0;1212 C#4 4 0;1216 C4 4 0;1220 G4 2 0;1222 D#4 4 0;1226 G4 2 0;1228 F4 4 0;1232 D#4 2 0;1236 A#3 6 0;1242 C4 2 0;1244 C#4 4 0;1248 C4 4 0;1252 G4 2 0;1254 D#4 4 0;1258 G4 2 0;1260 D#4 4 0;1264 F4 2 0;1268 A#3 4 0;1272 C#4 2 0;1274 C4 2 0;1276 C#4 4 0;1304 F4 2 0;1306 G4 2 0;1308 G#4 4 0;1312 D#5 16 0;1336 G#4 2 0;1338 G4 2 0;1340 G#4 4 0;1344 C5 4 0;1348 G5 2 0;1350 D#5 4 0;1354 G5 2 0;1356 F5 4 0;1360 D#5 2 0;1364 A#4 12 0;1376 C5 4 0;1380 G5 2 0;1382 D#5 4 0;1386 G5 2 0;1388 D#5 4 0;1392 F5 2 0;1396 A#4 4 0;1400 C#5 2 0;1402 C5 2 0;1404 C#5 4 0;1408 C5 4 0;1412 G5 2 0;1414 D#5 4 0;1418 G5 2 0;1420 F5 4 0;1424 D#5 2 0;1428 A#4 6 0;1434 C5 2 0;1436 C#5 4 0;1440 C5 4 0;1444 G5 2 0;1446 D#5 4 0;1450 G5 2 0;1452 D#5 4 0;1456 F5 2 0;1460 A#4 4 0;1464 C#5 2 0;1466 C5 2 0;1468 C#5 4 0;1472 C5 4 0;1476 G5 2 0;1478 D#5 4 0;1482 G5 2 0;1484 F5 4 0;1488 D#5 2 0;1492 A#4 6 0;1498 C5 2 0;1500 C#5 4 0;1504 C5 4 0;1508 G5 2 0;1510 D#5 4 0;1514 G5 2 0;1516 D#5 4 0;1520 F5 2 0;1524 A#4 4 0;1528 C#5 2 0;1530 C5 2 0;1532 C#5 4 0;0 C3 4 0;4 G3 2 0;6 D#3 4 0;10 G3 2 0;12 F3 4 0;16 D#3 2 0;20 A#2 6 0;26 C3 2 0;28 C#3 4 0;32 C3 4 0;36 G3 2 0;38 D#3 4 0;42 G3 2 0;44 D#3 4 0;48 F3 2 0;52 A#2 4 0;56 C#3 2 0;58 C3 2 0;60 C#3 4 0;64 C3 4 0;68 G3 2 0;70 D#3 4 0;74 G3 2 0;76 F3 4 0;80 D#3 2 0;84 A#2 6 0;90 C3 2 0;92 C#3 4 0;96 C3 4 0;100 G3 2 0;102 D#3 4 0;106 G3 2 0;108 D#3 4 0;112 F3 2 0;116 A#2 4 0;120 C#3 2 0;122 C3 2 0;124 C#3 4 0;128 C3 4 0;132 G3 2 0;134 
D#3 4 0;138 G3 2 0;140 F3 4 0;144 D#3 2 0;148 A#2 6 0;154 C3 2 0;156 C#3 4 0;160 C3 4 0;164 G3 2 0;166 D#3 4 0;170 G3 2 0;172 D#3 4 0;176 F3 2 0;180 A#2 4 0;184 C#3 2 0;186 C3 2 0;188 C#3 4 0;192 C3 4 0;196 G3 2 0;198 D#3 4 0;202 G3 2 0;204 F3 4 0;208 D#3 2 0;212 A#2 6 0;218 C3 2 0;220 C#3 4 0;224 C3 4 0;228 G3 2 0;230 D#3 4 0;234 G3 2 0;236 D#3 4 0;240 F3 2 0;244 A#2 4 0;248 C#3 2 0;250 C3 2 0;252 C#3 4 0;256 C3 4 0;260 G3 2 0;262 D#3 4 0;266 G3 2 0;268 F3 4 0;272 D#3 2 0;276 A#2 6 0;282 C3 2 0;284 C#3 4 0;288 C3 4 0;292 G3 2 0;294 D#3 4 0;298 G3 2 0;300 D#3 4 0;304 F3 2 0;308 A#2 4 0;312 C#3 2 0;314 C3 2 0;316 C#3 4 0;320 C3 4 0;324 G3 2 0;326 D#3 4 0;330 G3 2 0;332 F3 4 0;336 D#3 2 0;340 A#2 6 0;346 C3 2 0;348 C#3 4 0;352 C3 4 0;356 G3 2 0;358 D#3 4 0;362 G3 2 0;364 D#3 4 0;368 F3 2 0;372 A#2 4 0;376 C#3 2 0;378 C3 2 0;380 C#3 4 0;384 C3 4 0;388 G3 2 0;390 D#3 4 0;394 G3 2 0;396 F3 4 0;400 D#3 2 0;404 A#2 6 0;410 C3 2 0;412 C#3 4 0;416 C3 4 0;420 G3 2 0;422 D#3 4 0;426 G3 2 0;428 D#3 4 0;432 F3 2 0;436 A#2 4 0;440 C#3 2 0;442 C3 2 0;444 C#3 4 0;448 C3 4 0;452 G3 2 0;454 D#3 4 0;458 G3 2 0;460 F3 4 0;464 D#3 2 0;468 A#2 6 0;474 C3 2 0;476 C#3 4 0;480 C3 4 0;484 G3 2 0;486 D#3 4 0;490 G3 2 0;492 D#3 4 0;496 F3 2 0;500 A#2 4 0;504 C#3 2 0;506 C3 2 0;508 C#3 4 0;512 C3 4 0;516 G3 2 0;518 D#3 4 0;522 G3 2 0;524 F3 4 0;528 D#3 2 0;532 A#2 6 0;538 C3 2 0;540 C#3 4 0;544 C3 4 0;548 G3 2 0;550 D#3 4 0;554 G3 2 0;556 D#3 4 0;560 F3 2 0;564 A#2 4 0;568 C#3 2 0;570 C3 2 0;572 C#3 4 0;576 C4 4 0;580 G4 2 0;582 D#4 4 0;586 G4 2 0;588 F4 4 0;592 D#4 2 0;596 A#3 12 0;608 C4 4 0;612 G4 2 0;614 D#4 4 0;618 G4 2 0;620 D#4 4 0;624 F4 2 0;628 A#3 12 0;640 C4 4 0;644 G4 2 0;646 D#4 4 0;650 G4 2 0;652 F4 4 0;656 D#4 2 0;660 A#3 12 0;672 C4 4 0;676 G4 2 0;678 D#4 4 0;682 G4 2 0;684 D#4 4 0;688 F4 2 0;692 A#3 4 0;696 C#4 2 0;698 C4 2 0;700 C#4 4 0;704 C4 4 0;708 G4 2 0;710 D#4 4 0;714 G4 2 0;716 F4 4 0;720 D#4 2 0;724 A#3 6 0;730 C4 2 0;732 C#4 4 0;736 C4 4 0;740 G4 2 0;742 D#4 4 0;746 G4 2 0;748 D#4 4 0;752 F4 2 0;756 A#3 4 0;760 C#4 2 0;762 C4 2 0;764 C#4 4 0;768 C4 4 0;772 G4 2 0;774 D#4 4 0;778 G4 2 0;780 F4 4 0;784 D#4 2 0;788 A#3 6 0;794 C4 2 0;796 C#4 4 0;800 C4 4 0;804 G4 2 0;806 D#4 4 0;810 G4 2 0;812 D#4 4 0;816 F4 2 0;820 A#3 4 0;824 C#4 2 0;826 C4 2 0;828 C#4 4 0;832 C3 4 0;836 G3 2 0;838 D#3 4 0;842 G3 2 0;844 F3 4 0;848 D#3 2 0;852 A#2 6 0;858 C3 2 0;860 C#3 4 0;864 C3 4 0;868 G3 2 0;870 D#3 4 0;874 G3 2 0;876 D#3 4 0;880 F3 2 0;884 A#2 4 0;888 C#3 2 0;890 C3 2 0;892 C#3 4 0;896 C3 4 0;900 G3 2 0;902 D#3 4 0;906 G3 2 0;908 F3 4 0;912 D#3 2 0;916 A#2 6 0;922 C3 2 0;924 C#3 4 0;928 C3 4 0;932 G3 2 0;934 D#3 4 0;938 G3 2 0;940 D#3 4 0;944 F3 2 0;948 A#2 4 0;952 C#3 2 0;954 C3 2 0;956 C#3 4 0;960 C3 4 0;964 G3 2 0;966 D#3 4 0;970 G3 2 0;972 F3 4 0;976 D#3 2 0;980 A#2 6 0;986 C3 2 0;988 C#3 4 0;992 C3 4 0;996 G3 2 0;998 D#3 4 0;1002 G3 2 0;1004 D#3 4 0;1008 F3 2 0;1012 A#2 4 0;1016 C#3 2 0;1018 C3 2 0;1020 C#3 4 0;1024 C3 4 0;1028 G3 2 0;1030 D#3 4 0;1034 G3 2 0;1036 F3 4 0;1040 D#3 2 0;1044 A#2 6 0;1050 C3 2 0;1052 C#3 4 0;1056 C3 4 0;1060 G3 2 0;1062 D#3 4 0;1066 G3 2 0;1068 D#3 4 0;1072 F3 2 0;1076 A#2 4 0;1080 C#3 2 0;1082 C3 2 0;1084 C#3 4 0;1088 C3 4 0;1092 G3 2 0;1094 D#3 4 0;1098 G3 2 0;1100 F3 4 0;1104 D#3 2 0;1108 A#2 6 0;1114 C3 2 0;1116 C#3 4 0;1120 C3 4 0;1124 G3 2 0;1126 D#3 4 0;1130 G3 2 0;1132 D#3 4 0;1136 F3 2 0;1140 A#2 4 0;1144 C#3 2 0;1146 C3 2 0;1148 C#3 4 0;1152 C3 4 0;1156 G3 2 0;1158 D#3 4 0;1162 G3 2 0;1164 F3 4 0;1168 D#3 2 0;1172 A#2 6 0;1178 C3 2 0;1180 C#3 4 0;1184 C3 4 
0;1188 G3 2 0;1190 D#3 4 0;1194 G3 2 0;1196 D#3 4 0;1200 F3 2 0;1204 A#2 4 0;1208 C#3 2 0;1210 C3 2 0;1212 C#3 4 0;1216 C3 4 0;1220 G3 2 0;1222 D#3 4 0;1226 G3 2 0;1228 F3 4 0;1232 D#3 2 0;1236 A#2 6 0;1242 C3 2 0;1244 C#3 4 0;1248 C3 4 0;1252 G3 2 0;1254 D#3 4 0;1258 G3 2 0;1260 D#3 4 0;1264 F3 2 0;1268 A#2 4 0;1272 C#3 2 0;1274 C3 2 0;1276 C#3 4 0;1280 C4 4 0;1284 G4 2 0;1286 D#4 4 0;1290 G4 2 0;1292 F4 4 0;1296 D#4 2 0;1300 A#3 12 0;1312 C4 4 0;1316 G4 2 0;1318 D#4 4 0;1322 G4 2 0;1324 D#4 4 0;1328 F4 2 0;1332 A#3 12 0;1344 C4 4 0;1348 G4 2 0;1350 D#4 4 0;1354 G4 2 0;1356 F4 4 0;1360 D#4 2 0;1364 A#3 12 0;1376 C4 4 0;1380 G4 2 0;1382 D#4 4 0;1386 G4 2 0;1388 D#4 4 0;1392 F4 2 0;1396 A#3 4 0;1400 C#4 2 0;1402 C4 2 0;1404 C#4 4 0;1408 C4 4 0;1412 G4 2 0;1414 D#4 4 0;1418 G4 2 0;1420 F4 4 0;1424 D#4 2 0;1428 A#3 6 0;1434 C4 2 0;1436 C#4 4 0;1440 C4 4 0;1444 G4 2 0;1446 D#4 4 0;1450 G4 2 0;1452 D#4 4 0;1456 F4 2 0;1460 A#3 4 0;1464 C#4 2 0;1466 C4 2 0;1468 C#4 4 0;1472 C4 4 0;1476 G4 2 0;1478 D#4 4 0;1482 G4 2 0;1484 F4 4 0;1488 D#4 2 0;1492 A#3 6 0;1498 C4 2 0;1500 C#4 4 0;1504 C4 4 0;1508 G4 2 0;1510 D#4 4 0;1514 G4 2 0;1516 D#4 4 0;1520 F4 2 0;1524 A#3 4 0;1528 C#4 2 0;1530 C4 2 0;1532 C#4 4 0'
# C418 - Haggstrom
#song = '0 G5 9 33;0 C6 9 33;4 D5 1 33;8 C5 1 33;12 C6 9 33;12 G5 9 33;16 D5 1 33;20 C5 1 33;24 A5 1 33;28 C6 9 33;28 G5 9 33;32 D5 1 33;36 C5 1 33;40 C6 9 33;40 G5 9 33;44 D5 1 33;48 C5 1 33;56 C6 9 33;56 G5 9 33;60 D5 1 33;64 C5 1 33;68 C6 9 33;68 G5 9 33;72 D5 1 33;76 C5 1 33;80 A5 1 33;84 C6 9 33;84 G5 9 33;88 D5 1 33;92 C5 1 33;96 C6 9 33;96 G5 9 33;100 D5 1 33;104 C5 1 33;112 C6 4 33;112 G5 4 33;112 C5 1 33;112 C4 8 7;116 B5 4 33;116 D5 1 33;120 C5 1 33;120 G5 3 33;124 C6 4 33;124 G5 3 33;124 C5 1 33;124 C4 8 7;128 B5 4 33;128 D5 1 33;128 G5 4 33;132 C5 4 33;132 E5 4 33;136 A5 4 33;140 C6 4 33;140 G5 4 33;140 C5 1 33;140 C4 8 7;144 D5 1 33;144 B5 4 33;148 C5 1 33;148 G5 3 33;152 C6 4 33;152 G5 4 33;152 C5 1 33;152 C4 8 7;156 D5 4 33;156 B5 8 33;160 C5 6 33;168 C6 4 33;168 G5 3 33;168 C5 1 33;168 E5 1 33;168 C4 8 7;172 B5 4 33;172 G5 3 33;172 D5 4 33;176 C5 3 33;176 G5 3 33;180 C6 4 33;180 G5 3 33;180 E5 1 33;180 C5 1 33;180 C4 8 7;184 B5 4 33;184 G5 3 33;184 D5 4 33;188 C5 3 33;188 E5 3 33;192 A5 4 33;192 D6 4 33;196 C4 8 7;196 C5 7 33;196 G5 4 33;196 E6 4 33;200 D5 4 33;200 G6 4 33;204 C6 4 33;204 C5 3 33;208 E6 4 33;208 G5 4 33;212 G6 4 33;212 D5 4 33;216 C6 4 33;216 C5 4 33;208 C5 7 33;208 C4 7 7;224 A3 8 7;224 E5 4 33;224 B5 4 33;228 D5 4 33;228 A5 4 33;232 E5 3 33;232 A4 4 33;236 B5 4 33;236 E5 4 33;236 A3 8 7;240 D5 4 33;240 A5 4 33;244 G#5 4 33;244 A4 4 33;248 F#4 4 33;252 B5 4 33;252 E5 4 33;252 A3 8 7;256 A5 4 33;256 D5 4 33;260 E5 4 33;260 A4 4 33;264 A3 15 7;264 D5 4 33;268 C#5 4 33;272 A4 4 33;280 A3 8 7;280 E5 4 33;280 B5 4 33;284 D5 4 33;284 A5 4 33;292 B5 4 33;292 E5 4 33;292 A3 8 7;296 D5 4 33;296 A5 4 33;300 G#5 4 33;300 A4 4 33;288 A4 4 33;288 E5 3 33;288 C#6 4 33;300 E6 4 33;304 F#4 4 33;308 E6 3 33;308 C#6 4 33;308 G#6 4 33;308 A3 8 7;320 A3 8 7;320 C#6 4 33;320 E6 3 33;320 G#6 4 33;312 E6 3 33;312 A6 4 33;312 D5 4 33;316 A4 4 33;316 A5 4 33;316 E6 3 33;324 E6 3 33;324 A6 4 33;324 D5 4 33;328 A5 4 33;328 A4 4 33;120 G3 4 7;148 G3 4 7;176 G3 4 7;204 G3 4 7;132 G3 4 7;232 E3 4 7;260 E3 4 7;288 E3 4 7;316 E3 4 7'
# https://onlinesequencer.net/1140127 C418 - Sweden
#song = '64 E3 4 13;64 E4 4 13;64 G4 4 13;64 B4 4 13;68 F#3 4 13;76 B3 4 13;80 A3 4 13;84 G3 4 13;88 D3 4 13;72 D5 4 13;72 A4 4 13;72 F#5 4 13;72 G3 4 13;80 F#4 4 13;80 A4 4 13;80 C#5 4 13;88 A4 4 13;88 C#5 4 13;88 E5 4 13;96 E3 4 13;96 E4 4 13;96 G4 4 13;96 B4 4 13;100 F#3 4 13;108 B3 4 13;112 A3 4 13;116 G3 4 13;120 D3 4 13;104 D5 4 13;104 A4 4 13;104 F#5 4 13;104 G3 4 13;112 F#4 4 13;112 A4 4 13;112 C#5 4 13;120 A4 4 13;120 C#5 4 13;120 E5 4 13;0 E3 4 13;4 F#3 4 13;12 B3 4 13;16 A3 4 13;20 G3 4 13;24 D3 4 13;8 G3 4 13;32 E3 4 13;36 F#3 4 13;44 B3 4 13;48 A3 4 13;52 G3 4 13;56 D3 4 13;40 G3 4 13;0 E4 4 13;0 G4 4 13;8 A4 4 13;8 D5 4 13;16 A4 4 13;16 F#4 4 13;24 A4 4 13;24 C#5 4 13;32 E4 4 13;32 G4 4 13;40 A4 4 13;40 D5 4 13;48 A4 4 13;48 F#4 4 13;56 A4 4 13;56 C#5 4 13;128 E3 4 13;128 E4 4 13;128 G4 4 13;128 B4 4 13;132 F#3 4 13;140 B3 4 13;144 A3 4 13;148 G3 4 13;152 D3 4 13;136 D5 4 13;136 A4 4 13;136 F#5 4 13;136 G3 4 13;144 F#4 4 13;144 A4 4 13;144 C#5 4 13;152 A4 4 13;152 C#5 4 13;152 E5 4 13;132 A5 2 13;134 B5 2 13;142 D5 1 13;143 E5 1 13;150 F#5 1 13;151 A5 1 13;160 E3 4 13;160 E4 2 13;160 G4 2 13;160 B4 2 13;164 F#3 4 13;172 B3 4 13;176 A3 4 13;180 G3 4 13;184 D3 4 13;168 D5 4 13;168 A4 4 13;168 F#5 4 13;168 G3 4 13;176 F#4 4 13;176 A4 4 13;176 C#5 4 13;184 A4 4 13;184 C#5 4 13;184 E5 4 13;162 D6 2 13;164 B5 2 13;166 A5 2 13;174 D5 1 13;175 E5 1 13;182 A5 1 13;183 F#5 1 13'
# https://onlinesequencer.net/1194533 C418 - Wet Hands
#song = '0 A3 1 32;4 E4 1 32;8 A4 1 32;12 B4 1 32;16 C#5 1 32;20 B4 1 32;24 A4 1 32;28 E4 1 32;32 D4 1 32;36 F#4 1 32;40 C#5 1 32;44 E5 1 32;48 C#5 1 32;52 A4 1 32;64 A3 1 32;68 E4 1 32;72 A4 1 32;76 B4 1 32;80 C#5 1 32;84 B4 1 32;88 A4 1 32;92 E4 1 32;96 D4 1 32;100 F#4 1 32;104 C#5 1 32;108 E5 1 32;112 C#5 1 32;116 A4 1 32;128 A3 1 32;132 E4 1 32;136 A4 1 32;140 B4 1 32;144 C#5 1 32;148 B4 1 32;152 A4 1 32;156 E4 1 32;160 D4 1 32;164 F#4 1 32;168 C#5 1 32;172 E5 1 32;176 C#5 1 32;180 A4 1 32;128 G#5 1 32;152 A5 1 32;160 F#5 1 32;184 E5 1 32;192 G#5 1 32;188 F#5 1 32;192 A3 1 32;196 E4 1 32;200 A4 1 32;204 B4 1 32;208 C#5 1 32;212 B4 1 32;216 A4 1 32;220 E4 1 32;224 D4 1 32;228 F#4 1 32;232 C#5 1 32;236 E5 1 32;240 C#5 1 32;244 A5 1 32;216 B5 1 32;220 C#6 1 32;228 F#5 1 32;248 C#6 1 32;252 E6 1 32;256 G6 1 32;256 G3 1 32;260 B3 1 32;264 D4 1 32;268 F#4 1 32;272 A4 1 32;268 F#6 1 32;244 A4 1 32;272 D6 1 32;276 F#4 1 32;280 D4 1 32;284 B3 1 32;288 G3 1 32;292 B3 1 32;296 D4 1 32;300 F#4 1 32;304 A4 1 32;280 A5 1 32;284 B5 1 32;320 G6 1 32;320 G3 1 32;324 B3 1 32;328 D4 1 32;332 F#4 1 32;328 F#6 1 32;336 D6 1 32;344 A5 1 32;348 B5 1 32;340 F#4 1 32;344 D4 1 32;348 B3 1 32;352 G3 1 32;356 B3 1 32;360 D4 1 32;364 F#4 1 32;368 A4 1 32;336 A4 1 32;376 A5 1 32;384 E5 1 32;384 A3 1 32;388 E4 1 32;392 A4 1 32;396 B4 1 32;400 C#5 1 32;404 B4 1 32;408 A4 1 32;412 E4 1 32;416 A3 1 32;432 C#5 1 32;436 E5 1 32;440 A5 1 32;444 C#6 1 32;460 B3 1 32;464 D4 1 32;468 F#4 1 32;472 A4 1 32;476 C#5 1 32;459 F#5 1 32;459 B5 1 32;459 D6 1 32;472 C#6 1 32;476 A5 1 32;484 E5 1 32;484 E6 1 32;488 F#6 1 32;488 F#5 1 32;492 B3 1 32;496 D4 1 32;500 F#4 1 32;504 A4 1 32;508 C#5 1 32;496 D6 1 32;516 B5 1 32;520 C#6 1 32;524 D6 1 32;532 C#6 1 32;536 D6 1 32;544 F#6 1 32;555 C#6 8 32;524 G3 1 32;528 B3 1 32;532 D4 1 32;536 F#4 1 32;540 A4 1 32;555 A5 8 32;555 E5 8 32;555 A4 9 32;556 E4 8 32;556 C#4 8 32;556 A3 8 32;572 B5 1 32;576 A5 1 32;580 B5 1 32;580 E3 1 32;584 G#3 1 32;588 B3 1 32;592 E4 1 32;596 G#4 1 32;600 E4 1 32;604 B3 1 32;608 G#3 1 32;612 E3 1 32;616 G#3 1 32;620 B3 1 32;624 E4 1 32;628 G#4 1 32;632 E4 1 32;636 A3 1 32;644 G3 1 32;648 B3 1 32;652 D4 1 32;656 F#4 1 32;644 G6 1 32;648 F#6 1 32;652 E6 1 32;656 D6 1 32;660 E6 1 32;664 D6 1 32;668 E6 1 32;672 F#6 1 32;660 A4 1 32;664 F#4 1 32;668 D4 1 32;672 B3 1 32;676 A3 1 32;680 C#4 1 32;684 E4 1 32;688 A4 1 32;692 C#5 1 32;696 A4 1 32;700 E4 1 32;680 E6 1 32;692 A6 1 32;704 C#4 1 32;708 E3 1 32;712 G#3 1 32;716 B3 1 32;720 E4 1 32;724 G#4 1 32;708 G#6 1 32;712 E6 1 32;716 B5 1 32;720 G#5 1 32;724 E5 1 32;738 E3 1 32;742 G#3 1 32;746 B3 1 32;750 E4 1 32;754 G#4 1 32;738 B5 1 32;742 G#5 1 32;746 E5 1 32;750 B4 1 32;766 E3 1 32;770 G#3 1 32;774 B3 1 32;778 E4 1 32;782 B3 1 32;794 E3 1 32;798 G#3 1 32;802 B3 1 32;806 E4 1 32;810 E4 1 32;818 A5 1 32;826 E5 1 32;826 A3 1 32;830 E4 1 32;834 A4 1 32;838 B4 1 32;842 C#5 1 32;846 B4 1 32;850 A4 1 32;854 E4 1 32;858 D4 1 32;862 F#4 1 32;866 C#5 1 32;870 E5 1 32;874 C#5 1 32;878 A4 1 32;882 A5 1 32;890 A5 1 32;890 A3 1 32;894 E4 1 32;898 A4 1 32;902 B4 1 32;906 C#5 1 32;910 B4 1 32;914 A4 1 32;918 E4 1 32;922 D4 1 32;926 F#4 1 32;930 C#5 1 32;934 E5 1 32;938 C#5 1 32;942 A4 1 32;420 C#4 1 32;424 E4 1 32;428 A4 1 32;953 A3 1 32;953 G#5 1 32;957 E4 1 32;961 A4 1 32;965 B4 1 32;969 C#5 1 32;974 B4 1 32;979 A4 1 32;985 E4 1 32;995 A5 1 32;995 A3 1 32;995 C#4 1 32;995 E4 1 32;995 A4 1 32;995 C#5 1 32;995 E5 1 32;1015 B7 1 5'
# https://onlinesequencer.net/1865394 C418 - Calm 3
#song = '0 E4 1 8;0 G4 1 8;8 A4 1 8;8 D5 1 8;16 F#4 1 8;16 A4 1 8;24 A4 1 8;24 C#5 1 8;32 E4 1 8;32 G4 1 8;32 B4 1 8;40 A4 1 8;40 D5 1 8;40 F#5 1 8;48 F#4 1 8;48 A4 1 8;48 C#5 1 8;56 A4 1 8;56 C#5 1 8;56 E5 1 8;64 E4 1 8;64 G4 1 8;64 B4 1 8;72 A4 1 8;72 D5 1 8;72 F#5 1 8;80 F#4 1 8;80 A4 1 8;80 C#5 1 8;88 A4 1 8;88 C#5 1 8;88 E5 1 8;96 E4 1 8;96 G4 1 8;96 B4 1 8;100 A5 1 8;102 B5 1 8;104 A4 1 8;104 D5 1 8;104 F#5 1 8;110 D5 1 8;111 E5 1 8;112 F#4 1 8;112 A4 1 8;112 C#5 1 8;118 F#5 1 8;119 A5 1 8;120 A4 1 8;120 C#5 1 8;120 E5 1 8;128 E4 1 8;128 G4 1 8;128 B4 1 8;130 D6 1 8;132 B5 1 8;134 A5 1 8;136 A4 1 8;136 D5 1 8;136 F#5 1 8;142 D5 1 8;143 E5 1 8;144 F#4 1 8;144 A4 1 8;144 C#5 1 8;150 A5 1 8;151 F#5 1 8;152 A4 1 8;152 C#5 1 8;152 E5 1 8;160 E4 1 8;160 G4 1 8;160 B4 1 8;164 A5 1 8;166 B5 1 8;168 D5 1 8;168 F#5 1 8;168 D6 1 8;174 D5 1 8;174 F#6 1 8;175 E5 1 8;175 E6 1 8;176 D5 1 8;176 C#6 1 8;182 F#5 1 8;182 D6 1 8;183 C#6 1 8;184 A4 1 8;184 C#5 1 8;184 E5 1 8;184 A5 1 8;192 E4 1 8;192 G4 1 8;192 B4 1 8;196 B5 1 8;198 A5 1 8;200 A4 1 8;200 D5 1 8;200 F#5 1 8;206 D5 1 8;207 E5 1 8;208 F#4 1 8;208 A4 1 8;208 C#5 1 8;214 F#5 1 8;215 A5 1 8;216 A4 1 8;216 C#5 1 8;216 E5 1 8;224 E4 1 8;224 G4 1 8;224 B4 1 8;228 A5 1 8;230 B5 1 8;232 A4 1 8;232 D5 1 8;232 F#5 1 8;238 D5 1 8;239 E5 1 8;240 F#4 1 8;240 A4 1 8;240 C#5 1 8;244 F#6 1 8;246 F#5 1 8;247 A5 1 8;248 A4 1 8;248 C#5 1 8;248 E5 1 8;256 B4 1 8;256 D5 1 8;256 F#5 1 8;262 B5 1 8;263 A5 1 8;264 G#4 1 8;264 B4 1 8;264 E5 1 8;268 E5 1 8;270 D5 1 8;272 E4 1 8;272 A4 1 8;272 C#5 1 8;278 D5 1 8;279 E5 1 8;280 D4 1 8;280 G4 1 8;280 B4 1 8;288 B4 1 8;288 D5 1 8;288 F#5 1 8;288 D6 1 8;294 B5 1 8;295 A5 1 8;296 G#4 1 8;296 B4 1 8;296 E5 1 8;300 E5 1 8;302 D5 1 8;302 E6 1 8;304 E4 1 8;304 A4 1 8;304 C#5 1 8;304 C#6 1 8;308 D6 1 8;310 F#6 1 8;312 B4 1 8;312 B5 1 8;0 E3 1 8;4 F#3 1 8;8 G3 1 8;12 B3 1 8;16 A3 1 8;20 G3 1 8;24 D3 1 8;32 E3 1 8;36 F#3 1 8;40 G3 1 8;44 B3 1 8;48 A3 1 8;52 G3 1 8;56 D3 1 8;64 E3 1 8;68 F#3 1 8;72 G3 1 8;76 B3 1 8;80 A3 1 8;84 G3 1 8;88 D3 1 8;96 E3 1 8;100 F#3 1 8;104 G3 1 8;108 B3 1 8;112 A3 1 8;116 G3 1 8;120 D3 1 8;128 E3 1 8;132 F#3 1 8;136 G3 1 8;140 B3 1 8;144 A3 1 8;148 G3 1 8;152 D3 1 8;160 E3 1 8;164 F#3 1 8;168 G3 1 8;168 A4 1 8;172 B3 1 8;176 A3 1 8;176 F#4 1 8;176 A4 1 8;180 G3 1 8;184 D3 1 8;192 E3 1 8;196 F#3 1 8;200 G3 1 8;204 B3 1 8;208 A3 1 8;212 G3 1 8;216 D3 1 8;224 E3 1 8;228 F#3 1 8;232 G3 1 8;236 B3 1 8;240 A3 1 8;244 G3 1 8;248 D3 1 8;256 B2 1 8;256 B3 1 8;264 E3 1 8;264 E4 1 8;272 A2 1 8;272 A3 1 8;280 G2 1 8;280 G3 1 8;288 B2 1 8;288 B3 1 8;296 E3 1 8;296 E4 1 8;304 A2 1 8;304 A3 1 8;312 G2 1 8;312 D4 1 8;312 G4 1 8;312 B4 1 8;0 E4 1 26;0 G4 1 26;8 A4 1 26;8 D5 1 26;16 F#4 1 26;16 A4 1 26;24 A4 1 26;24 C#5 1 26;32 E4 1 26;32 G4 1 26;32 B4 1 26;40 A4 1 26;40 D5 1 26;40 F#5 1 26;48 F#4 1 26;48 A4 1 26;48 C#5 1 26;56 A4 1 26;56 C#5 1 26;56 E5 1 26;64 E4 1 26;64 G4 1 26;64 B4 1 26;72 A4 1 26;72 D5 1 26;72 F#5 1 26;80 F#4 1 26;80 A4 1 26;80 C#5 1 26;88 A4 1 26;88 C#5 1 26;88 E5 1 26;96 E4 1 26;96 G4 1 26;96 B4 1 26;100 A5 1 26;102 B5 1 26;104 A4 1 26;104 D5 1 26;104 F#5 1 26;110 D5 1 26;111 E5 1 26;112 F#4 1 26;112 A4 1 26;112 C#5 1 26;118 F#5 1 26;119 A5 1 26;120 A4 1 26;120 C#5 1 26;120 E5 1 26;128 E4 1 26;128 G4 1 26;128 B4 1 26;130 D6 1 26;132 B5 1 26;134 A5 1 26;136 A4 1 26;136 D5 1 26;136 F#5 1 26;142 D5 1 26;143 E5 1 26;144 F#4 1 26;144 A4 1 26;144 C#5 1 26;150 A5 1 26;151 F#5 1 26;152 A4 1 26;152 C#5 1 26;152 E5 1 26;160 E4 1 26;160 G4 1 26;160 B4 1 26;164 A5 1 26;166 B5 1 26;168 D5 1 26;168 F#5 1 
26;168 D6 1 26;174 D5 1 26;174 F#6 1 26;175 E5 1 26;175 E6 1 26;176 D5 1 26;176 C#6 1 26;182 F#5 1 26;182 D6 1 26;183 C#6 1 26;184 A4 1 26;184 C#5 1 26;184 E5 1 26;184 A5 1 26;192 E4 1 26;192 G4 1 26;192 B4 1 26;196 B5 1 26;198 A5 1 26;200 A4 1 26;200 D5 1 26;200 F#5 1 26;206 D5 1 26;207 E5 1 26;208 F#4 1 26;208 A4 1 26;208 C#5 1 26;214 F#5 1 26;215 A5 1 26;216 A4 1 26;216 C#5 1 26;216 E5 1 26;224 E4 1 26;224 G4 1 26;224 B4 1 26;228 A5 1 26;230 B5 1 26;232 A4 1 26;232 D5 1 26;232 F#5 1 26;238 D5 1 26;239 E5 1 26;240 F#4 1 26;240 A4 1 26;240 C#5 1 26;244 F#6 1 26;246 F#5 1 26;247 A5 1 26;248 A4 1 26;248 C#5 1 26;248 E5 1 26;256 B4 1 26;256 D5 1 26;256 F#5 1 26;262 B5 1 26;263 A5 1 26;264 G#4 1 26;264 B4 1 26;264 E5 1 26;268 E5 1 26;270 D5 1 26;272 E4 1 26;272 A4 1 26;272 C#5 1 26;278 D5 1 26;279 E5 1 26;280 D4 1 26;280 G4 1 26;280 B4 1 26;288 B4 1 26;288 D5 1 26;288 F#5 1 26;288 D6 1 26;294 B5 1 26;295 A5 1 26;296 G#4 1 26;296 B4 1 26;296 E5 1 26;300 E5 1 26;302 D5 1 26;302 E6 1 26;304 E4 1 26;304 A4 1 26;304 C#5 1 26;304 C#6 1 26;308 D6 1 26;310 F#6 1 26;312 B4 1 26;312 B5 1 26;0 E3 1 26;4 F#3 1 26;8 G3 1 26;12 B3 1 26;16 A3 1 26;20 G3 1 26;24 D3 1 26;32 E3 1 26;36 F#3 1 26;40 G3 1 26;44 B3 1 26;48 A3 1 26;52 G3 1 26;56 D3 1 26;64 E3 1 26;68 F#3 1 26;72 G3 1 26;76 B3 1 26;80 A3 1 26;84 G3 1 26;88 D3 1 26;96 E3 1 26;100 F#3 1 26;104 G3 1 26;108 B3 1 26;112 A3 1 26;116 G3 1 26;120 D3 1 26;128 E3 1 26;132 F#3 1 26;136 G3 1 26;140 B3 1 26;144 A3 1 26;148 G3 1 26;152 D3 1 26;160 E3 1 26;164 F#3 1 26;168 G3 1 26;168 A4 1 26;172 B3 1 26;176 A3 1 26;176 F#4 1 26;176 A4 1 26;180 G3 1 26;184 D3 1 26;192 E3 1 26;196 F#3 1 26;200 G3 1 26;204 B3 1 26;208 A3 1 26;212 G3 1 26;216 D3 1 26;224 E3 1 26;228 F#3 1 26;232 G3 1 26;236 B3 1 26;240 A3 1 26;244 G3 1 26;248 D3 1 26;256 B2 1 26;256 B3 1 26;264 E3 1 26;264 E4 1 26;272 A2 1 26;272 A3 1 26;280 G2 1 26;280 G3 1 26;288 B2 1 26;288 B3 1 26;296 E3 1 26;296 E4 1 26;304 A2 1 26;304 A3 1 26;312 G2 1 26;312 D4 1 26;312 G4 1 26;312 B4 1 26'
# https://onlinesequencer.net/1696155 Undertale - Heartache
song = '0 A#3 1 0;0 F#3 2 0;1 C#4 1 0;2 C4 1 0;3 C#4 1 0;4 A#3 1 0;6 A#3 1 0;6 F#3 2 0;7 C4 1 0;8 C#4 1 0;9 G#4 1 0;10 F4 1 0;12 A#3 1 0;12 F#3 2 0;13 C#4 1 0;14 C4 1 0;15 C#4 1 0;16 A#3 1 0;18 A#3 1 0;18 F#3 2 0;19 C4 1 0;20 C#4 1 0;21 G#4 1 0;22 F4 1 0;24 A#3 1 0;24 F3 2 0;25 C#4 1 0;26 C4 1 0;27 C#4 1 0;28 A#3 1 0;30 A#3 1 0;31 C4 1 0;30 F3 2 0;32 C#4 1 0;33 G#4 1 0;34 F4 1 0;36 F3 2 0;36 D#5 2 0;38 C6 2 0;38 G#3 2 0;40 C4 2 0;40 A#5 2 0;42 G#5 1 0;42 D#4 2 0;43 F#5 1 0;44 C4 1 0;45 G#3 1 0;44 F5 2 0;46 D#4 1 0;47 C4 1 0;46 G#5 2 0;48 A#3 1 0;48 F#3 2 0;49 C#4 1 0;50 C4 1 0;51 C#4 1 0;52 A#3 1 0;54 A#3 1 0;55 C4 1 0;54 F#3 2 0;56 C#4 1 0;57 G#4 1 0;58 F4 1 0;60 A#3 1 0;60 F#3 2 0;61 C#4 1 0;48 A#5 15 0;62 C4 1 0;63 C#4 1 0;64 A#3 1 0;66 A#3 1 0;66 F#3 2 0;67 C4 1 0;68 C#4 1 0;69 G#4 1 0;70 F4 1 0;72 A#3 1 0;72 F3 2 0;73 C#4 1 0;74 C4 1 0;75 C#4 1 0;76 A#3 1 0;78 A#3 1 0;78 F3 2 0;79 C4 1 0;80 C#4 1 0;81 G#4 1 0;82 F4 1 0;84 D#5 2 0;84 D#6 2 0;84 G#3 2 0;86 C7 2 0;86 D#3 2 0;86 C6 2 0;88 A#6 2 0;88 F3 2 0;88 A#5 2 0;90 G#5 1 0;90 G#6 1 0;90 G#3 2 0;91 F#6 1 0;91 F#5 1 0;92 F5 2 0;92 C4 2 0;92 F6 2 0;94 D#4 2 0;94 G#6 2 0;94 G#5 2 0;96 F#3 2 0;98 C4 1 0;99 C#4 1 0;100 A#3 1 0;101 F3 1 0;96 A#5 8 0;102 F#3 2 0;104 C4 1 0;104 F5 2 0;105 C#4 1 0;106 A#3 1 0;106 G#5 2 0;107 D#4 1 0;108 G#3 2 0;110 D#3 1 0;108 A#5 4 0;111 F3 1 0;112 G#3 1 0;112 C#6 2 0;113 F3 1 0;114 D#6 1 0;114 D#3 2 0;115 C#6 1 0;116 F3 2 0;116 C6 2 0;118 G#3 2 0;118 C#6 2 0;120 A#3 1 0;121 A#4 1 0;122 D#4 1 0;123 F4 1 0;124 C#4 1 0;125 G#3 1 0;126 A#3 1 0;127 A#4 1 0;128 D#4 1 0;129 F4 1 0;130 C#4 1 0;131 G#3 1 0;132 A#3 1 0;133 A#4 1 0;120 A#5 15 0;134 D#4 1 0;135 F4 1 0;136 C#4 1 0;137 F3 1 0;138 G#3 2 0;140 D#3 2 0;142 G#3 2 0;144 A#4 1 0;144 A#3 1 0;144 F#3 1 0;145 C#5 1 0;145 C#4 1 0;146 C5 1 0;146 C4 1 0;147 C#5 1 0;147 C#4 1 0;148 A#4 1 0;148 A#3 1 0;150 A#4 1 0;150 F#3 1 0;150 A#3 1 0;151 C4 1 0;151 C5 1 0;152 C#4 1 0;152 C#5 1 0;153 G#4 1 0;154 F4 1 0;154 F5 1 0;156 A#4 1 0;156 A#3 1 0;156 F#3 1 0;157 C#5 1 0;157 C#4 1 0;158 C5 1 0;158 C4 1 0;159 C#5 1 0;159 C#4 1 0;160 A#3 1 0;160 A#4 1 0;162 A#4 1 0;162 A#3 1 0;162 F#3 1 0;163 C5 1 0;163 C4 1 0;164 C#5 1 0;164 C#4 1 0;165 G#4 1 0;166 F5 1 0;166 F4 1 0;168 A#3 1 0;168 F3 1 0;168 A#4 1 0;169 C#5 1 0;169 C#4 1 0;170 C5 1 0;170 C4 1 0;171 C#4 1 0;171 C#5 1 0;172 A#3 1 0;172 A#4 1 0;174 A#4 1 0;174 A#3 1 0;174 F3 1 0;175 C5 1 0;175 C4 1 0;176 C#5 1 0;176 C#4 1 0;177 G#4 1 0;178 F4 1 0;178 F5 1 0;180 F3 2 0;180 D#6 2 0;180 F4 2 0;182 G#4 2 0;182 C7 2 0;182 G#3 2 0;184 C4 2 0;184 A#6 2 0;184 C5 2 0;186 G#6 1 0;186 D#5 2 0;186 D#4 2 0;187 F#6 1 0;188 C5 1 0;188 C4 1 0;189 G#3 1 0;189 G#4 1 0;188 F6 2 0;190 D#4 1 0;190 D#5 1 0;191 C5 1 0;190 G#6 2 0;191 C4 1 0;192 A#4 1 0;192 A#3 1 0;192 F#3 1 0;193 C#5 1 0;193 C#4 1 0;194 C4 1 0;194 C5 1 0;195 C#4 1 0;195 C#5 1 0;196 A#4 1 0;196 A#3 1 0;198 F#3 1 0;198 A#3 1 0;198 A#4 1 0;199 C5 1 0;199 C4 1 0;200 C#4 1 0;200 C#5 1 0;201 G#4 1 0;202 F4 1 0;202 F5 1 0;204 A#4 1 0;204 F#3 1 0;204 A#3 1 0;205 C#4 1 0;205 C#5 1 0;206 C5 1 0;192 A#6 15 0;206 C4 1 0;207 C#4 1 0;207 C#5 1 0;208 A#3 1 0;208 A#4 1 0;210 A#3 1 0;210 F#3 1 0;210 A#4 1 0;211 C5 1 0;211 C4 1 0;212 C#5 1 0;212 C#4 1 0;213 G#4 1 0;214 F5 1 0;214 F4 1 0;216 D#5 1 0;216 D#4 1 0;217 F4 1 0;217 F5 1 0;218 G#5 1 0;218 G#4 1 0;219 A#4 1 0;219 A#5 1 0;220 C6 1 0;220 C5 1 0;221 C#6 1 0;221 C#5 1 0;222 D#6 1 0;222 D#5 1 0;223 C#6 1 0;223 C#5 1 0;224 C6 1 0;224 C5 1 0;225 A#5 1 0;225 A#4 1 0;226 G#5 1 0;226 G#4 1 0;227 F4 1 0;227 F5 1 0;228 G#5 2 0;228 G#4 2 0;228 D#6 2 0;228 
G#3 2 0;230 C7 2 0;230 D#3 2 0;230 D#4 2 0;230 D#6 2 0;232 F4 2 0;232 C#6 2 0;232 F3 2 0;232 A#6 2 0;234 G#6 1 0;234 C6 1 0;234 G#3 2 0;234 G#4 2 0;235 A#5 1 0;235 F#6 1 0;236 G#5 2 0;236 F6 2 0;236 C5 2 0;236 C4 2 0;238 G#6 2 0;238 F5 2 0;238 D#4 2 0;238 D#5 2 0;240 F#3 2 0;242 C4 1 0;243 C#4 1 0;244 A#3 1 0;245 F3 1 0;240 A#5 8 0;246 F#3 2 0;240 F#5 8 0;248 C4 1 0;248 F5 2 0;248 C5 2 0;249 C#4 1 0;250 A#3 1 0;250 G#5 2 0;250 D#5 2 0;251 D#4 1 0;252 G#3 2 0;254 D#3 1 0;252 F5 4 0;252 A#5 4 0;255 F3 1 0;256 G#3 1 0;256 F5 2 0;256 C#6 2 0;257 F3 1 0;258 F#5 1 0;258 D#6 1 0;258 D#3 2 0;259 C#6 1 0;259 F5 1 0;260 D#5 2 0;260 C6 2 0;260 F3 2 0;262 G#3 2 0;262 C#6 2 0;262 F5 2 0;264 A#3 1 0;265 A#4 1 0;266 D#4 1 0;267 F4 1 0;268 C#4 1 0;269 G#3 1 0;270 A#3 1 0;271 A#4 1 0;272 D#4 1 0;273 F4 1 0;274 C#4 1 0;275 G#3 1 0;276 A#3 1 0;277 A#4 1 0;278 D#4 1 0;264 A#5 15 0;264 F5 15 0;279 F4 1 0;280 C#4 1 0;281 F3 1 0;282 G#3 2 0;282 F5 2 0;282 A#5 2 0;284 C#6 2 0;284 G#5 2 0;284 D#3 2 0;286 D#6 2 0;286 A#5 2 0;286 F3 2 0;288 D#3 2 0;290 G#3 1 0;291 A#3 1 0;292 F#3 1 0;293 C#3 1 0;294 D#3 2 0;288 F6 8 0;288 G#5 8 0;296 G#3 1 0;296 G#5 2 0;296 C#6 2 0;297 A#3 1 0;298 D#6 2 0;298 A#5 2 0;298 C4 2 0;300 F3 2 0;302 C4 1 0;300 G#5 4 0;300 F6 4 0;303 G#4 1 0;304 F4 1 0;304 D#6 2 0;304 A#5 2 0;305 C4 1 0;306 G#5 1 0;306 C#6 1 0;306 F3 1 0;307 A#5 1 0;307 D#6 1 0;307 F4 1 0;308 G#3 1 0;309 F4 1 0;308 C6 2 0;308 D#5 2 0;310 A#3 1 0;311 G#4 1 0;310 G#5 2 0;310 F5 2 0;312 F#3 1 0;313 F#4 1 0;314 C#4 1 0;315 D#4 1 0;316 F4 1 0;317 F3 1 0;318 F#3 1 0;312 A#5 8 0;312 F5 8 0;319 F#4 1 0;320 C#4 1 0;320 C#5 2 0;321 D#4 1 0;322 F4 1 0;322 D#5 2 0;323 F#3 1 0;324 F3 1 0;325 F4 1 0;326 C4 1 0;324 F5 4 0;327 G#4 1 0;328 F4 1 0;328 D#5 2 0;329 C4 1 0;330 C#5 1 0;330 F3 1 0;331 F4 1 0;331 D#5 1 0;332 G#3 1 0;332 C5 2 0;333 F4 1 0;334 A#3 1 0;335 G#4 1 0;334 G#5 2 0;336 F#3 1 0;337 F#4 1 0;338 C#4 1 0;339 D#4 1 0;340 F4 1 0;341 F3 1 0;342 F#3 1 0;343 F#4 1 0;336 F5 8 0;344 C#4 1 0;344 G#5 2 0;344 C#6 2 0;345 D#4 1 0;346 F4 1 0;346 A#5 2 0;346 D#6 2 0;347 F#3 1 0;348 G#3 1 0;349 D#3 1 0;350 C4 1 0;348 G#5 4 0;348 F6 4 0;351 G#3 1 0;352 D#4 1 0;352 D#6 2 0;352 A#5 2 0;353 C4 1 0;354 C#6 1 0;354 G#5 1 0;354 G#4 1 0;355 D#6 1 0;355 G#3 1 0;355 A#5 1 0;356 C6 2 0;356 D#5 2 0;356 D#4 2 0;358 G#5 2 0;358 F5 2 0;358 C4 2 0;360 A#3 1 0;361 F3 1 0;362 C4 1 0;363 G#3 1 0;364 C#4 1 0;365 F3 1 0;366 B3 1 0;367 C#4 1 0;368 D#4 1 0;369 F#3 1 0;370 A#3 1 0;371 C#4 1 0;372 B3 1 0;373 F#3 1 0;374 C#4 1 0;360 F5 15 0;360 A#5 15 0;375 F#3 1 0;376 D#4 1 0;377 F#3 1 0;378 F#4 1 0;379 B3 1 0;380 G#4 1 0;381 B3 1 0;382 C#5 1 0;383 G#4 1 0;384 B3 1 0;384 D#6 1 0;384 D#5 1 0;385 F#3 1 0;385 D#6 2 0;385 D#5 2 0;386 C#4 1 0;387 F#3 1 0;388 D#4 1 0;387 D#6 2 0;387 D#5 2 0;389 D#6 1 0;389 F#3 1 0;389 D#5 1 0;390 B3 1 0;391 F#3 1 0;390 D#5 2 0;390 D#6 2 0;392 C#4 1 0;393 F#3 1 0;392 D#6 2 0;392 D#5 2 0;394 D#4 1 0;394 D#6 2 0;394 D#5 2 0;395 F#3 1 0;396 D#6 1 0;396 D#5 1 0;396 F#4 1 0;397 B3 1 0;398 G#4 1 0;397 D#5 2 0;397 D#6 2 0;399 C#4 1 0;399 D#6 2 0;399 D#5 2 0;400 C#5 1 0;401 G#4 1 0;401 D#6 1 0;401 D#5 1 0;402 B4 1 0;403 F#4 1 0;402 D#5 2 0;402 D#6 2 0;404 F#5 1 0;404 D#5 2 0;404 D#6 2 0;405 C#5 1 0;406 B5 1 0;406 D#5 2 0;406 D#6 2 0;407 F#5 1 0;408 B3 1 0;409 D#4 1 0;410 C#4 1 0;411 D#4 1 0;412 B3 1 0;414 B3 1 0;415 C#4 1 0;416 D#4 1 0;417 B4 1 0;418 F#4 1 0;420 B3 1 0;421 D#4 1 0;422 C#4 1 0;423 D#4 1 0;424 B3 1 0;426 B3 1 0;427 C#4 1 0;428 D#4 1 0;429 B4 1 0;430 F#4 1 0;432 F6 1 0;432 A#3 1 0;432.6600036621094 F#6 1 0;433 C#4 1 
0;433.3299865722656 G6 1 0;434.010009765625 C4 1 0;434.010009765625 C#6 2 0;434.010009765625 G#6 2 0;435.010009765625 C#4 1 0;436.010009765625 A#3 1 0;436.010009765625 C#6 2 0;436.010009765625 G#6 2 0;438.010009765625 A#3 1 0;439.010009765625 C4 1 0;438.010009765625 C6 2 0;438.010009765625 G6 2 0;440.010009765625 C#4 1 0;440.010009765625 C6 2 0;440.010009765625 G6 2 0;441.010009765625 G#4 1 0;442.010009765625 F4 1 0;442.010009765625 G6 2 0;442.010009765625 C6 2 0;444.010009765625 F6 1 0;444.6700134277344 F#6 1 0;444.010009765625 F3 2 0;445.3399963378906 G6 1 0;446.0199890136719 C#6 2 0;446.0199890136719 G#3 2 0;446.0199890136719 G#6 2 0;448.0199890136719 G#6 2 0;448.0199890136719 C#6 2 0;448.0199890136719 C4 2 0;450.0199890136719 D#4 2 0;450.0199890136719 C6 2 0;450.0199890136719 G#6 2 0;452.0199890136719 G6 1 0;452.0199890136719 A#5 1 0;452.0199890136719 C4 1 0;453.0199890136719 G#3 1 0;453.0199890136719 F6 1 0;453.0199890136719 G#5 1 0;454.0199890136719 D#4 1 0;454.0199890136719 D#6 2 0;454.0199890136719 G#5 2 0;455.0199890136719 C4 1 0;456.0199890136719 B3 1 0;457.0199890136719 D#4 1 0;458.0199890136719 C#4 1 0;459.0199890136719 D#4 1 0;460.0199890136719 B3 1 0;462.0199890136719 B3 1 0;463.0199890136719 C#4 1 0;464.0199890136719 D#4 1 0;465.0199890136719 B4 1 0;466.0199890136719 F#4 1 0;468.0199890136719 B3 1 0;469.0199890136719 D#4 1 0;456.0199890136719 A#5 15 0;456.0199890136719 F6 15 0;470.0199890136719 C#4 1 0;471.0199890136719 D#4 1 0;472.0199890136719 B3 1 0;474.0199890136719 B3 1 0;475.0199890136719 C#4 1 0;476.0199890136719 D#4 1 0;477.0199890136719 B4 1 0;478.0199890136719 F#4 1 0;480.0199890136719 A#3 1 0;480.0199890136719 F6 1 0;480.67999267578125 F#6 1 0;481.0199890136719 C#4 1 0;481.3500061035156 G6 1 0;482.0299987792969 C4 1 0;482.0299987792969 C#6 2 0;482.0299987792969 G#6 2 0;483.0299987792969 C#4 1 0;484.0299987792969 A#3 1 0;484.0299987792969 C#6 2 0;484.0299987792969 G#6 2 0;486.0299987792969 A#3 1 0;487.0299987792969 C4 1 0;486.0299987792969 C6 2 0;486.0299987792969 G6 2 0;488.0299987792969 C#4 1 0;488.0299987792969 G6 2 0;488.0299987792969 C6 2 0;489.0299987792969 G#4 1 0;490.0299987792969 F4 1 0;490.0299987792969 G6 2 0;490.0299987792969 C6 2 0;492.0299987792969 D#5 2 0;492.0299987792969 G#3 2 0;494.0299987792969 C6 2 0;494.0299987792969 D#3 2 0;496.0299987792969 F3 2 0;496.0299987792969 A#5 2 0;498.0299987792969 G#5 1 0;498.0299987792969 G#3 2 0;499.0299987792969 F#5 1 0;500.0299987792969 F5 2 0;500.0299987792969 C4 2 0;502.0299987792969 G#5 2 0;502.0299987792969 D#4 2 0;504.0299987792969 B3 1 0;505.0299987792969 D#4 1 0;506.0299987792969 C#4 1 0;507.0299987792969 D#4 1 0;504.0299987792969 A#5 4 0;508.0299987792969 B3 1 0;508.0299987792969 C#6 2 0;510.0299987792969 B3 1 0;511.0299987792969 C#4 1 0;510.0299987792969 D#6 2 0;512.030029296875 D#4 1 0;512.030029296875 C#6 2 0;513.030029296875 B4 1 0;514.030029296875 F#4 1 0;514.030029296875 D#6 2 0;516.030029296875 B3 1 0;517.030029296875 D#4 1 0;518.030029296875 C#4 1 0;516.030029296875 F6 4 0;519.030029296875 D#4 1 0;520.030029296875 B3 1 0;520.030029296875 D#6 2 0;522.030029296875 B3 1 0;522.030029296875 F#6 2 0;523.030029296875 C#4 1 0;524.030029296875 D#4 1 0;524.030029296875 F6 2 0;525.030029296875 B4 1 0;526.030029296875 F#4 1 0;526.030029296875 D#6 2 0;528.030029296875 A#3 1 0;529.030029296875 C#4 1 0;530.030029296875 C4 1 0;528.030029296875 F6 4 0;531.030029296875 C#4 1 0;532.030029296875 A#3 1 0;532.030029296875 F#6 2 0;534.030029296875 F6 1 0;534.030029296875 A#3 1 0;535.030029296875 C4 1 0;535.030029296875 
F#6 1 0;536.030029296875 C#4 1 0;536.030029296875 D#6 2 0;537.030029296875 G#4 1 0;538.030029296875 F4 1 0;538.030029296875 F6 2 0;540.030029296875 F3 2 0;540.030029296875 C#6 4 0;542.030029296875 G#3 2 0;544.030029296875 D#6 2 0;544.030029296875 C4 2 0;546.030029296875 C6 1 0;547.030029296875 C#6 1 0;546.030029296875 D#4 2 0;548.030029296875 C4 1 0;548.030029296875 A#5 2 0;549.030029296875 G#3 1 0;550.030029296875 D#4 1 0;551.030029296875 C4 1 0;550.030029296875 G#5 2 0;552.030029296875 B3 1 0;553.030029296875 D#4 1 0;554.030029296875 C#4 1 0;552.030029296875 F5 4 0;555.030029296875 D#4 1 0;556.030029296875 B3 1 0;556.030029296875 F#5 2 0;558.030029296875 B3 1 0;559.030029296875 C#4 1 0;560.030029296875 D#4 1 0;558.030029296875 G#5 4 0;561.030029296875 B4 1 0;562.030029296875 F#4 1 0;562.030029296875 A#5 2 0;564.030029296875 B3 1 0;565.030029296875 D#4 1 0;566.030029296875 C#4 1 0;564.030029296875 F5 4 0;567.030029296875 D#4 1 0;568.030029296875 B3 1 0;568.030029296875 F#5 2 0;570.030029296875 B3 1 0;571.030029296875 C#4 1 0;572.030029296875 D#4 1 0;570.030029296875 G#5 4 0;573.030029296875 B4 1 0;574.030029296875 F#4 1 0;574.030029296875 A#5 2 0;576.030029296875 A#3 1 0;576.030029296875 F5 1 0;576.030029296875 F6 1 0;576.6900024414062 F#6 1 0;577.030029296875 C#4 1 0;577.030029296875 F#5 1 0;577.3599853515625 G6 1 0;578.0399780273438 C4 1 0;579.0399780273438 C#4 1 0;578.0399780273438 C#6 2 0;578.0399780273438 G#6 2 0;580.0399780273438 A#3 1 0;580.0399780273438 G#6 2 0;580.0399780273438 C#6 2 0;582.0399780273438 A#3 1 0;582.0399780273438 C6 2 0;582.0399780273438 G6 2 0;583.0399780273438 C4 1 0;584.0399780273438 C#4 1 0;585.0399780273438 G#4 1 0;584.0399780273438 C6 2 0;584.0399780273438 G6 2 0;586.0399780273438 F4 1 0;586.0399780273438 C6 2 0;586.0399780273438 G6 2 0;588.0399780273438 F6 1 0;588.7000122070312 F#6 1 0;588.0399780273438 F3 2 0;589.3800048828125 G6 1 0;590.0499877929688 G#6 2 0;590.0499877929688 C#6 2 0;590.0499877929688 G#3 2 0;578.0399780273438 F5 15 0;592.0499877929688 G#6 2 0;592.0499877929688 C4 2 0;592.0499877929688 C#6 2 0;594.0499877929688 D#4 2 0;594.0499877929688 C6 2 0;594.0499877929688 G#6 2 0;596.0499877929688 C4 1 0;596.0499877929688 G6 1 0;596.0499877929688 A#5 1 0;597.0499877929688 G#3 1 0;597.0499877929688 F6 1 0;597.0499877929688 G#5 1 0;598.0499877929688 D#4 1 0;598.0499877929688 D#6 2 0;598.0499877929688 G#5 2 0;599.0499877929688 C4 1 0;600.0499877929688 B3 1 0;601.0499877929688 D#4 1 0;602.0499877929688 C#4 1 0;600.0499877929688 A#5 4 0;603.0499877929688 D#4 1 0;604.0499877929688 B3 1 0;604.0499877929688 C#6 2 0;606.0499877929688 B3 1 0;606.0499877929688 D#6 2 0;607.0499877929688 C#4 1 0;608.0499877929688 D#4 1 0;608.0499877929688 C#6 2 0;609.0499877929688 B4 1 0;610.0499877929688 F#4 1 0;610.0499877929688 D#6 2 0;612.0499877929688 B3 1 0;613.0499877929688 D#4 1 0;614.0499877929688 C#4 1 0;612.0499877929688 F6 4 0;615.0499877929688 D#4 1 0;616.0499877929688 B3 1 0;616.0499877929688 D#6 2 0;618.0499877929688 B3 1 0;618.0499877929688 F#6 2 0;619.0499877929688 C#4 1 0;620.0499877929688 D#4 1 0;621.0499877929688 B4 1 0;620.0499877929688 F6 2 0;622.0499877929688 F#4 1 0;622.0499877929688 D#6 2 0;624.0499877929688 A#3 1 0;625.0499877929688 C#4 1 0;626.0499877929688 C4 1 0;624.0499877929688 F6 4 0;627.0499877929688 C#4 1 0;628.0499877929688 A#3 1 0;628.0499877929688 F#6 2 0;630.0499877929688 A#3 1 0;630.0499877929688 F6 1 0;631.0499877929688 F#6 1 0;631.0499877929688 C4 1 0;632.0499877929688 C#4 1 0;633.0499877929688 G#4 1 0;632.0499877929688 D#6 2 
0;634.0499877929688 F4 1 0;634.0499877929688 F6 2 0;636.0499877929688 F3 2 0;636.0499877929688 C#6 4 0;638.0499877929688 G#3 2 0;640.0499877929688 C4 2 0;640.0499877929688 D#6 2 0;642.0499877929688 C6 1 0;642.0499877929688 D#4 2 0;643.0499877929688 C#6 1 0;644.0499877929688 C4 1 0;644.0499877929688 A#5 2 0;645.0499877929688 G#3 1 0;646.0499877929688 D#4 1 0;646.0499877929688 G#5 2 0;647.0499877929688 C4 1 0;648.0499877929688 B3 1 0;649.0499877929688 D#4 1 0;650.0499877929688 C#4 1 0;651.0499877929688 D#4 1 0;648.0499877929688 F5 4 0;652.0499877929688 B3 1 0;652.0499877929688 F#5 2 0;654.0499877929688 B3 1 0;655.0499877929688 C#4 1 0;656.0499877929688 D#4 1 0;654.0499877929688 G#5 4 0;657.0499877929688 B4 1 0;658.0499877929688 F#4 1 0;658.0499877929688 A#5 2 0;660.0499877929688 B3 1 0;661.0499877929688 D#4 1 0;662.0499877929688 C#4 1 0;660.0499877929688 F5 4 0;663.0499877929688 D#4 1 0;664.0499877929688 B3 1 0;664.0499877929688 F#5 2 0;666.0499877929688 B3 1 0;667.0499877929688 C#4 1 0;668.0499877929688 D#4 1 0;669.0499877929688 B4 1 0;666.0499877929688 G#5 4 0;670.0499877929688 F#4 1 0;670.0499877929688 A#5 2 0;672.0499877929688 A#3 1 0;672.0499877929688 F5 1 0;673.0499877929688 C#4 1 0;673.0499877929688 F#5 1 0;674.0499877929688 C4 1 0;675.0499877929688 C#4 1 0;676.0499877929688 A#3 1 0;678.0499877929688 A#3 1 0;679.0499877929688 C4 1 0;680.0499877929688 C#4 1 0;674.0499877929688 F5 8 0;681.0499877929688 G#4 1 0;682.0499877929688 F4 1 0;684.0499877929688 G#3 2 0;684.0499877929688 D#6 2 0;684.0499877929688 G#5 2 0;686.0499877929688 D#6 2 0;686.0499877929688 C7 2 0;686.0499877929688 D#3 2 0;688.0499877929688 F3 2 0;688.0499877929688 C#6 2 0;688.0499877929688 A#6 2 0;690.0499877929688 G#6 1 0;690.0499877929688 C6 1 0;691.0499877929688 F#6 1 0;691.0499877929688 A#5 1 0;690.0499877929688 G#3 2 0;692.0499877929688 G#5 2 0;692.0499877929688 F6 2 0;692.0499877929688 C4 2 0;694.0499877929688 F5 2 0;694.0499877929688 G#6 2 0;694.0499877929688 D#4 2 0;696.0499877929688 F#3 2 0;698.0499877929688 C4 1 0;699.0499877929688 C#4 1 0;700.0499877929688 A#3 1 0;701.0499877929688 F3 1 0;702.0499877929688 F#3 2 0;696.0499877929688 A#5 8 0;696.0499877929688 F#5 8 0;704.0499877929688 C4 1 0;704.0499877929688 F5 2 0;705.0499877929688 C#4 1 0;704.0499877929688 C5 2 0;706.0499877929688 A#3 1 0;706.0499877929688 D#5 2 0;706.0499877929688 G#5 2 0;707.0499877929688 D#4 1 0;708.0499877929688 G#3 2 0;710.0499877929688 D#3 1 0;708.0499877929688 F5 4 0;711.0499877929688 F3 1 0;708.0499877929688 A#5 4 0;712.0499877929688 G#3 1 0;713.0499877929688 F3 1 0;712.0499877929688 F5 2 0;712.0499877929688 C#6 2 0;714.0499877929688 F#5 1 0;714.0499877929688 D#6 1 0;714.0499877929688 D#3 2 0;715.0499877929688 C#6 1 0;715.0499877929688 F5 1 0;716.0499877929688 D#5 2 0;716.0499877929688 F3 2 0;716.0499877929688 C6 2 0;718.0499877929688 G#3 2 0;718.0499877929688 F5 2 0;718.0499877929688 C#6 2 0;720.0499877929688 A#3 1 0;721.0499877929688 A#4 1 0;722.0499877929688 D#4 1 0;723.0499877929688 F4 1 0;724.0499877929688 C#4 1 0;725.0499877929688 G#3 1 0;726.0499877929688 A#3 1 0;727.0499877929688 A#4 1 0;728.0499877929688 D#4 1 0;729.0499877929688 F4 1 0;730.0499877929688 C#4 1 0;731.0499877929688 G#3 1 0;732.0499877929688 A#3 1 0;733.0499877929688 A#4 1 0;734.0499877929688 D#4 1 0;720.0499877929688 F5 15 0;720.0499877929688 A#5 15 0;735.0499877929688 F4 1 0;736.0499877929688 C#4 1 0;737.0499877929688 F3 1 0;738.0499877929688 A#5 2 0;738.0499877929688 F5 2 0;738.0499877929688 G#3 2 0;740.0499877929688 D#3 2 0;740.0499877929688 C#6 2 
0;740.0499877929688 G#5 2 0;742.0499877929688 A#5 2 0;742.0499877929688 D#6 2 0;742.0499877929688 F3 2 0;744.0499877929688 D#3 2 0;746.0499877929688 G#3 1 0;747.0499877929688 A#3 1 0;748.0499877929688 F#3 1 0;749.0499877929688 C#3 1 0;744.0499877929688 G#5 8 0;750.0499877929688 D#3 2 0;744.0499877929688 F6 8 0;752.0499877929688 G#3 1 0;753.0499877929688 A#3 1 0;752.0499877929688 G#5 2 0;752.0499877929688 C#6 2 0;754.0499877929688 A#5 2 0;754.0499877929688 D#6 2 0;754.0499877929688 C4 2 0;756.0499877929688 F3 2 0;758.0499877929688 C4 1 0;756.0499877929688 G#5 4 0;756.0499877929688 F6 4 0;759.0499877929688 G#4 1 0;760.0499877929688 F4 1 0;760.0499877929688 D#6 2 0;761.0499877929688 C4 1 0;760.0499877929688 A#5 2 0;762.0499877929688 G#5 1 0;762.0499877929688 C#6 1 0;762.0499877929688 F3 1 0;763.0499877929688 A#5 1 0;763.0499877929688 D#6 1 0;763.0499877929688 F4 1 0;764.0499877929688 G#3 1 0;764.0499877929688 D#5 2 0;764.0499877929688 C6 2 0;765.0499877929688 F4 1 0;766.0499877929688 A#3 1 0;766.0499877929688 F5 2 0;766.0499877929688 G#5 2 0;767.0499877929688 G#4 1 0;768.0499877929688 F#3 1 0;769.0499877929688 F#4 1 0;770.0499877929688 C#4 1 0;771.0499877929688 D#4 1 0;772.0499877929688 F4 1 0;773.0499877929688 F3 1 0;774.0499877929688 F#3 1 0;775.0499877929688 F#4 1 0;768.0499877929688 F5 8 0;768.0499877929688 A#5 8 0;776.0499877929688 C#4 1 0;777.0499877929688 D#4 1 0;776.0499877929688 C#5 2 0;778.0499877929688 F4 1 0;778.0499877929688 D#5 2 0;779.0499877929688 F#3 1 0;780.0499877929688 F3 1 0;781.0499877929688 F4 1 0;782.0499877929688 C4 1 0;783.0499877929688 G#4 1 0;780.0499877929688 F5 4 0;784.0499877929688 F4 1 0;784.0499877929688 D#5 2 0;785.0499877929688 C4 1 0;786.0499877929688 F3 1 0;786.0499877929688 C#5 1 0;787.0499877929688 D#5 1 0;787.0499877929688 F4 1 0;788.0499877929688 G#3 1 0;788.0499877929688 C5 2 0;789.0499877929688 F4 1 0;790.0499877929688 A#3 1 0;791.0499877929688 G#4 1 0;790.0499877929688 G#5 2 0;792.0499877929688 F#3 1 0;793.0499877929688 F#4 1 0;794.0499877929688 C#4 1 0;795.0499877929688 D#4 1 0;796.0499877929688 F4 1 0;797.0499877929688 F3 1 0;798.0499877929688 F#3 1 0;792.0499877929688 F5 8 0;799.0499877929688 F#4 1 0;800.0499877929688 C#4 1 0;800.0499877929688 G#5 2 0;800.0499877929688 C#6 2 0;801.0499877929688 D#4 1 0;802.0499877929688 F4 1 0;803.0499877929688 F#3 1 0;802.0499877929688 D#6 2 0;802.0499877929688 A#5 2 0;804.0499877929688 G#3 1 0;805.0499877929688 D#3 1 0;806.0499877929688 C4 1 0;804.0499877929688 F6 4 0;804.0499877929688 G#5 4 0;807.0499877929688 G#3 1 0;808.0499877929688 D#4 1 0;809.0499877929688 C4 1 0;808.0499877929688 A#5 2 0;808.0499877929688 D#6 2 0;810.0499877929688 G#5 1 0;810.0499877929688 C#6 1 0;810.0499877929688 G#4 1 0;811.0499877929688 D#6 1 0;811.0499877929688 G#3 1 0;811.0499877929688 A#5 1 0;812.0499877929688 D#4 2 0;812.0499877929688 D#5 2 0;812.0499877929688 C6 2 0;814.0499877929688 G#5 2 0;814.0499877929688 F5 2 0;814.0499877929688 C4 2 0;816.0499877929688 F#3 1 0;817.0499877929688 F#4 1 0;818.0499877929688 C#4 1 0;819.0499877929688 D#4 1 0;820.0499877929688 F4 1 0;821.0499877929688 F3 1 0;822.0499877929688 F#3 1 0;816.0499877929688 A#5 8 0;816.0499877929688 F5 8 0;823.0499877929688 F#4 1 0;824.0499877929688 C#4 1 0;824.0499877929688 C#6 2 0;824.0499877929688 G#5 2 0;825.0499877929688 D#4 1 0;826.0499877929688 F4 1 0;826.0499877929688 A#5 2 0;826.0499877929688 D#6 2 0;827.0499877929688 F#3 1 0;828.0499877929688 F3 1 0;829.0499877929688 F4 1 0;830.0499877929688 C4 1 0;828.0499877929688 F6 4 0;828.0499877929688 G#5 4 
0;831.0499877929688 G#4 1 0;832.0499877929688 F4 1 0;833.0499877929688 C4 1 0;832.0499877929688 D#6 2 0;832.0499877929688 A#5 2 0;834.0499877929688 G#5 1 0;834.0499877929688 C#6 1 0;834.0499877929688 F3 1 0;835.0499877929688 A#5 1 0;835.0499877929688 F4 1 0;835.0499877929688 D#6 1 0;836.0499877929688 G#3 1 0;836.0499877929688 D#5 2 0;836.0499877929688 C6 2 0;837.0499877929688 F4 1 0;838.0499877929688 A#3 1 0;838.0499877929688 F5 2 0;839.0499877929688 G#4 1 0;838.0499877929688 G#5 2 0;840.0499877929688 A#3 1 0;841.0499877929688 A#4 1 0;842.0499877929688 D#4 1 0;843.0499877929688 F4 1 0;844.0499877929688 C#4 1 0;845.0499877929688 G#3 1 0;846.0499877929688 A#3 1 0;847.0499877929688 A#4 1 0;848.0499877929688 D#4 1 0;849.0499877929688 F4 1 0;850.0499877929688 C#4 1 0;851.0499877929688 G#3 1 0;852.0499877929688 A#3 1 0;840.0499877929688 F5 15 0;840.0499877929688 A#5 15 0;855.0499877929688 A#3 1 0;858.0499877929688 A#3 1 0;860.0499877929688 A#3 1 0;862.0499877929688 A#3 1 0'
"""
Find a piece of music on onlinesequencer.net, click edit,
then select all notes with CTRL+A and copy them with CTRL+C.
Paste the string as shown above after removing ";:" from
the end and "Online Sequencer:120233:" from the start.
"""
mySong = music(song)
while True:
print(mySong.tick())
sleep(0.04)
| 1,637.636364
| 22,256
| 0.64451
|
b7485a75aa179467315f768ae92b0f88c262a8e3
| 6,697
|
py
|
Python
|
lang/da.py
|
TidB/wikitranslator
|
55472c0f479ced8cb4e3606c9f39d0846ad02a61
|
[
"Xnet",
"X11"
] | 2
|
2019-04-09T00:03:17.000Z
|
2019-04-09T00:03:27.000Z
|
lang/da.py
|
TidB/wikitranslator
|
55472c0f479ced8cb4e3606c9f39d0846ad02a61
|
[
"Xnet",
"X11"
] | null | null | null |
lang/da.py
|
TidB/wikitranslator
|
55472c0f479ced8cb4e3606c9f39d0846ad02a61
|
[
"Xnet",
"X11"
] | 6
|
2016-01-25T22:22:01.000Z
|
2020-10-20T05:48:29.000Z
|
# STANDARD
ADDEDTOGAME = "* Denne genstand blev tilføjet til spillet"
ALLCLASSESBOX = "[[All classes/da|Alle klasser]]"
ITEMLOOK = "Den fremstår som"
NOUNMARKER_INDEFINITE_COSMETIC = "(Navneordsmarkering)"
NOUNMARKER_INDEFINITE_SET = "(navneordsmarkering)"
NOUNMARKER_INDEFINITE_WEAPON = "(Navneordsmarkering)"
SENTENCE_1_ALL = "(NOUN MARKER) ”'{{{{item name|{item_name}}}}}”' er {noun_marker}{workshop_link}{promotional} {item_type} til {class_list}."
SENTENCE_1_COMMUNITY_COSMETIC = "[[Steam Workshop/da|Fællesskabs-lavet]]"
SENTENCE_1_COMMUNITY_WEAPON = "[[Steam Workshop/da|Fællesskabs-lavet]]"
SENTENCE_1_PROMO_COSMETIC = "[[Promotional items/da|promoverings]]"
SENTENCE_1_PROMO_WEAPON = "[[Promotional items/da|promoverings]]"
SENTENCE_1_COSMETIC = "[[Cosmetic item/da|kosmetisk genstand]]"
SENTENCE_1_SET = "[[Item set/da|genstandssæt]]"
SENTENCE_1_CLASSES_ALL = "alle [[Classes/da|klasser]]"
SENTENCE_1_CLASSES_ONE = "[[{class_name}/da|{loc_class_name}]]en"
SENTENCE_1_CLASSES_MORE = "og [[{class_name}/da|{loc_class_name}]]en."
SENTENCE_1_CLASSES_AND = "og"
SENTENCE_1_CLASSES_COMMA = ", "
SENTENCE_COMMUNITY = "Denne genstand blev {workshop_link} til [[Steam Workshop/da|Steam-værkstedet]]{custom_name}."
SENTENCE_COMMUNITY_LINK = "[{link} bidraget]"
SENTENCE_COMMUNITY_NAME = " under navnet \"{name}\""
SENTENCE_PROMOTIONAL = "Denne genstand er tildelt i [[Genuine/da|Ægte]] kvalitet til spillere, som købte [[{game_name}/da|{game_name}]]{steam}{date}"
SENTENCE_PROMOTIONAL_STEAM = " på [[Steam/da|Steam]]"
SENTENCE_PROMOTIONAL_DATE = " før {date}"
SENTENCE_SET = " Den blev tilføjet til spillet i {{{{Update link/da|{update}}}}}en."
SENTENCE_SET_INCLUDES = "Sættet inkluderer følgende genstande:"
SENTENCE_THUMBNAIL = "|Steam-værkstedets illustration af denne genstand."
SENTENCE_1_SUB_PRIMARY = "[[Weapon/da#{class_name}primary|primære våben]]"
SENTENCE_1_SUB_SECONDARY = "[[Weapon/da#{class_name}secondary|sekundære våben]]"
SENTENCE_1_SUB_MELEE = "[[Weapon/da#{class_name}melee|nærkampsvåben]]"
ITEM_FLAGS = {
"not usable in crafting": "kan ikke bruges i smedearbejde",
"not tradable": "Kan ikke byttes",
"not tradable or usable in crafting": "Kan ikke byttes eller bruges I smedearbejde",
}
ATTRIBUTES = {
"achievement item: not tradable": "Præstationsgenstand: Kan ikke byttes",
"holiday restriction: tf birthday": "Højtidsbegrænsning: TF-fødselsdag",
"holiday restriction: winter": "Højtidsbegræsning: Vinter",
"holiday restriction: halloween": "Højtidsbegræsning: Halloween",
"holiday restriction: halloween / full moon": "Højtidsbegræsning: Halloween/Fuldmåne",
"holiday restriction: halloween / full moon / valentine's day": "Højtidsbegræsning: Halloween/Fuldmåne/Valentinsdag",
}
CLASSES = {
}
HEADINGS = {
'as a crafting ingredient': "Som en ingrediens til smedearbejde",
'blueprint': "Arbejdstegning",
'bugs': "Fejl",
'crafting': "Smedearbejde",
'damage and function times': "Skade og funktion-tider",
'external links': "Eksterne links",
'gallery': "Galleri",
'item set': "Genstandssæt",
'notes': "Noter",
'painted variants': "Malede varianter",
'references': "",
'related achievements': "Relateret præstationer",
'see also': "Se også",
'strange variant': "Sær variant",
'styles': "Stile",
'trivia': "Paratviden",
'unused content': "Ikke-brugt indhold",
'update history': "Opdateringshistorik",
}
ITEM_LEVELS = {
'Apparel': "Beklædning",
'Apron': "Forklæde",
'Armband': "Armbånd",
'Aura of Incorruptibility': "Aura af Ubestikkelighed",
'Backpack': "Rygsæk",
'Badge': "Emblem",
'Balloon': "Ballon",
'Bandages': "Bandager",
'Bandana': "Bandana",
'Bandolier': "",
'Barbeque': "Barbeque",
'Beach Towel': "Strandhåndklæde",
'Bells': "Klokker",
'Bird Head': "Fuglehoved",
'Blueprints': "Planer",
'Bones': "Knogler",
'Bongos': "Bongoer",
'Boots': "Støvler",
'Botkiller': "Botkiller",
'Bottles': "Flasker",
'Cape': "Kappe",
'Championship Belt': "Mesterskabsbælte",
'Cigar': "Cigar",
'Coat': "Frakke",
'Coffin': "Kiste",
'Community Medal': "Fællesskabsmedalje",
'Conscience': "Samvittighed",
'Cooler': "Køler",
'Cosmetic Armor': "Kosmetisk rustning",
'Cosmetic Augmentation': "Kosmetisk augmentering",
'Cosmetic Axe': "Kosmetisk økse",
    'Cosmetic Knife': "Kosmetisk kniv",
'Costume Piece': "Kostumestykke",
'cursed soul': "Forbandet sjæl",
'Decorative Bombs': "Dekorative bomber",
'Duck': "And",
'Electronic Device': "Elektronisk enhed",
'Eye Patch': "Øjeklap",
'Eye Stalks': "",
'Facial Hair': "Ansigtsbehåring",
'Flair!': "Stil",
'Flip-Flops': "Klip-klapper",
'Fuel Tank': "Brændstofstank",
'Func_Medal': "",
'Futuristic Sound Device': "Futuristisk lydenhed",
'Ghost': "Spøgelse",
'Glasses': "Briller",
'Glove': "Handske",
'Gloves': "Handsker",
'Golf Clubs': "Golfkøller",
'Hair': "Hår",
'Hat': "Hat",
'Headgear': "Hovedudstyr",
'Headset': "Headset",
'Helmet': "Hjelm",
'Holiday Hat': "Helligdagshat",
'Holste': "Hylster",
'Hooves': "Hove",
'Kilt': "Kilt",
'Lantern': "Lanterne",
'Lunchbox': "Madkasse",
'Mascot': "Maskot",
'Mask': "Maske",
'Medal': "Medalje",
'Medallion': "Medaljon",
'Mystical Lamp': "Mystisk lampe",
'Necklace': "Halskæde",
'Party Favor': "Festtjeneste",
'Photograph': "Fotografi",
'Pin': "Knap",
'Pipe': "Pibe",
'Pocket Buddy': "Lommeven",
'Pocket Square': "Lommefirkant",
'Poncho': "Poncho",
'Puffy Shirt': "Oppustet skjorte",
'Pyrovision Goggles': "Pyrosynsbriller",
'quiver': "Pilekogger",
'Refreshment': "Forfriskning",
'Ring': "Ring",
'Robot': "Robot",
    'Safety Apparatus': "Sikkerhedsapparat",
'Satchel': "Taske",
'Scarf': "Halstørklæde",
'Science Project': "",
'Shield': "Skjold",
'Shirt': "Trøje",
'Shoes': "Sko",
'Skateboard': "Skateboard",
'Sled': "Slæde",
'Snow Globe': "Snekugle",
'Spikes': "Pigge",
'Spirit Animal': "Åndedyr",
'Spooky Companion': "Uhyggelig ledsager",
'Spurs': "Sporer",
'Squirrel': "Egern",
'Stethoscope': "Stetoskop",
'Stocking': "Strømper",
'Supplies': "Forsyninger",
'Tattoos': "Tatoveringer",
'Tentacles': "Tentakler",
'Tool Belt': "Værktøjsbælte",
'Tournament Medal': "Turneringsmedalje",
'Towel': "Håndtørklæde",
'Treasure': "Skat",
'Tuxedo': "Tuxedo",
'Undead Pet': "Udødt kæledyr",
'Uniform': "Uniform",
"Veteran's Beret": "Veteranens Beret ",
'Wings': "Vinger",
}
| 35.247368
| 149
| 0.657907
|
b98b99f1674ff9c2363e796c37d7fa62d78e8353
| 9,191
|
py
|
Python
|
simplify.py
|
Diffeomorphic/retarget-bvh
|
201d6f92c73a1af2837bd498ac5e1d86ef3a9756
|
[
"BSD-2-Clause"
] | 3
|
2021-07-15T02:46:28.000Z
|
2022-03-31T02:42:50.000Z
|
simplify.py
|
Diffeomorphic/retarget-bvh
|
201d6f92c73a1af2837bd498ac5e1d86ef3a9756
|
[
"BSD-2-Clause"
] | 1
|
2021-08-06T09:28:24.000Z
|
2021-08-06T09:28:24.000Z
|
simplify.py
|
Diffeomorphic/retarget-bvh
|
201d6f92c73a1af2837bd498ac5e1d86ef3a9756
|
[
"BSD-2-Clause"
] | 1
|
2020-11-22T18:35:20.000Z
|
2020-11-22T18:35:20.000Z
|
# ------------------------------------------------------------------------------
# BSD 2-Clause License
#
# Copyright (c) 2019, Thomas Larsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
import bpy
from math import pi
from . import utils
from .utils import *
#
# simplifyFCurves(context, rig, useVisible, useMarkers):
#
def simplifyFCurves(context, rig, useVisible, useMarkers):
scn = context.scene
act = getAction(rig)
if not act:
return
(fcurves, minTime, maxTime) = getActionFCurves(act, useVisible, useMarkers, scn)
if not fcurves:
return
for fcu in fcurves:
simplifyFCurve(fcu, rig.animation_data.action, scn.McpErrorLoc, scn.McpErrorRot, minTime, maxTime)
setInterpolation(rig)
print("Curves simplified")
return
#
# getActionFCurves(act, useVisible, useMarkers, scn):
#
def getActionFCurves(act, useVisible, useMarkers, scn):
if useVisible:
fcurves = []
for fcu in act.fcurves:
if not fcu.hide:
fcurves.append(fcu)
#print(fcu.data_path, fcu.array_index)
else:
fcurves = act.fcurves
if useMarkers:
(minTime, maxTime) = getMarkedTime(scn)
if minTime == None:
print("Need two selected markers")
return ([], 0, 0)
else:
(minTime, maxTime) = ('All', 0)
return (fcurves, minTime, maxTime)
#
# splitFCurvePoints(fcu, minTime, maxTime):
#
def splitFCurvePoints(fcu, minTime, maxTime):
if minTime == 'All':
points = fcu.keyframe_points
before = []
after = []
else:
points = []
before = []
after = []
for pt in fcu.keyframe_points:
t = pt.co[0]
if t < minTime:
before.append(pt.co)
elif t > maxTime:
after.append(pt.co)
else:
points.append(pt)
return (points, before, after)
#
# simplifyFCurve(fcu, act, maxErrLoc, maxErrRot, minTime, maxTime):
#
def simplifyFCurve(fcu, act, maxErrLoc, maxErrRot, minTime, maxTime):
#print("WARNING: F-curve simplification turned off")
#return
words = fcu.data_path.split('.')
if words[-1] == 'location':
maxErr = maxErrLoc
elif words[-1] == 'rotation_quaternion':
maxErr = maxErrRot * 1.0/180
elif words[-1] == 'rotation_euler':
maxErr = maxErrRot * pi/180
else:
raise MocapError("Unknown FCurve type %s" % words[-1])
(points, before, after) = splitFCurvePoints(fcu, minTime, maxTime)
nPoints = len(points)
nBefore = len(before)
nAfter = len(after)
if nPoints <= 2:
return
keeps = []
new = [0, nPoints-1]
while new:
keeps += new
keeps.sort()
new = iterateFCurves(points, keeps, maxErr)
newVerts = []
for n in keeps:
newVerts.append(points[n].co.copy())
nNewPoints = len(newVerts)
oldOffset = nBefore+nPoints
newOffset = nBefore+nNewPoints
for n in range(nAfter):
fcu.keyframe_points[n+newOffset].co = fcu.keyframe_points[n+oldOffset].co.copy()
n = nBefore+nPoints+nAfter
n1 = nBefore+nNewPoints+nAfter
while n > n1:
n -= 1
kp = fcu.keyframe_points[n]
fcu.keyframe_points.remove(kp)
for n in range(nNewPoints):
fcu.keyframe_points[n+nBefore].co = newVerts[n]
return
#
# iterateFCurves(points, keeps, maxErr):
#
def iterateFCurves(points, keeps, maxErr):
new = []
for edge in range(len(keeps)-1):
n0 = keeps[edge]
n1 = keeps[edge+1]
(x0, y0) = points[n0].co
(x1, y1) = points[n1].co
if x1 > x0:
dxdn = (x1-x0)/(n1-n0)
dydx = (y1-y0)/(x1-x0)
err = 0
for n in range(n0+1, n1):
(x, y) = points[n].co
xn = n0 + dxdn*(n-n0)
yn = y0 + dydx*(xn-x0)
if abs(y-yn) > err:
err = abs(y-yn)
worst = n
if err > maxErr:
new.append(worst)
return new
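# Illustrative worked trace (hedged comment, using hypothetical keyframe values): with
# points whose .co values are (0,0), (1,0), (2,5), (3,0), (4,0), keeps=[0, 4] and
# maxErr=1, the line interpolated between points 0 and 4 is y=0, the largest deviation
# (5.0) occurs at index 2, so iterateFCurves() returns [2]; the caller then refines
# keeps to [0, 2, 4] and a second pass finds no deviation above maxErr.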
#
# rescaleFCurves(context, rig, factor):
#
def rescaleFCurves(context, rig, factor):
act = getAction(context.object)
if not act:
return
for fcu in act.fcurves:
rescaleFCurve(fcu, factor)
print("Curves rescaled")
return
#
# rescaleFCurve(fcu, factor):
#
def rescaleFCurve(fcu, factor):
n = len(fcu.keyframe_points)
if n < 2:
return
(t0,v0) = fcu.keyframe_points[0].co
(tn,vn) = fcu.keyframe_points[n-1].co
limitData = getFCurveLimits(fcu)
(mode, upper, lower, diff) = limitData
tm = t0
vm = v0
inserts = []
for pk in fcu.keyframe_points:
(tk,vk) = pk.co
tn = factor*(tk-t0) + t0
if upper:
if (vk > upper) and (vm < lower):
inserts.append((tm, vm, tn, vk))
elif (vm > upper) and (vk < lower):
inserts.append((tm, vm, tn,vk))
pk.co = (tn,vk)
tm = tn
vm = vk
addFCurveInserts(fcu, inserts, limitData)
return
#
# getFCurveLimits(fcu):
#
def getFCurveLimits(fcu):
words = fcu.data_path.split('.')
mode = words[-1]
if mode == 'rotation_euler':
upper = 0.8*pi
lower = -0.8*pi
diff = pi
elif mode == 'rotation_quaternion':
upper = 0.8
lower = -0.8
diff = 2
else:
upper = 0
lower = 0
diff = 0
#print(words[1], mode, upper, lower)
return (mode, upper, lower, diff)
#
# addFCurveInserts(fcu, inserts, limitData):
#
def addFCurveInserts(fcu, inserts, limitData):
(mode, upper, lower, diff) = limitData
for (tm,vm,tn,vn) in inserts:
tp = int((tm+tn)/2 - 0.1)
tq = tp + 1
vp = (vm+vn)/2
if vm > upper:
vp += diff/2
vq = vp - diff
elif vm < lower:
vp -= diff/2
vq = vp + diff
if tp > tm:
fcu.keyframe_points.insert(frame=tp, value=vp)
if tq < tn:
fcu.keyframe_points.insert(frame=tq, value=vq)
return
########################################################################
#
# class MCP_OT_SimplifyFCurves(bpy.types.Operator):
#
class MCP_OT_SimplifyFCurves(bpy.types.Operator):
bl_idname = "mcp.simplify_fcurves"
bl_label = "Simplify FCurves"
bl_options = {'UNDO'}
def execute(self, context):
try:
scn = context.scene
simplifyFCurves(context, context.object, scn.McpSimplifyVisible, scn.McpSimplifyMarkers)
except MocapError:
bpy.ops.mcp.error('INVOKE_DEFAULT')
return{'FINISHED'}
class MCP_OT_RescaleFCurves(bpy.types.Operator):
bl_idname = "mcp.rescale_fcurves"
bl_label = "Rescale FCurves"
bl_options = {'UNDO'}
def execute(self, context):
try:
scn = context.scene
rescaleFCurves(context, context.object, scn.McpRescaleFactor)
except MocapError:
bpy.ops.mcp.error('INVOKE_DEFAULT')
return{'FINISHED'}
#----------------------------------------------------------
# Initialize
#----------------------------------------------------------
classes = [
MCP_OT_SimplifyFCurves,
MCP_OT_RescaleFCurves,
]
def initialize():
for cls in classes:
bpy.utils.register_class(cls)
def uninitialize():
for cls in classes:
bpy.utils.unregister_class(cls)
| 29.364217
| 107
| 0.555543
|
27cc4e48473940a0ed28f12c7c9532ab618cb0d2
| 5,338
|
py
|
Python
|
supervisor/supervisor.py
|
issacg/supervisor
|
53fa8e48c0689669f4ea56afc8ea0b4e398aa69c
|
[
"Apache-2.0"
] | null | null | null |
supervisor/supervisor.py
|
issacg/supervisor
|
53fa8e48c0689669f4ea56afc8ea0b4e398aa69c
|
[
"Apache-2.0"
] | null | null | null |
supervisor/supervisor.py
|
issacg/supervisor
|
53fa8e48c0689669f4ea56afc8ea0b4e398aa69c
|
[
"Apache-2.0"
] | null | null | null |
"""Home Assistant control object."""
import asyncio
from contextlib import suppress
from ipaddress import IPv4Address
import logging
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Awaitable, Optional
import aiohttp
from .const import URL_HASSIO_APPARMOR, SUPERVISOR_VERSION
from .coresys import CoreSys, CoreSysAttributes
from .docker.stats import DockerStats
from .docker.supervisor import DockerSupervisor
from .exceptions import (
DockerAPIError,
HostAppArmorError,
SupervisorError,
SupervisorUpdateError,
)
_LOGGER: logging.Logger = logging.getLogger(__name__)
class Supervisor(CoreSysAttributes):
"""Home Assistant core object for handle it."""
def __init__(self, coresys: CoreSys):
"""Initialize hass object."""
self.coresys: CoreSys = coresys
self.instance: DockerSupervisor = DockerSupervisor(coresys)
async def load(self) -> None:
"""Prepare Home Assistant object."""
try:
await self.instance.attach(tag="latest")
except DockerAPIError:
_LOGGER.fatal("Can't setup Supervisor Docker container!")
with suppress(DockerAPIError):
await self.instance.cleanup()
# Check privileged mode
if not self.instance.privileged:
_LOGGER.error(
"Supervisor does not run in Privileged mode. Hassio runs with limited functionality!"
)
@property
def ip_address(self) -> IPv4Address:
"""Return IP of Supervisor instance."""
return self.instance.ip_address
@property
def need_update(self) -> bool:
"""Return True if an update is available."""
return self.version != self.latest_version
@property
def version(self) -> str:
"""Return version of running Home Assistant."""
return SUPERVISOR_VERSION
@property
def latest_version(self) -> str:
"""Return last available version of Home Assistant."""
return self.sys_updater.version_supervisor
@property
def image(self) -> str:
"""Return image name of Home Assistant container."""
return self.instance.image
@property
def arch(self) -> str:
"""Return arch of the Supervisor container."""
return self.instance.arch
async def update_apparmor(self) -> None:
"""Fetch last version and update profile."""
url = URL_HASSIO_APPARMOR
try:
_LOGGER.info("Fetch AppArmor profile %s", url)
async with self.sys_websession.get(url, timeout=10) as request:
data = await request.text()
except (aiohttp.ClientError, asyncio.TimeoutError) as err:
_LOGGER.warning("Can't fetch AppArmor profile: %s", err)
raise SupervisorError() from None
with TemporaryDirectory(dir=self.sys_config.path_tmp) as tmp_dir:
profile_file = Path(tmp_dir, "apparmor.txt")
try:
profile_file.write_text(data)
except OSError as err:
_LOGGER.error("Can't write temporary profile: %s", err)
raise SupervisorError() from None
try:
await self.sys_host.apparmor.load_profile(
"hassio-supervisor", profile_file
)
except HostAppArmorError:
_LOGGER.error("Can't update AppArmor profile!")
raise SupervisorError() from None
async def update(self, version: Optional[str] = None) -> None:
"""Update Home Assistant version."""
version = version or self.latest_version
if version == self.sys_supervisor.version:
_LOGGER.warning("Version %s is already installed", version)
return
_LOGGER.info("Update Supervisor to version %s", version)
try:
await self.instance.install(
version, image=self.sys_updater.image_supervisor
)
await self.instance.update_start_tag(
self.sys_updater.image_supervisor, version
)
except DockerAPIError:
_LOGGER.error("Update of Supervisor fails!")
raise SupervisorUpdateError() from None
else:
self.sys_config.version = version
self.sys_config.save_data()
with suppress(SupervisorError):
await self.update_apparmor()
self.sys_loop.call_later(5, self.sys_loop.stop)
@property
def in_progress(self) -> bool:
"""Return True if a task is in progress."""
return self.instance.in_progress
def logs(self) -> Awaitable[bytes]:
"""Get Supervisor docker logs.
Return Coroutine.
"""
return self.instance.logs()
async def stats(self) -> DockerStats:
"""Return stats of Supervisor."""
try:
return await self.instance.stats()
except DockerAPIError:
raise SupervisorError() from None
async def repair(self):
"""Repair local Supervisor data."""
if await self.instance.exists():
return
_LOGGER.info("Repair Supervisor %s", self.version)
try:
await self.instance.retag()
except DockerAPIError:
_LOGGER.error("Repairing of Supervisor fails")
| 32.54878
| 101
| 0.628887
|
9d524f70a294758510d5896642589e7e3876572e
| 651
|
py
|
Python
|
poetry/utils/helpers.py
|
blueyed/poetry
|
8708540fd4e1e034aa9fbee9398980d80a4a04d3
|
[
"MIT"
] | null | null | null |
poetry/utils/helpers.py
|
blueyed/poetry
|
8708540fd4e1e034aa9fbee9398980d80a4a04d3
|
[
"MIT"
] | null | null | null |
poetry/utils/helpers.py
|
blueyed/poetry
|
8708540fd4e1e034aa9fbee9398980d80a4a04d3
|
[
"MIT"
] | null | null | null |
import re
import shutil
import tempfile
from contextlib import contextmanager
_canonicalize_regex = re.compile('[-_.]+')
def canonicalize_name(name): # type: (str) -> str
return _canonicalize_regex.sub('-', name).lower()
def module_name(name): # type: (str) -> str
return canonicalize_name(name).replace('-', '_')
@contextmanager
def temporary_directory(*args, **kwargs):
try:
from tempfile import TemporaryDirectory
with TemporaryDirectory(*args, **kwargs) as name:
yield name
except ImportError:
name = tempfile.mkdtemp(*args, **kwargs)
yield name
shutil.rmtree(name)
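# Illustrative usage sketch (hedged; the example inputs below are hypothetical):
if __name__ == '__main__':
    assert canonicalize_name('Foo_Bar.baz') == 'foo-bar-baz'
    assert module_name('Foo_Bar.baz') == 'foo_bar_baz'
    with temporary_directory(prefix='poetry-example-') as tmp:
        print('created temporary directory:', tmp)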
| 21
| 57
| 0.666667
|
db23c075c8efc45c2262a81a16e14160d67d907f
| 2,452
|
py
|
Python
|
test/functional/node_network_limited.py
|
wolfoxonly/ofic
|
dea8f4aa577999947e1c8f969af60a03e13cae00
|
[
"MIT"
] | null | null | null |
test/functional/node_network_limited.py
|
wolfoxonly/ofic
|
dea8f4aa577999947e1c8f969af60a03e13cae00
|
[
"MIT"
] | null | null | null |
test/functional/node_network_limited.py
|
wolfoxonly/ofic
|
dea8f4aa577999947e1c8f969af60a03e13cae00
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The OFIChain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata
from test_framework.mininode import NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS, NetworkThread, P2PInterface
from test_framework.test_framework import OFIChainTestFramework
from test_framework.util import assert_equal
class P2PIgnoreInv(P2PInterface):
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(OFIChainTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-prune=550']]
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
NetworkThread().start()
node.wait_for_verack()
expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
blocks = self.nodes[0].generate(292)
self.log.info("Make sure we can max retrive block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
| 42.275862
| 111
| 0.726346
|
627b39b2b1b6e900c0c565c904b5b30ee6f661b2
| 8,803
|
py
|
Python
|
core/gdrn_modeling/models/pvnet_net/resnet.py
|
AlbertoRemus/GDR_Net
|
114cff27c6fc6048724a6f2bdce2306ab51d798e
|
[
"Apache-2.0"
] | 132
|
2021-02-25T10:45:29.000Z
|
2022-03-30T06:54:26.000Z
|
core/gdrn_modeling/models/pvnet_net/resnet.py
|
AlbertoRemus/GDR_Net
|
114cff27c6fc6048724a6f2bdce2306ab51d798e
|
[
"Apache-2.0"
] | 69
|
2021-03-23T12:26:17.000Z
|
2022-03-29T09:08:11.000Z
|
core/gdrn_modeling/models/pvnet_net/resnet.py
|
AlbertoRemus/GDR_Net
|
114cff27c6fc6048724a6f2bdce2306ab51d798e
|
[
"Apache-2.0"
] | 23
|
2021-03-26T06:21:32.000Z
|
2022-03-23T23:53:51.000Z
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import numpy as np
__all__ = ["ResNet", "resnet18", "resnet34", "resnet50", "resnet101", "resnet152"]
model_urls = {
"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
"resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
"resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
"resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
"resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
}
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding."""
kernel_size = np.asarray((3, 3))
# Compute the size of the upsampled filter with
# a specified dilation rate.
upsampled_kernel_size = (kernel_size - 1) * (dilation - 1) + kernel_size
# Determine the padding that is necessary for full padding,
# meaning the output spatial size is equal to input spatial size
full_padding = (upsampled_kernel_size - 1) // 2
# Conv2d doesn't accept numpy arrays as arguments
full_padding, kernel_size = tuple(full_padding), tuple(kernel_size)
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=full_padding,
dilation=dilation,
bias=False,
)
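# Worked example of the padding arithmetic above (illustrative comment): for
# kernel_size=3 and dilation=2 the upsampled kernel spans (3-1)*(2-1)+3 = 5 samples,
# so full_padding = (5-1)//2 = 2 and, with stride=1, the output keeps the input's
# spatial size.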
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=stride, dilation=dilation)
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
# padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self, block, layers, num_classes=1000, fully_conv=False, remove_avg_pool_layer=False, output_stride=32
):
# Add additional variables to track
# output stride. Necessary to achieve
# specified output stride.
self.output_stride = output_stride
self.current_stride = 4
self.current_dilation = 1
self.remove_avg_pool_layer = remove_avg_pool_layer
self.inplanes = 64
self.fully_conv = fully_conv
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
if self.fully_conv:
self.avgpool = nn.AvgPool2d(7, padding=3, stride=1)
# In the latest unstable torch 4.0 the tensor.copy_
# method was changed and doesn't work as it used to be
# self.fc = nn.Conv2d(512 * block.expansion, num_classes, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
# Check if we already achieved desired output stride.
if self.current_stride == self.output_stride:
# If so, replace subsampling with a dilation to preserve
# current spatial resolution.
self.current_dilation = self.current_dilation * stride
stride = 1
else:
# If not, perform subsampling and update current
# new output stride.
self.current_stride = self.current_stride * stride
# We don't dilate 1x1 convolution.
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, dilation=self.current_dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=self.current_dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x2s = self.relu(x)
x = self.maxpool(x2s)
x4s = self.layer1(x)
x8s = self.layer2(x4s)
x16s = self.layer3(x8s)
x32s = self.layer4(x16s)
x = x32s
if not self.remove_avg_pool_layer:
x = self.avgpool(x)
if not self.fully_conv:
x = x.view(x.size(0), -1)
xfc = self.fc(x)
return x2s, x4s, x8s, x16s, x32s, xfc
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet18"]))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet34"]))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet50"]))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet101"]))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet152"]))
return model
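# Illustrative usage sketch (hedged; assumes torch is installed and uses a random
# dummy input). forward() returns the intermediate feature maps x2s..x32s at strides
# 2, 4, 8, 16 and 32 together with the fully connected output.
if __name__ == "__main__":
    import torch
    net = resnet18(pretrained=False)
    feats = net(torch.randn(1, 3, 224, 224))
    x2s, x4s, x8s, x16s, x32s, xfc = feats
    print([tuple(t.shape) for t in feats])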
| 31.779783
| 110
| 0.619107
|
9435089eb829fecacc5997189c7278f92a066fa9
| 3,775
|
py
|
Python
|
Pi Driver.py
|
OwenSawyer/PiBot
|
ccb51b168cb418a413250bc136b4385b9cf76e93
|
[
"MIT"
] | 1
|
2015-09-03T01:16:54.000Z
|
2015-09-03T01:16:54.000Z
|
Pi Driver.py
|
owen135731/PiBot
|
ccb51b168cb418a413250bc136b4385b9cf76e93
|
[
"MIT"
] | null | null | null |
Pi Driver.py
|
owen135731/PiBot
|
ccb51b168cb418a413250bc136b4385b9cf76e93
|
[
"MIT"
] | null | null | null |
from pubnub import Pubnub
import json,time
try:
import RPi.GPIO as GPIO
except RuntimeError:
print "Error importing RPi.GPIO! This is probably because you need superuser privileges. You can achieve this by using 'sudo' to run your script"
#Setup GPIO
GPIO.setmode(GPIO.BOARD)
#Setup PubNub
pubnub = Pubnub(publish_key="pub-c-b72e8d8e-4e01-4896-ae5a-ad671c84ebc2",subscribe_key="sub-c-74baf600-439b-11e5-a9f1-02ee2ddab7fe")
pubnubChannelName = 'gpio-raspberry-control'
#Setup movement status flags
forward = False
backward = False
left = False
right = False
#GPIO ports (BOARD mode)
#R Wheel -> 19 (ON/OFF)
#R Dir -> 22 (Forward/Backward)
#L Wheel -> 11
#L Dir -> 7
#PubNub Channel Subscribe Callback
def gpioCallback(msg,channel):
global forward
global backward
global left
global right
respstring = ''
command = msg
print "Command is : " + str(command)
if('req' in command):
if(command['req'] == 'forward'):
if(forward):
respstring = 'forward stop'
forward = False
backward = False
left = False
right = False
gpiostop()
else:
forward = True
respstring = 'forward'
GPIO.output(19, True)
GPIO.output(22, True)
GPIO.output(11, True)
GPIO.output(7, True)
respmsg = {"resp" : respstring }
pubnub.publish(pubnubChannelName, respmsg)
elif(command['req'] == 'backward'):
if(backward):
respstring = 'backward stop'
forward = False
backward = False
left = False
right = False
gpiostop()
else:
backward = True;
respstring = 'backward'
GPIO.output(19, True)
GPIO.output(22, False)
GPIO.output(11, True)
GPIO.output(7, False)
respmsg = {"resp" : respstring }
pubnub.publish(pubnubChannelName, respmsg)
elif(command['req'] == 'left'):
if(left):
respstring = 'left stop'
forward = False
backward = False
left = False
right = False
gpiostop()
else:
left = True
respstring = 'left'
GPIO.output(19, True)
GPIO.output(22, True)
GPIO.output(11, True)
GPIO.output(7, False)
respmsg = {"resp" : respstring }
pubnub.publish(pubnubChannelName, respmsg)
elif(command['req'] == 'right'):
if(right):
respstring = 'right stop'
forward = False
backward = False
left = False
right = False
gpiostop()
else:
right = True
respstring = 'right'
GPIO.output(19, True)
GPIO.output(22, False)
GPIO.output(11, True)
GPIO.output(7, True)
respmsg = {"resp" : respstring }
pubnub.publish(pubnubChannelName, respmsg)
elif(command['req'] == 'stop'):
respstring = 'stop'
forward = False
backward = False
left = False
right = False
gpiostop()
respmsg = {"resp" : respstring }
pubnub.publish(pubnubChannelName, respmsg)
elif(command['req'] == 'kill'):
respstring = 'Shutdown'
forward = False
backward = False
left = False
right = False
gpiostop()
GPIO.cleanup()
respmsg = {"resp" : respstring }
pubnub.publish(pubnubChannelName, respmsg)
#PubNub Channel Subscribe Callback
#def gpioError(msg):
# print 'Error :' + msg
def gpiostop():
GPIO.output(19, False)
GPIO.output(22, False)
GPIO.output(11, False)
GPIO.output(7, False)
if __name__ == '__main__':
GPIO.setup(19, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(7, GPIO.OUT)
pubnub.subscribe(pubnubChannelName, gpioCallback, {"resp":"Error"})
while True:
time.sleep(5000)
if(GPIO.gpio_function(19) or GPIO.gpio_function(22) or
GPIO.gpio_function(11) or GPIO.gpio_function(7)):
##All is over
break
| 19.661458
| 151
| 0.627285
|
0c2ca77da16ec200431e9f5f0585ffb801ae2d49
| 3,488
|
py
|
Python
|
venv/lib/python3.9/site-packages/tinycss2/nth.py
|
almmello/frozen
|
c9928491f694b56a0023926bc763c703ba1fd75a
|
[
"BSD-2-Clause"
] | 87
|
2017-10-02T14:43:10.000Z
|
2022-02-07T18:21:03.000Z
|
venv/lib/python3.9/site-packages/tinycss2/nth.py
|
almmello/frozen
|
c9928491f694b56a0023926bc763c703ba1fd75a
|
[
"BSD-2-Clause"
] | 36
|
2017-07-01T16:40:27.000Z
|
2022-02-22T22:02:22.000Z
|
venv/lib/python3.9/site-packages/tinycss2/nth.py
|
almmello/frozen
|
c9928491f694b56a0023926bc763c703ba1fd75a
|
[
"BSD-2-Clause"
] | 13
|
2018-03-01T20:28:21.000Z
|
2021-06-01T14:08:25.000Z
|
import re
from .parser import _next_significant, _to_token_iterator
def parse_nth(input):
"""Parse `<An+B> <https://drafts.csswg.org/css-syntax-3/#anb>`_,
as found in `:nth-child()
<https://drafts.csswg.org/selectors/#nth-child-pseudo>`_
and related Selector pseudo-classes.
Although tinycss2 does not include a full Selector parser,
this bit of syntax is included as it is particularly tricky to define
on top of a CSS tokenizer.
:type input: :obj:`str` or :term:`iterable`
:param input: A string or an iterable of :term:`component values`.
:returns:
A ``(a, b)`` tuple of integers, or :obj:`None` if the input is invalid.
"""
tokens = _to_token_iterator(input, skip_comments=True)
token = _next_significant(tokens)
if token is None:
return
token_type = token.type
if token_type == 'number' and token.is_integer:
return parse_end(tokens, 0, token.int_value)
elif token_type == 'dimension' and token.is_integer:
unit = token.lower_unit
if unit == 'n':
return parse_b(tokens, token.int_value)
elif unit == 'n-':
return parse_signless_b(tokens, token.int_value, -1)
else:
match = N_DASH_DIGITS_RE.match(unit)
if match:
return parse_end(tokens, token.int_value, int(match.group(1)))
elif token_type == 'ident':
ident = token.lower_value
if ident == 'even':
return parse_end(tokens, 2, 0)
elif ident == 'odd':
return parse_end(tokens, 2, 1)
elif ident == 'n':
return parse_b(tokens, 1)
elif ident == '-n':
return parse_b(tokens, -1)
elif ident == 'n-':
return parse_signless_b(tokens, 1, -1)
elif ident == '-n-':
return parse_signless_b(tokens, -1, -1)
elif ident[0] == '-':
match = N_DASH_DIGITS_RE.match(ident[1:])
if match:
return parse_end(tokens, -1, int(match.group(1)))
else:
match = N_DASH_DIGITS_RE.match(ident)
if match:
return parse_end(tokens, 1, int(match.group(1)))
elif token == '+':
token = next(tokens) # Whitespace after an initial '+' is invalid.
if token.type == 'ident':
ident = token.lower_value
if ident == 'n':
return parse_b(tokens, 1)
elif ident == 'n-':
return parse_signless_b(tokens, 1, -1)
else:
match = N_DASH_DIGITS_RE.match(ident)
if match:
return parse_end(tokens, 1, int(match.group(1)))
def parse_b(tokens, a):
token = _next_significant(tokens)
if token is None:
return (a, 0)
elif token == '+':
return parse_signless_b(tokens, a, 1)
elif token == '-':
return parse_signless_b(tokens, a, -1)
elif (token.type == 'number' and token.is_integer and
token.representation[0] in '-+'):
return parse_end(tokens, a, token.int_value)
def parse_signless_b(tokens, a, b_sign):
token = _next_significant(tokens)
if (token.type == 'number' and token.is_integer and
not token.representation[0] in '-+'):
return parse_end(tokens, a, b_sign * token.int_value)
def parse_end(tokens, a, b):
if _next_significant(tokens) is None:
return (a, b)
N_DASH_DIGITS_RE = re.compile('^n(-[0-9]+)$')
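# Illustrative usage sketch (hedged; run as ``python -m tinycss2.nth`` since the
# module relies on relative imports):
if __name__ == '__main__':
    assert parse_nth('odd') == (2, 1)
    assert parse_nth('even') == (2, 0)
    assert parse_nth('2n+1') == (2, 1)
    assert parse_nth('foo') is None
    print('parse_nth examples passed')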
| 34.534653
| 79
| 0.584289
|
970291f504f307a281218759223c2abb46125e59
| 1,318
|
py
|
Python
|
sdwan/sdwan/devicestate.py
|
sambyers/netauto_learning
|
22c1049bf86e188f774f1c977823abea2bb3abfe
|
[
"MIT"
] | null | null | null |
sdwan/sdwan/devicestate.py
|
sambyers/netauto_learning
|
22c1049bf86e188f774f1c977823abea2bb3abfe
|
[
"MIT"
] | null | null | null |
sdwan/sdwan/devicestate.py
|
sambyers/netauto_learning
|
22c1049bf86e188f774f1c977823abea2bb3abfe
|
[
"MIT"
] | null | null | null |
class DeviceState():
def __init__(self, session):
self.session = session
def get_devices(self, params: dict = None) -> dict:
url = f'{self.session.api_url}/device/'
r = self.session.get(url, params=params)
return r.json()
def get_interface_stats(self, params: dict = None) -> dict:
url = f'{self.session.api_url}/statistics/interface'
r = self.session.get(url, params=params)
return r.json()
def get_alarms(self, params: dict = None) -> dict:
url = f'{self.session.api_url}/alarms'
r = self.session.get(url, params=params)
return r.json()
def get_device_status(self) -> dict:
url = f'{self.session.api_url}/device/status'
r = self.session.get(url)
return r.json()
def get_vedge_inventory_detail(self) -> dict:
url = f'{self.session.api_url}/device/vedgeinventory/detail'
r = self.session.get(url)
return r.json()
def get_vedge_inventory_summary(self) -> dict:
url = f'{self.session.api_url}/device/vedgeinventory/summary'
r = self.session.get(url)
return r.json()
def get_vmanage_systemip(self) -> dict:
url = f'{self.session.api_url}/device/vmanage'
r = self.session.get(url)
return r.json()
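# Illustrative usage sketch (hedged): the concrete session class is defined elsewhere
# in this package, so the minimal stand-in below only shows the interface DeviceState
# expects -- an ``api_url`` attribute and a requests-style ``get()`` whose return value
# has ``.json()``. The vManage URL and the fake payload are hypothetical.
if __name__ == '__main__':
    class _FakeResponse:
        def json(self):
            return {'data': []}
    class _FakeSession:
        api_url = 'https://vmanage.example.com/dataservice'
        def get(self, url, params=None):
            print('GET', url, params)
            return _FakeResponse()
    state = DeviceState(_FakeSession())
    print(state.get_devices())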
| 32.146341
| 69
| 0.612291
|
d182b004094d9d92721d17125669d28978a7c7f0
| 215
|
py
|
Python
|
d6tflow/tasks/torch.py
|
Mozin/d6tflow
|
e332e2244d5d98362eb7d15b4c7800a14a4a56e5
|
[
"MIT"
] | 1,004
|
2019-01-11T09:59:26.000Z
|
2022-03-31T17:48:50.000Z
|
d6tflow/tasks/torch.py
|
Mozin/d6tflow
|
e332e2244d5d98362eb7d15b4c7800a14a4a56e5
|
[
"MIT"
] | 27
|
2019-03-28T02:29:40.000Z
|
2022-02-07T12:38:17.000Z
|
d6tflow/tasks/torch.py
|
Mozin/d6tflow
|
e332e2244d5d98362eb7d15b4c7800a14a4a56e5
|
[
"MIT"
] | 73
|
2019-02-27T03:13:15.000Z
|
2021-08-29T06:10:25.000Z
|
from d6tflow.tasks import TaskData
from d6tflow.targets.torch import PyTorchModel
class PyTorch(TaskData):
"""
Task which saves to .pt models
"""
target_class = PyTorchModel
target_ext = '.pt'
| 19.545455
| 46
| 0.702326
|
82af3d31c0dcc4b6a3540f4d8649928a823b7e18
| 2,310
|
py
|
Python
|
tests/parsers/winreg_plugins/shutdown.py
|
jeppetrost/plaso
|
b48008c6ea79950eeeef3a05b3a859086c8704b6
|
[
"Apache-2.0"
] | null | null | null |
tests/parsers/winreg_plugins/shutdown.py
|
jeppetrost/plaso
|
b48008c6ea79950eeeef3a05b3a859086c8704b6
|
[
"Apache-2.0"
] | null | null | null |
tests/parsers/winreg_plugins/shutdown.py
|
jeppetrost/plaso
|
b48008c6ea79950eeeef3a05b3a859086c8704b6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the LastShutdown value plugin."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import shutdown as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers.winreg_plugins import shutdown
from tests import test_lib as shared_test_lib
from tests.parsers.winreg_plugins import test_lib
class ShutdownWindowsRegistryPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the LastShutdown value plugin."""
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = shutdown.ShutdownWindowsRegistryPlugin()
key_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Control\\Windows'
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
@shared_test_lib.skipUnlessHasTestFile(['SYSTEM'])
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['SYSTEM'])
key_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Control\\Windows'
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = shutdown.ShutdownWindowsRegistryPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 1)
events = list(storage_writer.GetEvents())
event = events[0]
self.assertEqual(event.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event.parser, plugin.plugin_name)
self.assertEqual(event.value_name, 'ShutdownTime')
self.CheckTimestamp(event.timestamp, '2012-04-04 01:58:40.839250')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN)
expected_message = (
'[{0:s}] '
'Description: ShutdownTime').format(key_path)
expected_short_message = 'ShutdownTime'
self._TestGetMessageStrings(event, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| 33
| 80
| 0.759307
|
c3bf8c1acf5b3c6fe9a3079323333e48bdf16de9
| 781
|
py
|
Python
|
main.py
|
djtimca/spacex-api
|
2aff020116f7ba74f468b9a2011b5abf0a9f61b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-09-19T10:56:55.000Z
|
2021-07-27T15:32:19.000Z
|
main.py
|
djtimca/spacex-api
|
2aff020116f7ba74f468b9a2011b5abf0a9f61b2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
main.py
|
djtimca/spacex-api
|
2aff020116f7ba74f468b9a2011b5abf0a9f61b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-11-22T17:38:49.000Z
|
2020-11-22T17:38:49.000Z
|
from spacexpypi import SpaceX
import asyncio
async def main():
api_client = SpaceX()
#Roadster Data
success = await api_client.get_roadster_status()
write_file(success, "test_data/roadster.json")
#Next Launch Data
success = await api_client.get_next_launch()
write_file(success, "test_data/next_launch.json")
#All Next Launches Data
success = await api_client.get_upcoming_launches()
write_file(success, "test_data/upcoming_launches.json")
#Latest Launch Data
success = await api_client.get_latest_launch()
write_file(success, "test_data/latest_launch.json")
#close
await api_client.close()
def write_file(data, filename):
f = open(filename, "w")
f.write(str(data))
f.close()
asyncio.run(main())
| 24.40625
| 59
| 0.709347
|
f9e6785b6c9b1acf7f3a800f3f17e4112d156e8d
| 34,948
|
py
|
Python
|
azure-mgmt-authorization/azure/mgmt/authorization/v2015_07_01/operations/role_assignments_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-authorization/azure/mgmt/authorization/v2015_07_01/operations/role_assignments_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-authorization/azure/mgmt/authorization/v2015_07_01/operations/role_assignments_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class RoleAssignmentsOperations(object):
"""RoleAssignmentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for this operation. Constant value: "2015-07-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-07-01"
self.config = config
def list_for_resource(
self, resource_group_name, resource_provider_namespace, parent_resource_path, resource_type, resource_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets role assignments for a resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource
provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource.
:type resource_type: str
:param resource_name: The name of the resource to get role assignments
for.
:type resource_name: str
:param filter: The filter to apply on the operation. Use
$filter=atScope() to return all role assignments at or above the
scope. Use $filter=principalId eq {id} to return all role assignments
at, above or below the scope for the specified principal.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RoleAssignment
:rtype:
~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentPaged[~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_for_resource.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_for_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/roleAssignments'}
def list_for_resource_group(
self, resource_group_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets role assignments for a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param filter: The filter to apply on the operation. Use
$filter=atScope() to return all role assignments at or above the
scope. Use $filter=principalId eq {id} to return all role assignments
at, above or below the scope for the specified principal.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RoleAssignment
:rtype:
~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentPaged[~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_for_resource_group.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/roleAssignments'}
def delete(
self, scope, role_assignment_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a role assignment.
:param scope: The scope of the role assignment to delete.
:type scope: str
:param role_assignment_name: The name of the role assignment to
delete.
:type role_assignment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RoleAssignment or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleAssignmentName': self._serialize.url("role_assignment_name", role_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
delete.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}'}
def create(
self, scope, role_assignment_name, properties, custom_headers=None, raw=False, **operation_config):
"""Creates a role assignment.
:param scope: The scope of the role assignment to create. The scope
can be any REST resource instance. For example, use
'/subscriptions/{subscription-id}/' for a subscription,
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}'
for a resource group, and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider}/{resource-type}/{resource-name}'
for a resource.
:type scope: str
:param role_assignment_name: The name of the role assignment to
create. It can be any valid GUID.
:type role_assignment_name: str
:param properties: Role assignment properties.
:type properties:
~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentProperties
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RoleAssignment or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.RoleAssignmentCreateParameters(properties=properties)
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleAssignmentName': self._serialize.url("role_assignment_name", role_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RoleAssignmentCreateParameters')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}'}
def get(
self, scope, role_assignment_name, custom_headers=None, raw=False, **operation_config):
"""Get the specified role assignment.
:param scope: The scope of the role assignment.
:type scope: str
:param role_assignment_name: The name of the role assignment to get.
:type role_assignment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RoleAssignment or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleAssignmentName': self._serialize.url("role_assignment_name", role_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}'}
def delete_by_id(
self, role_assignment_id, custom_headers=None, raw=False, **operation_config):
"""Deletes a role assignment.
:param role_assignment_id: The fully qualified ID of the role
assignment, including the scope, resource name and resource type. Use
the format,
/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}.
Example:
/subscriptions/{subId}/resourcegroups/{rgname}//providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}.
:type role_assignment_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RoleAssignment or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete_by_id.metadata['url']
path_format_arguments = {
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
delete_by_id.metadata = {'url': '/{roleAssignmentId}'}
def create_by_id(
self, role_assignment_id, properties, custom_headers=None, raw=False, **operation_config):
"""Creates a role assignment by ID.
:param role_assignment_id: The fully qualified ID of the role
assignment, including the scope, resource name and resource type. Use
the format,
/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}.
Example:
/subscriptions/{subId}/resourcegroups/{rgname}//providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}.
:type role_assignment_id: str
:param properties: Role assignment properties.
:type properties:
~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentProperties
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RoleAssignment or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.RoleAssignmentCreateParameters(properties=properties)
# Construct URL
url = self.create_by_id.metadata['url']
path_format_arguments = {
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RoleAssignmentCreateParameters')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_by_id.metadata = {'url': '/{roleAssignmentId}'}
def get_by_id(
self, role_assignment_id, custom_headers=None, raw=False, **operation_config):
"""Gets a role assignment by ID.
:param role_assignment_id: The fully qualified ID of the role
assignment, including the scope, resource name and resource type. Use
the format,
/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}.
Example:
/subscriptions/{subId}/resourcegroups/{rgname}//providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}.
:type role_assignment_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RoleAssignment or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_by_id.metadata['url']
path_format_arguments = {
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_by_id.metadata = {'url': '/{roleAssignmentId}'}
def list(
self, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets all role assignments for the subscription.
:param filter: The filter to apply on the operation. Use
$filter=atScope() to return all role assignments at or above the
scope. Use $filter=principalId eq {id} to return all role assignments
at, above or below the scope for the specified principal.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RoleAssignment
:rtype:
~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentPaged[~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/roleAssignments'}
def list_for_scope(
self, scope, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets role assignments for a scope.
:param scope: The scope of the role assignments.
:type scope: str
:param filter: The filter to apply on the operation. Use
$filter=atScope() to return all role assignments at or above the
scope. Use $filter=principalId eq {id} to return all role assignments
at, above or below the scope for the specified principal.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RoleAssignment
:rtype:
~azure.mgmt.authorization.v2015_07_01.models.RoleAssignmentPaged[~azure.mgmt.authorization.v2015_07_01.models.RoleAssignment]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_for_scope.metadata['url']
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_for_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignments'}
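# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative addition, not part of the generated SDK
# module above). It shows one plausible way these operations are reached via
# AuthorizationManagementClient; the credentials helper, subscription id,
# principal id and role definition id below are placeholder assumptions.
#
#   import uuid
#   from azure.common.credentials import ServicePrincipalCredentials
#   from azure.mgmt.authorization import AuthorizationManagementClient
#   from azure.mgmt.authorization.v2015_07_01.models import RoleAssignmentProperties
#
#   credentials = ServicePrincipalCredentials("<app-id>", "<secret>", tenant="<tenant-id>")
#   client = AuthorizationManagementClient(credentials, "<subscription-id>")
#   scope = "/subscriptions/<subscription-id>/resourceGroups/<resource-group>"
#   properties = RoleAssignmentProperties(
#       role_definition_id="<role-definition-resource-id>",
#       principal_id="<principal-object-id>")
#   assignment = client.role_assignments.create(scope, str(uuid.uuid1()), properties)
#   for role_assignment in client.role_assignments.list_for_scope(scope):
#       print(role_assignment.id)
# ---------------------------------------------------------------------------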
| 47.036339
| 249
| 0.660782
|
caffabdcb39c625a61b5428627a0a1045adc0f4a
| 47
|
py
|
Python
|
python_modules/mylib/__init__.py
|
plter/PythonCourse2021
|
239d0c70fdc240906b3676e3c7cb134fb7ec685d
|
[
"MIT"
] | null | null | null |
python_modules/mylib/__init__.py
|
plter/PythonCourse2021
|
239d0c70fdc240906b3676e3c7cb134fb7ec685d
|
[
"MIT"
] | null | null | null |
python_modules/mylib/__init__.py
|
plter/PythonCourse2021
|
239d0c70fdc240906b3676e3c7cb134fb7ec685d
|
[
"MIT"
] | null | null | null |
# def say_hello():
# print("Hello Python")
| 15.666667
| 27
| 0.595745
|
f46f8651ed123b765e512b2b01f44bcb4673705c
| 2,723
|
py
|
Python
|
baggie/test/test_baggie_writer.py
|
Box-Robotics/ros2-bagutils
|
4542316e0831f727f8d97f4ec271f693a732482b
|
[
"Apache-2.0"
] | 10
|
2021-02-02T14:38:16.000Z
|
2021-11-23T12:15:46.000Z
|
baggie/test/test_baggie_writer.py
|
Box-Robotics/ros2-bagutils
|
4542316e0831f727f8d97f4ec271f693a732482b
|
[
"Apache-2.0"
] | null | null | null |
baggie/test/test_baggie_writer.py
|
Box-Robotics/ros2-bagutils
|
4542316e0831f727f8d97f4ec271f693a732482b
|
[
"Apache-2.0"
] | 2
|
2021-02-04T12:20:31.000Z
|
2021-03-20T04:16:49.000Z
|
import unittest
import os
import tempfile
import time
import baggie
from example_interfaces.msg import Int32
N_MSGS = 100
TOPIC_NAME = "/counter"
BAGNAME = "ziplock.bag"
class TestBaggieWriter(unittest.TestCase):
"""
Test fixture for the baggie.Baggie Python interface to writing bag files
"""
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
def test_defaults(self):
        bag_file_name = "{}{}{}".format(self.tmp_dir.name, os.sep, BAGNAME)
bag = baggie.Baggie(bag_file_name, mode="w")
for i in range(N_MSGS):
msg = Int32()
msg.data = i
bag.write(TOPIC_NAME, msg)
time.sleep(1./N_MSGS)
def test_compressed(self):
        bag_file_name = "{}{}{}".format(self.tmp_dir.name, os.sep, BAGNAME)
bag = baggie.Baggie(bag_file_name, mode="w", compress=True)
for i in range(N_MSGS):
msg = Int32()
msg.data = i
bag.write(TOPIC_NAME, msg)
time.sleep(1./N_MSGS)
def test_legal_override_types(self):
        bag_file_name = "{}{}{}".format(self.tmp_dir.name, os.sep, BAGNAME)
s_opt = baggie._StorageOptions()
s_opt.storage_id = baggie.Baggie.DEFAULT_STORAGE_ID
c_opt = baggie._ConverterOptions()
c_opt.input_serialization_format = \
baggie.Baggie.DEFAULT_SERIALIZATION_FORMAT
c_opt.output_serialization_format = \
baggie.Baggie.DEFAULT_SERIALIZATION_FORMAT
comp_opt = baggie._CompressionOptions()
comp_opt.compression_format = baggie.Baggie.DEFAULT_COMPRESSION_FORMAT
comp_opt.compression_mode = baggie.Baggie.DEFAULT_COMPRESSION_MODE
bag = baggie.Baggie(bag_file_name, mode="w",
storage_opts=s_opt,
converter_opts=c_opt,
compression_opts=comp_opt)
def test_illegal_override_types(self):
        bag_file_name = "{}{}{}".format(self.tmp_dir.name, os.sep, BAGNAME)
with self.assertRaises(TypeError):
bag = baggie.Baggie(bag_file_name, mode="w", storage_opts="foo")
with self.assertRaises(TypeError):
bag = baggie.Baggie(bag_file_name, mode="w", converter_opts=100)
with self.assertRaises(TypeError):
bag = baggie.Baggie(bag_file_name, mode="w", compression_opts=1.0)
    def test_file_already_exists(self):
        bag_file_name = "{}{}{}".format(self.tmp_dir.name, os.sep, BAGNAME)
bag1 = baggie.Baggie(bag_file_name, mode="w")
with self.assertRaises(baggie.BaggieException):
bag2 = baggie.Baggie(bag_file_name, mode="w")
| 31.662791
| 78
| 0.639001
|
ebd0d037f3b0d6a5d6696f1ae69eed8e6ca2f187
| 52
|
py
|
Python
|
s3conf/storage/exceptions.py
|
sbneto/s3conf
|
6eb6df4132ed1557da4128a748585d949015aa59
|
[
"MIT"
] | 2
|
2019-05-08T18:41:49.000Z
|
2020-06-22T14:43:34.000Z
|
s3conf/storage/exceptions.py
|
sbneto/s3conf
|
6eb6df4132ed1557da4128a748585d949015aa59
|
[
"MIT"
] | 7
|
2018-05-15T16:09:08.000Z
|
2019-05-27T12:53:20.000Z
|
s3conf/storage/exceptions.py
|
sbneto/s3conf
|
6eb6df4132ed1557da4128a748585d949015aa59
|
[
"MIT"
] | 1
|
2018-06-19T15:51:43.000Z
|
2018-06-19T15:51:43.000Z
|
class FileDoesNotExist(FileNotFoundError):
pass
| 17.333333
| 42
| 0.807692
|
59620f95c1f644f1c02d63e83eec0e431c10c8d3
| 553
|
py
|
Python
|
scrapy_lab/mtime/mtime/items.py
|
ch1huizong/lab
|
c1622f94ca5c8a716cb5769fa6213060eb7c140d
|
[
"MIT"
] | null | null | null |
scrapy_lab/mtime/mtime/items.py
|
ch1huizong/lab
|
c1622f94ca5c8a716cb5769fa6213060eb7c140d
|
[
"MIT"
] | null | null | null |
scrapy_lab/mtime/mtime/items.py
|
ch1huizong/lab
|
c1622f94ca5c8a716cb5769fa6213060eb7c140d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MtimeItem(scrapy.Item):
image_urls = scrapy.Field()
images = scrapy.Field()
image_paths = scrapy.Field()
rank = scrapy.Field()
title = scrapy.Field()
year = scrapy.Field()
director = scrapy.Field()
actors = scrapy.Field()
categories = scrapy.Field()
description = scrapy.Field()
points = scrapy.Field()
comments = scrapy.Field()
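# Hedged usage sketch (illustrative addition, not part of the original project).
# MtimeItem behaves like a dict keyed by the Fields declared above, so a spider
# callback would populate and yield it; the values below are made-up placeholders.
if __name__ == "__main__":
    example = MtimeItem()
    example["rank"] = 1
    example["title"] = "Example movie"
    example["year"] = "1999"
    example["director"] = "Example director"
    example["points"] = "9.2"
    print(dict(example))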
| 22.12
| 52
| 0.65642
|
e3e9732bc69d49146de68e1294ca97cee66126d1
| 1,792
|
py
|
Python
|
Desafios/Desafio 44.py
|
blopah/python3-curso-em-video-gustavo-guanabara-exercicios
|
d86c613ea8bb335c4dd888a34bda456055e4d82e
|
[
"MIT"
] | 2
|
2021-02-06T04:35:44.000Z
|
2021-02-08T02:37:33.000Z
|
Desafios/Desafio 44.py
|
blopah/python3-curso-em-video-gustavo-guanabara-exercicios
|
d86c613ea8bb335c4dd888a34bda456055e4d82e
|
[
"MIT"
] | null | null | null |
Desafios/Desafio 44.py
|
blopah/python3-curso-em-video-gustavo-guanabara-exercicios
|
d86c613ea8bb335c4dd888a34bda456055e4d82e
|
[
"MIT"
] | null | null | null |
print("""Elabore um programa que calcule o valor a ser pago por um produto,
considerando o seu \033[1;31mpreço normal\033[0;0m e \033[1;31mcondição de pagamento\033[0;0m:
\033[1;31m-\033[0;0m à vista \033[1;34mdinheiro/cheque:\033[0;0m \033[1;31m10%\033[0;0m de desconto
\033[1;31m-\033[0;0m à vista no \033[1;34mcartão\033[0;0m: \033[1;31m5%\033[0;0m de desconto
\033[1;31m-\033[0;0m em até \033[1;34m2x no cartão\033[0;0m: preço normal
\033[1;31m-\033[0;0m \033[1;34m3x ou mais no cartão\033[0;0m: \033[1;31m20%\033[0;0m de juros""")
# Elabore um programa que calcule o valor a ser pago por um produto,
# considerando o seu preço normal e condição de pagamento:
# 1 à vista dinheiro/cheque: 10% de desconto
# 2 à vista no cartão: 5% de desconto
# 3 em até 2x no cartão: preço normal
# 4 3x ou mais no cartão: 20% de juros
print('{:=^40}'.format('@blopadesign'))
valor = float(input('Insira o valor total da compra: '))
print("""Escolha a forma de pagamento:
\033[1;31m1\033[0;0m à vista \033[1;34mdinheiro/cheque:\033[0;0m \033[1;31m10%\033[0;0m de desconto
\033[1;31m2\033[0;0m à vista no \033[1;34mcartão\033[0;0m: \033[1;31m5%\033[0;0m de desconto
\033[1;31m3\033[0;0m em até \033[1;34m2x no cartão\033[0;0m: preço normal
\033[1;31m4\033[0;0m \033[1;34m3x ou mais no cartão\033[0;0m: \033[1;31m20%\033[0;0m de juros""")
forma = int(input(''))
valorfinal = 0
if forma == 1:
print('Sua compra sairá por R${:.2f}. Obrigado!'.format(valor - (valor * 10/100)))
elif forma == 2:
print('Sua compra sairá por R${:.2f}. Obrigado!'.format(valor - (valor * 5/100)))
elif forma == 3:
print('Sua compra sairá por R${:.2f}. Obrigado!'.format(valor))
elif forma == 4:
print('Sua compra sairá por R${:.2f}. Obrigado!'.format(valor + (valor * 20/100)))
else:
    print('Entrada inválida')
| 56
| 99
| 0.688058
|
2feb7f5cadcb1182b6d5e8b6701cde57890b3ab6
| 1,709
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
rushi444/recipe-api
|
8c7828ee969426ec9093ad0d21f492cca1e310c3
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
rushi444/recipe-api
|
8c7828ee969426ec9093ad0d21f492cca1e310c3
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
rushi444/recipe-api
|
8c7828ee969426ec9093ad0d21f492cca1e310c3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-03-13 17:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.264706
| 266
| 0.63897
|
c4f4af1588d61975a6c96b4d202df3f7fc842e2d
| 30,300
|
py
|
Python
|
bzt/jmx/tools.py
|
svetasemashko/taurus
|
6f2dbaa60fa71a1f0dc5536171d963df91a1f420
|
[
"Apache-2.0"
] | null | null | null |
bzt/jmx/tools.py
|
svetasemashko/taurus
|
6f2dbaa60fa71a1f0dc5536171d963df91a1f420
|
[
"Apache-2.0"
] | null | null | null |
bzt/jmx/tools.py
|
svetasemashko/taurus
|
6f2dbaa60fa71a1f0dc5536171d963df91a1f420
|
[
"Apache-2.0"
] | null | null | null |
"""
Module holds JMX handlers implementations
Copyright 2017 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
from distutils.version import LooseVersion
from lxml import etree
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Scenario
from bzt.jmx import JMX
from bzt.jmx.base import cond_int
from bzt.jmx.threadgroups import ThreadGroup, ConcurrencyThreadGroup, ThreadGroupHandler
from bzt.requests_model import RequestVisitor, has_variable_pattern, HierarchicRequestParser
from bzt.utils import iteritems, numeric_types
from bzt.utils import BetterDict, dehumanize_time, ensure_is_dict, load_class, guess_delimiter
class RequestCompiler(RequestVisitor):
def __init__(self, jmx_builder):
super(RequestCompiler, self).__init__()
self.jmx_builder = jmx_builder
def visit_mqttrequest(self, request):
return self.jmx_builder.compile_request(request)
def visit_hierarchichttprequest(self, request):
return self.jmx_builder.compile_request(request)
def visit_ifblock(self, block):
return self.jmx_builder.compile_if_block(block)
def visit_onceblock(self, block):
return self.jmx_builder.compile_once_block(block)
def visit_loopblock(self, block):
return self.jmx_builder.compile_loop_block(block)
def visit_whileblock(self, block):
return self.jmx_builder.compile_while_block(block)
def visit_foreachblock(self, block):
return self.jmx_builder.compile_foreach_block(block)
def visit_transactionblock(self, block):
return self.jmx_builder.compile_transaction_block(block)
def visit_includescenarioblock(self, block):
scenario_name = block.scenario_name
if scenario_name in self.path:
msg = "Mutual recursion detected in include-scenario blocks (scenario %s)"
raise TaurusConfigError(msg % scenario_name)
self.record_path(scenario_name)
return self.jmx_builder.compile_include_scenario_block(block)
def visit_actionblock(self, block):
return self.jmx_builder.compile_action_block(block)
def visit_setvariables(self, block):
return self.jmx_builder.compile_set_variables_block(block)
class LoadSettingsProcessor(object):
TG = ThreadGroup.__name__
CTG = ConcurrencyThreadGroup.__name__
def __init__(self, executor):
self.log = executor.log.getChild(self.__class__.__name__)
self.load = executor.get_specific_load()
self.raw_load = executor.get_raw_load()
self.log.debug("Load: %s", self.load)
self.tg = self._detect_thread_group(executor)
self.tg_handler = ThreadGroupHandler(self.log)
def _detect_thread_group(self, executor):
"""
Detect preferred thread group
:param executor:
:return:
"""
tg = self.TG
if not executor.settings.get('force-ctg', True):
return tg
msg = 'Thread group detection: %s, regular ThreadGroup will be used'
if not self.load.duration:
self.log.debug(msg, 'duration not found')
elif self.load.iterations:
self.log.debug(msg, 'iterations are found')
elif not executor.tool:
            msg = 'You must set executor tool (%s) to choose ConcurrencyThreadGroup'
raise TaurusInternalException(msg % executor.tool_name)
elif not executor.tool.ctg_plugin_installed():
            self.log.warning(msg % 'plugin for ConcurrencyThreadGroup not found')
else:
tg = self.CTG
return tg
def modify(self, jmx):
if not (self.raw_load.iterations or self.raw_load.concurrency or self.load.duration):
self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped')
return
        # IMPORTANT: fix the groups order first, as changing an element's type changes the order in which groups are retrieved
groups = list(self.tg_handler.groups(jmx))
# user concurrency is jmeter variable, write it to tg as is
if isinstance(self.load.concurrency, str):
target_list = [(group, self.load.concurrency) for group in groups]
else: # concurrency is numeric or empty
            raw = self.load.concurrency is None  # keep existing concurrency if self.load.concurrency is omitted
concurrency_list = []
for group in groups:
concurrency = group.get_concurrency(raw=raw)
if concurrency is None:
concurrency = 1
concurrency_list.append(concurrency)
if not raw: # divide numeric concurrency
self._divide_concurrency(concurrency_list)
target_list = zip(groups, concurrency_list)
for group, concurrency in target_list:
self.tg_handler.convert(source=group, target_gtype=self.tg, load=self.load, concurrency=concurrency)
if self.load.throughput:
self._add_shaper(jmx)
if self.tg == self.TG and self.load.steps:
self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup")
def _divide_concurrency(self, concurrency_list):
"""
calculate target concurrency for every thread group
"""
total_old_concurrency = sum(concurrency_list)
for idx, concurrency in enumerate(concurrency_list):
if total_old_concurrency and concurrency_list[idx] != 0:
part_of_load = 1.0 * self.load.concurrency * concurrency / total_old_concurrency
concurrency_list[idx] = int(round(part_of_load))
if concurrency_list[idx] == 0:
concurrency_list[idx] = 1
else:
concurrency_list[idx] = 0
total_new_concurrency = sum(concurrency_list)
leftover = self.load.concurrency - total_new_concurrency
if leftover < 0:
msg = "Had to add %s more threads to maintain thread group proportion"
self.log.warning(msg, -leftover)
elif leftover > 0:
msg = "%s threads left undistributed due to thread group proportion"
self.log.warning(msg, leftover)
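    # Worked example (added note, not original code): with load.concurrency = 10 and
    # existing group concurrencies [2, 6, 2], total_old_concurrency is 10, so the
    # groups keep the proportion [2, 6, 2] and leftover is 0; with [1, 1, 1] each
    # group rounds to 3, leftover is 10 - 9 = 1, and the "left undistributed"
    # warning above is logged.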
def _add_shaper(self, jmx):
"""
Add shaper
:param jmx: JMX
:return:
"""
if not self.load.duration:
self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
return
etree_shaper = jmx.get_rps_shaper()
if self.load.ramp_up:
if isinstance(self.load.throughput, numeric_types) and self.load.duration:
start_rps = self.load.throughput / float(self.load.duration)
start_rps = max(start_rps, 0.001) # avoid zeroing
start_rps = min(start_rps, 1.0) # avoid starting too fast
else:
start_rps = 1
if not self.load.steps:
jmx.add_rps_shaper_schedule(etree_shaper, start_rps, self.load.throughput, self.load.ramp_up)
else:
step_h = self.load.throughput / self.load.steps
step_w = float(self.load.ramp_up) / self.load.steps
accum_time = 0
for step in range(1, self.load.steps + 1):
jmx.add_rps_shaper_schedule(etree_shaper, step_h * step, step_h * step,
step_w * step - accum_time)
accum_time += cond_int(step_w * step - accum_time)
if self.load.hold:
jmx.add_rps_shaper_schedule(etree_shaper, self.load.throughput, self.load.throughput, self.load.hold)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
class ProtocolHandler(object):
def __init__(self, sys_props, engine):
super(ProtocolHandler, self).__init__()
self.system_props = sys_props
self.engine = engine
def get_toplevel_elements(self, scenario):
return []
def get_sampler_pair(self, request):
return None, None
@staticmethod
def safe_time(any_time):
try:
smart_time = int(1000 * dehumanize_time(any_time))
except TaurusInternalException:
smart_time = any_time
return smart_time
class JMeterScenarioBuilder(JMX):
"""
Helper to build JMeter test plan from Scenario
:type protocol_handlers: dict[str,ProtocolHandler]
"""
def __init__(self, executor, original=None):
"""
:type executor: ScenarioExecutor
:type original: JMX
"""
super(JMeterScenarioBuilder, self).__init__(original)
self.executor = executor
self.scenario = executor.get_scenario()
self.engine = executor.engine
self.system_props = BetterDict()
self.request_compiler = None
self.default_protocol = self.executor.settings.get('default-protocol', 'http')
self.protocol_handlers = {}
for protocol, cls_name in iteritems(self.executor.settings.get("protocol-handlers")):
cls_obj = load_class(cls_name)
instance = cls_obj(self.system_props, self.engine)
self.protocol_handlers[protocol] = instance
self.FIELD_KEYSTORE_CONFIG = 'keystore-config'
@staticmethod
def _get_timer(req):
think_time = req.get_think_time(full=True)
if not think_time:
return []
if not isinstance(think_time, list): # constant
return JMX.get_constant_timer(delay=ProtocolHandler.safe_time(think_time))
mean = ProtocolHandler.safe_time(think_time[1])
dev = ProtocolHandler.safe_time(think_time[2])
if think_time[0] == "uniform":
return JMX.get_uniform_timer(maximum=dev * 2, offset=mean - dev)
elif think_time[0] == "gaussian":
return JMX.get_gaussian_timer(dev=dev, offset=mean)
elif think_time[0] == "poisson":
return JMX.get_poisson_timer(lam=mean - dev, delay=dev)
else:
raise TaurusConfigError("Wrong timer type: %s" % think_time[0])
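    # Added note for illustration: req.get_think_time(full=True) is assumed to yield
    # either a scalar delay (constant timer branch above) or a three-item list of
    # [distribution, mean, deviation] with distribution being "uniform", "gaussian"
    # or "poisson"; any other first element raises TaurusConfigError. The concrete
    # scenario syntax that produces these values is an assumption here.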
def __add_extractors(self, children, req):
self.__add_boundary_ext(children, req)
self.__add_regexp_ext(children, req)
self.__add_json_ext(children, req)
self.__add_jquery_ext(children, req)
self.__add_xpath_ext(children, req)
def __add_boundary_ext(self, children, req):
extractors = req.config.get("extract-boundary")
for varname, cfg in iteritems(extractors):
subj = cfg.get('subject', 'body')
left = cfg.get('left', TaurusConfigError("Left boundary is missing for boundary extractor %s" % varname))
right = cfg.get('right', TaurusConfigError("Right boundary is missing for boundary extractor %s" % varname))
match_no = cfg.get('match-no', 1)
defvalue = cfg.get('default', 'NOT_FOUND')
scope = cfg.get("scope", None)
from_var = cfg.get("from-variable", None)
extractor = JMX._get_boundary_extractor(varname, subj, left, right, match_no, defvalue, scope, from_var)
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_regexp_ext(self, children, req):
extractors = req.config.get("extract-regexp")
for varname in extractors:
cfg = ensure_is_dict(extractors, varname, "regexp")
scope = cfg.get("scope", None)
from_var = cfg.get("from-variable", None)
extractor = JMX._get_extractor(varname, cfg.get('subject', 'body'), cfg['regexp'], cfg.get('template', 1),
cfg.get('match-no', 1), cfg.get('default', 'NOT_FOUND'), scope, from_var)
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_json_ext(self, children, req):
jextractors = req.config.get("extract-jsonpath")
for varname in jextractors:
cfg = ensure_is_dict(jextractors, varname, "jsonpath")
if LooseVersion(str(self.executor.settings.get("version"))) < LooseVersion("3.0"):
extractor = JMX._get_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("from-variable", None))
else:
extractor = JMX._get_internal_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("scope", None),
cfg.get("from-variable", None),
cfg.get("match-no", "0"),
cfg.get("concat", False))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_jquery_ext(self, children, req):
css_jquery_extors = req.config.get("extract-css-jquery")
for varname in css_jquery_extors:
cfg = ensure_is_dict(css_jquery_extors, varname, "expression")
extractor = self._get_jquerycss_extractor(varname,
cfg['expression'],
cfg.get('attribute', ""),
cfg.get('match-no', 0),
cfg.get('default', 'NOT_FOUND'),
cfg.get("scope", None),
cfg.get("from-variable", None))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_xpath_ext(self, children, req):
xpath_extractors = req.config.get("extract-xpath")
for varname in xpath_extractors:
cfg = ensure_is_dict(xpath_extractors, varname, "xpath")
children.append(JMX._get_xpath_extractor(varname,
cfg['xpath'],
cfg.get('default', 'NOT_FOUND'),
cfg.get('validate-xml', False),
cfg.get('ignore-whitespace', True),
cfg.get("match-no", "-1"),
cfg.get('use-namespaces', False),
cfg.get('use-tolerant-parser', False),
cfg.get("scope", None),
cfg.get("from-variable", None)))
children.append(etree.Element("hashTree"))
@staticmethod
def __add_assertions(children, req):
assertions = req.config.get("assert", [])
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
if not isinstance(assertion['contains'], list):
assertion['contains'] = [assertion['contains']]
children.append(JMX._get_resp_assertion(assertion.get("subject", Scenario.FIELD_BODY),
assertion['contains'],
assertion.get('regexp', True),
assertion.get('not', False),
assertion.get('assume-success', False)))
children.append(etree.Element("hashTree"))
jpath_assertions = req.config.get("assert-jsonpath", [])
for idx, assertion in enumerate(jpath_assertions):
assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion)
component = JMX._get_json_path_assertion(assertion.get('jsonpath', exc),
assertion.get('expected-value', ''),
assertion.get('validate', False),
assertion.get('expect-null', False),
assertion.get('invert', False),
assertion.get('regexp', True))
children.append(component)
children.append(etree.Element("hashTree"))
xpath_assertions = req.config.get("assert-xpath", [])
for idx, assertion in enumerate(xpath_assertions):
assertion = ensure_is_dict(xpath_assertions, idx, "xpath")
exc = TaurusConfigError('XPath not found in assertion: %s' % assertion)
component = JMX._get_xpath_assertion(assertion.get('xpath', exc),
assertion.get('validate-xml', False),
assertion.get('ignore-whitespace', True),
assertion.get('use-tolerant-parser', False),
assertion.get('invert', False))
children.append(component)
children.append(etree.Element("hashTree"))
@staticmethod
def __add_jsr_elements(children, req, get_from_config=True):
"""
:type children: etree.Element
:type req: Request
"""
jsrs = []
if get_from_config:
jsrs = req.config.get("jsr223", [])
else:
jsrs = req.get("jsr223", [])
if not isinstance(jsrs, list):
jsrs = [jsrs]
for idx, _ in enumerate(jsrs):
jsr = ensure_is_dict(jsrs, idx, sub_key='script-text')
lang = jsr.get("language", "groovy")
script_file = jsr.get("script-file", None)
script_text = jsr.get("script-text", None)
if not script_file and not script_text:
raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'")
parameters = jsr.get("parameters", "")
execute = jsr.get("execute", "after")
cache_key = str(jsr.get("compile-cache", True)).lower()
children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text, cache_key))
children.append(etree.Element("hashTree"))
def __gen_requests(self, scenario):
http_protocol = scenario.data.get('protocol', 'http') == 'http'
requests = scenario.get_requests(parser=HierarchicRequestParser, require_url=http_protocol)
elements = []
for compiled in self.compile_requests(requests):
elements.extend(compiled)
return elements
def compile_scenario(self, scenario):
elements = []
for _, protocol in iteritems(self.protocol_handlers):
elements.extend(protocol.get_toplevel_elements(scenario))
elements.extend(self.__gen_authorization(scenario))
elements.extend(self.__gen_keystore_config(scenario))
elements.extend(self.__gen_data_sources(scenario))
elements.extend(self.__gen_requests(scenario))
self.__add_jsr_elements(elements, scenario, False)
return elements
def compile_request(self, request):
"""
:type request: HierarchicHTTPRequest
:return:
"""
sampler = children = None
protocol_name = request.priority_option('protocol', default=self.default_protocol)
if protocol_name in self.protocol_handlers:
protocol = self.protocol_handlers[protocol_name]
sampler, children = protocol.get_sampler_pair(request)
if sampler is None:
self.log.warning("Problematic request: %s", request.config)
raise TaurusInternalException("Unable to handle request, please review missing options")
children.extend(self._get_timer(request))
self.__add_assertions(children, request)
timeout = ProtocolHandler.safe_time(request.priority_option('timeout'))
if timeout is not None:
children.append(JMX._get_dur_assertion(timeout))
children.append(etree.Element("hashTree"))
self.__add_extractors(children, request)
self.__add_jsr_elements(children, request)
return [sampler, children]
def compile_if_block(self, block):
elements = []
# TODO: pass jmeter IfController options
if_controller = JMX._get_if_controller(block.condition)
then_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.then_clause):
for element in compiled:
then_children.append(element)
elements.extend([if_controller, then_children])
if block.else_clause:
inverted_condition = "!(" + block.condition + ")"
else_controller = JMX._get_if_controller(inverted_condition)
else_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.else_clause):
for element in compiled:
else_children.append(element)
elements.extend([else_controller, else_children])
return elements
def compile_once_block(self, block):
elements = []
once_controller = JMX._get_once_controller()
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([once_controller, children])
return elements
def compile_loop_block(self, block):
elements = []
loop_controller = JMX._get_loop_controller(block.loops)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([loop_controller, children])
return elements
def compile_while_block(self, block):
elements = []
controller = JMX._get_while_controller(block.condition)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_foreach_block(self, block):
"""
:type block: ForEachBlock
"""
elements = []
controller = JMX._get_foreach_controller(block.input_var, block.loop_var)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_transaction_block(self, block):
elements = []
controller = JMX._get_transaction_controller(block.label,
block.priority_option('force-parent-sample', False),
block.include_timers)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_include_scenario_block(self, block):
elements = []
controller = JMX._get_simple_controller(block.scenario_name)
children = etree.Element("hashTree")
scenario = self.executor.get_scenario(name=block.scenario_name)
for element in self.compile_scenario(scenario):
children.append(element)
elements.extend([controller, children])
return elements
def compile_action_block(self, block):
"""
:type block: ActionBlock
:return:
"""
actions = {
'stop': 0,
'pause': 1,
'stop-now': 2,
'continue': 3,
}
targets = {'current-thread': 0, 'all-threads': 2}
action = actions[block.action]
target = targets[block.target]
duration = 0
if block.duration is not None:
duration = int(block.duration * 1000)
test_action = JMX._get_action_block(action, target, duration)
children = etree.Element("hashTree")
self.__add_jsr_elements(children, block)
return [test_action, children]
@staticmethod
def compile_set_variables_block(block):
set_var_action = JMX.get_set_var_action(block.mapping)
hashtree = etree.Element("hashTree")
return [set_var_action, hashtree]
def compile_requests(self, requests):
if self.request_compiler is None:
self.request_compiler = RequestCompiler(self)
compiled = []
for request in requests:
compiled.append(self.request_compiler.visit(request))
self.request_compiler.clear_path_cache()
return compiled
def __generate(self):
"""
Generate the test plan
"""
thread_group = JMX.get_thread_group(testname=self.executor.label)
thread_group_ht = etree.Element("hashTree", type="tg")
# NOTE: set realistic dns-cache and JVM prop by default?
self.request_compiler = RequestCompiler(self)
for element in self.compile_scenario(self.scenario):
thread_group_ht.append(element)
results_tree = self._get_results_tree()
results_tree_ht = etree.Element("hashTree")
self.append(self.TEST_PLAN_SEL, thread_group)
self.append(self.TEST_PLAN_SEL, thread_group_ht)
self.append(self.TEST_PLAN_SEL, results_tree)
self.append(self.TEST_PLAN_SEL, results_tree_ht)
def save(self, filename):
"""
Generate test plan and save
:type filename: str
"""
# NOTE: bad design, as repetitive save will duplicate stuff
self.__generate()
super(JMeterScenarioBuilder, self).save(filename)
@staticmethod
def __gen_authorization(scenario):
"""
Generates HTTP Authorization Manager
"""
elements = []
authorizations = scenario.get("authorization")
if authorizations:
clear_flag = False
if isinstance(authorizations, dict):
if "clear" in authorizations or "list" in authorizations: # full form
clear_flag = authorizations.get("clear", False)
authorizations = authorizations.get("list", [])
else:
authorizations = [authorizations] # short form
if not isinstance(authorizations, list):
raise TaurusConfigError("Wrong authorization format: %s" % authorizations)
auth_manager = JMX.get_auth_manager(authorizations, clear_flag)
elements.append(auth_manager)
elements.append(etree.Element("hashTree"))
return elements
def __gen_data_sources(self, scenario):
elements = []
for source in scenario.get_data_sources():
source_path = source["path"]
delimiter = source.get("delimiter")
if has_variable_pattern(source_path):
msg = "Path to CSV contains JMeter variable/function, can't check for file existence: %s"
self.log.warning(msg, source_path)
if not delimiter:
delimiter = ','
self.log.warning("Can't detect CSV dialect, default delimiter will be '%s'", delimiter)
else:
source_path = self.executor.engine.find_file(source_path)
if not os.path.isfile(source_path):
raise TaurusConfigError("data-sources path not found: %s" % source_path)
if not delimiter:
delimiter = guess_delimiter(source_path)
if source.get("random-order"):
config = JMX._get_csv_config_random(source_path, delimiter, source.get("loop", True),
source.get("variable-names", ""))
else:
config = JMX._get_csv_config(source_path, delimiter, source.get("loop", True),
source.get("variable-names", ""), source.get("quoted", False))
elements.append(config)
elements.append(etree.Element("hashTree"))
return elements
def __gen_keystore_config(self, scenario):
elements = []
keystore_config = scenario.get(self.FIELD_KEYSTORE_CONFIG)
if keystore_config:
variable_name = keystore_config["variable-name"]
start_index = keystore_config["start-index"]
end_index = keystore_config["end-index"]
preload = keystore_config["preload"]
config = JMX.get_keystore_config_elements(variable_name, start_index, end_index, preload)
elements.append(config)
elements.append(etree.Element("hashTree"))
return elements
| 42.141864
| 120
| 0.598152
|
6dea18d2b6454e97f881a62544a4c46801fbf869
| 13,563
|
py
|
Python
|
infoblox_netmri/client.py
|
NastyaArslanova/infoblox-netmri
|
399d904399ba7958262c6f107fa3b0efdd55019b
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/client.py
|
NastyaArslanova/infoblox-netmri
|
399d904399ba7958262c6f107fa3b0efdd55019b
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/client.py
|
NastyaArslanova/infoblox-netmri
|
399d904399ba7958262c6f107fa3b0efdd55019b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import io
import gzip
from os.path import isfile
import requests
from requests.exceptions import HTTPError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from infoblox_netmri.utils.utils import locate, to_underscore_notation, to_snake
class InfobloxNetMRI(object):
def __init__(self, host, username, password, api_version="auto",
use_ssl=True, ssl_verify=False, http_pool_connections=5,
http_pool_maxsize=10, max_retries=5):
# Process ssl parameters
if use_ssl:
self.protocol = "https"
if isinstance(ssl_verify, bool):
self.ssl_verify = ssl_verify
else:
opt = str(ssl_verify).lower()
if opt in ['yes', 'on', 'true']:
self.ssl_verify = True
elif opt in ['no', 'off', 'false']:
self.ssl_verify = False
elif isfile(ssl_verify):
self.ssl_verify = ssl_verify
else:
                    raise ValueError("ssl_verify is not a valid boolean value, "
"nor a valid path to a CA bundle file")
else:
self.protocol = "http"
self.ssl_verify = False
# Process host
if re.match(r"^[\w.-]+$", host):
self.host = host
else:
raise ValueError("Hostname is not a valid hostname")
# Authentication parameters
self.username = username
self.password = password
self._is_authenticated = False
# Disable ssl warnings
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# HTTP session settings
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
max_retries=max_retries,
pool_connections=http_pool_connections,
pool_maxsize=http_pool_maxsize)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.verify = self.ssl_verify
# API version
if re.match(r'^(?:\d+\.)?(?:\d+\.)?(?:\*|\d+)$', api_version):
self.api_version = api_version
elif api_version.lower() == "auto":
self.api_version = self._get_api_version()
else:
raise ValueError("Incorrect API version")
def _make_request(self, url, method="get", data=None, extra_headers=None, downloadable=False):
"""Prepares the request, checks for authentication and retries in case of issues
Args:
url (str): URL of the request
method (str): Any of "get", "post", "delete"
data (any): Possible extra data to send with the request
extra_headers (dict): Possible extra headers to send along in the request
downloadable (bool): Indicates if the method can download files
Returns:
dict
"""
attempts = 0
while attempts < 2:
# Authenticate first if not authenticated already
if not self._is_authenticated:
self._authenticate()
# Make the request and check for authentication errors
# This allows us to catch session timeouts for long standing connections
try:
if downloadable:
return self._send_mixed_request(url, method, data, extra_headers)
else:
return self._send_request(url, method, data, extra_headers)
except HTTPError as e:
if e.response.status_code == 403:
self._is_authenticated = False
attempts += 1
else:
# re-raise other HTTP errors
raise
def _send_request(self, url, method="get", data=None, extra_headers=None):
"""Performs a given request and returns a json object
Args:
url (str): URL of the request
method (str): Any of "get", "post", "delete"
data (any): Possible extra data to send with the request
extra_headers (dict): Possible extra headers to send along in the request
Returns:
dict
"""
headers = {'Content-type': 'application/json'}
if isinstance(extra_headers, dict):
headers.update(extra_headers)
res = self.session.request(method, url, headers=headers, data=data, stream=True)
content_type = res.headers.get('content-type')
if 400 <= res.status_code < 600:
if 'application/json' in content_type:
raise HTTPError(res.json(), response=res)
else:
raise HTTPError(res.content, response=res)
content = b''
for chunk in res.iter_content():
content += chunk
if res.headers.get('Content-Encoding') == 'gzip':
content_copy = content
try:
with gzip.GzipFile(fileobj=io.BytesIO(content)) as gz:
content = gz.read()
except (ValueError, Exception):
content = content_copy
try:
content = content.decode()
except (UnicodeDecodeError, Exception):
pass
if 'application/json' in content_type:
return json.loads(content)
else:
return content
def _send_mixed_request(self, url, method="get", data=None, extra_headers=None):
"""Performs a given request and returns a json object or
downloads a requested file
Args:
url (str): URL of the request
method (str): Any of "get", "post", "delete"
data (any): Possible extra data to send with the request
extra_headers (dict): Possible extra headers to send along in the
request
Returns:
dict
"""
headers = {'Content-type': 'application/json'}
if isinstance(extra_headers, dict):
headers.update(extra_headers)
res = self.session.request(method, url, headers=headers, data=data, stream=True)
if 400 <= res.status_code < 600:
raise HTTPError(res.content, response=res)
content_type = res.headers.get('content-type')
if content_type is not None:
if 'application/json' in content_type:
return res.json()
else:
try:
content_disposition = res.headers['content-disposition']
except KeyError:
raise HTTPError("Unknown Content-Disposition", response=res)
except (HTTPError, Exception):
raise HTTPError(res.content, response=res)
m = re.search("filename=\"(.+)\"", content_disposition)
filename = m.group(1)
result = self._download_file(content_type, filename, res)
return result
else:
raise HTTPError("Unknown Content-Type!", response=res)
def _download_file(self, _content_type, filename, response):
"""Downloads a file via HTTP
Args:
_content_type (str): Type of data in the HTTP response
filename (str): The name of the file to download
response (Response): HTTP response object
Returns:
dict
"""
chunk_size = 1024 * 1000
try:
with open(filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=chunk_size):
f.write(chunk)
except TypeError:
with open(filename, 'w') as f:
for chunk in response.iter_content(chunk_size=chunk_size):
f.write(chunk)
except (IOError, Exception):
return {'Status': 'FAIL', 'Filename': filename}
return {'Status': 'OK', 'Filename': filename}
def _get_api_version(self):
"""Fetches the most recent API version
Returns:
str
"""
url = "{base_url}/api/server_info".format(base_url=self._base_url())
server_info = self._make_request(url=url, method="get")
return server_info["latest_api_version"]
def _authenticate(self):
""" Perform an authentication against NetMRI"""
url = "{base_url}/api/authenticate".format(base_url=self._base_url())
data = json.dumps({'username': self.username, "password": self.password})
# Bypass authentication check in make_request by using _send_request
self._send_request(url, method="post", data=data)
self._is_authenticated = True
def _controller_name(self, objtype):
"""Determines the controller name for the object's type
Args:
objtype (str): The object type
Returns:
A string with the controller name
"""
# would be better to use inflect.pluralize here, but would add a dependency
if objtype.endswith('y'):
return objtype[:-1] + 'ies'
if objtype[-1] in 'sx' or objtype[-2:] in ['sh', 'ch']:
return objtype + 'es'
if objtype.endswith('an'):
return objtype[:-2] + 'en'
return objtype + 's'
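    # Added worked examples of the pluralisation rules above (derived from the code,
    # not from upstream docs): 'device' -> 'devices', 'policy' -> 'policies',
    # 'box' -> 'boxes', 'switch' -> 'switches', 'woman' -> 'women'.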
def _base_url(self):
"""Generate the base URL for the connection with NetMRI
Returns:
A string containing the base URL
"""
return "{proto}://{host}".format(
proto=self.protocol,
host=self.host
)
def _object_url(self, objtype, objid):
"""Generate the URL for the specified object
Args:
objtype (str): The object's type
objid (int): The objects ID
Returns:
A string containing the URL of the object
"""
return "{base_url}/api/{api_version}/{controller}/{obj_id}".format(
base_url=self._base_url(),
api_version=self.api_version,
controller=self._controller_name(objtype),
obj_id=objid
)
def _method_url(self, method_name):
"""Generate the URL for the requested method
Args:
method_name (str): Name of the method
Returns:
A string containing the URL of the method
"""
return "{base_url}/api/{api}/{method}".format(
base_url=self._base_url(),
api=self.api_version,
method=method_name
)
def api_request(self, method_name, params, downloadable=False):
"""Execute an arbitrary method.
Args:
method_name (str): include the controller name: 'devices/search'
params (dict): the method parameters
downloadable (bool): indicates if the request can download files
Returns:
A dict with the response
Raises:
requests.exceptions.HTTPError
"""
url = self._method_url(method_name)
data = json.dumps(params)
return self._make_request(url=url, method="post", data=data, downloadable=downloadable)
def show(self, objtype, objid):
"""Query for a specific resource by ID
Args:
objtype (str): object type, e.g. 'device', 'interface'
objid (int): object ID (DeviceID, etc.)
Returns:
A dict with that object
Raises:
requests.exceptions.HTTPError
"""
url = self._object_url(objtype, int(objid))
return self._make_request(url, method="get")
def delete(self, objtype, objid):
"""Destroy a specific resource by ID
Args:
objtype (str): object type, e.g. 'script'
objid (int): object ID
Returns:
A dict with the response
Raises:
requests.exceptions.HTTPError
"""
url = self._object_url(objtype, int(objid))
return self._make_request(url, method="delete")
def get_broker(self, name):
"""Return broker class using full package name
Args:
name (str): full package name e.g. 'api.broker.v3_2_0.device_broker.DeviceBroker'
Returns:
class of broker
Raises:
RuntimeError
"""
return locate(self._get_broker_package(name))(self)
def _get_broker_package(self, name):
"""Return broker class full package name using API version and API class name
Args:
name (str): API data structure name e.g. 'Device'
Returns:
full path for class representation of object
"""
version = to_underscore_notation(self.api_version)
return "infoblox_netmri.api.broker.{ver}.{pckg}_broker.{name}Broker".format(
ver=version,
pckg=to_snake(name),
name=name
)
| 35.320313
| 98
| 0.58055
|
0574ccdf72bb9b8368b9992b9a0ae3308da0f6f1
| 9,719
|
py
|
Python
|
gssutils/transform/cubes.py
|
GSS-Cogs/gss-utils
|
55165c58d8c4fb9dd4ad364059f2c1de24935c06
|
[
"Apache-2.0"
] | 1
|
2021-01-21T23:35:12.000Z
|
2021-01-21T23:35:12.000Z
|
gssutils/transform/cubes.py
|
GSS-Cogs/gss-utils
|
55165c58d8c4fb9dd4ad364059f2c1de24935c06
|
[
"Apache-2.0"
] | 254
|
2019-10-07T16:17:48.000Z
|
2022-01-31T09:20:16.000Z
|
gssutils/transform/cubes.py
|
GSS-Cogs/gss-utils
|
55165c58d8c4fb9dd4ad364059f2c1de24935c06
|
[
"Apache-2.0"
] | 5
|
2020-09-30T16:01:12.000Z
|
2021-01-13T18:44:28.000Z
|
import json
import logging
import os
import copy
from pathlib import Path
from urllib.parse import urljoin
from typing import Optional
from gssutils.csvw.mapping import CSVWMapping
from gssutils.csvw.namespaces import URI
from gssutils.utils import pathify
class Cubes:
"""
A class representing multiple datacubes
"""
def __init__(
self,
info_json="info.json",
destination_path="out",
base_uri="http://gss-data.org.uk",
job_name=None,
codelists_path: Optional[str] = None,
):
with open(info_json, "r") as info_file:
self.info = json.load(info_file)
# Where we don't have a mapping field, add one to avoid iteration errors later
if "columns" not in self.info["transform"].keys():
self.info["transform"]["columns"] = {}
self.destination_folder = Path(destination_path)
self.destination_folder.mkdir(exist_ok=True, parents=True)
self.base_uri = base_uri
self.local_codelists: Optional[str] = None
if codelists_path is not None:
self.local_codelists = codelists_path
elif (Path(destination_path) / ".." / "codelists").exists():
self.local_codelists = "../codelists"
self.cubes = []
self.has_ran = False
if job_name is not None:
logging.warning(
"The passing of job_name= has been depreciated and no longer does anything, please"
"remove this keyword argument"
)
def add_cube(
self,
scraper,
dataframe,
title,
graph=None,
info_json_dict=None,
override_containing_graph=None,
suppress_catalog_and_dsd_output: bool = False,
):
"""
Add a single datacube to the cubes class.
"""
self.cubes.append(
Cube(
self.base_uri,
scraper,
dataframe,
title,
graph,
info_json_dict,
override_containing_graph,
suppress_catalog_and_dsd_output,
self.local_codelists,
)
)
def output_all(self):
"""
Output every cube object we've added to the cubes() class.
"""
if len(self.cubes) == 0:
raise Exception(
"Please add at least one datacube with '.add_cube' before "
"calling output_all()."
)
# Don't let people add 1, output 1, add 2 output 2 etc
# They'll want to but it'll mangle the url namespacing
if self.has_ran:
raise Exception(
"Calling 'output_all' on the Cubes class is a destructive process and "
"has already run. You need to (re)add all your datacubes before doing so."
)
# Are we outputting more than one cube? We need to know this before we output
is_multi_cube = len(self.cubes) >= 2
# The many-to-one scenario
# If all cubes are getting written to a single graph it plays hell with the
# single vs multiple namespaces logic, so we're going to explicitly check for and handle that
        is_many_to_one = False
if is_multi_cube:
to_graph_statements = [x.graph for x in self.cubes if x.graph is not None]
if len(to_graph_statements) == len(self.cubes):
if len(set(to_graph_statements)) == 1:
is_many_to_one = True
for cube in self.cubes:
try:
cube.output(
self.destination_folder, is_multi_cube, is_many_to_one, self.info
)
except Exception as err:
raise Exception(
"Exception encountered while processing datacube '{}'.".format(
cube.title
)
) from err
self.has_ran = True
class Cube:
"""
A class to encapsulate the dataframe and associated metadata that constitutes a single datacube
"""
override_containing_graph_uri: Optional[str]
def __init__(
self,
base_uri,
scraper,
dataframe,
title,
graph,
info_json_dict,
override_containing_graph_uri: Optional[str],
suppress_catalog_and_dsd_output: bool,
local_codelists: Optional[str] = None,
):
self.scraper = (
scraper # note - the metadata of a scrape, not the actual data source
)
self.dataframe = dataframe
self.title = title
self.scraper.set_base_uri(base_uri)
self.graph = graph
self.info_json_dict = copy.deepcopy(
info_json_dict
) # don't copy a pointer, snapshot a thing
self.override_containing_graph_uri: Optional[URI] = URI(
override_containing_graph_uri
)
self.suppress_catalog_and_dsd_output = suppress_catalog_and_dsd_output
self.local_codelists = local_codelists
def _instantiate_map(self, destination_folder, pathified_title, info_json):
"""
Create a basic CSVWMapping object for this cube
"""
map_obj = CSVWMapping()
# Use the info.json for the mapping by default, but let people
# pass a new one in (for where we need to get clever)
info_json = info_json if self.info_json_dict is None else self.info_json_dict
map_obj.set_accretive_upload(info_json)
map_obj.set_mapping(info_json)
map_obj.set_suppress_catalog_and_dsd_output(
self.suppress_catalog_and_dsd_output
)
map_obj.set_csv(destination_folder / f"{pathified_title}.csv")
map_obj.set_dataset_uri(
urljoin(self.scraper._base_uri, f"data/{self.scraper._dataset_id}")
)
if self.local_codelists is not None:
map_obj.set_local_codelist_base(self.local_codelists)
if self.override_containing_graph_uri is not None:
map_obj.set_containing_graph_uri(self.override_containing_graph_uri)
else:
map_obj.set_containing_graph_uri(self.scraper.dataset.pmdcatGraph)
return map_obj
def _populate_csvw_mapping(self, destination_folder, pathified_title, info_json):
"""
Use the provided details object to generate then fully populate the mapping class
"""
# The base CSVWMapping class
map_obj = self._instantiate_map(destination_folder, pathified_title, info_json)
# TODO - IF we do codelist generation here, this would be the point of intervention
return map_obj
def output(self, destination_folder, is_multi_cube, is_many_to_one, info_json):
"""
Outputs the csv and csv-w schema for a single 'Cube' held in the 'Cubes' object
"""
graph_name = pathify(self.title) if self.graph is None else pathify(self.graph)
if isinstance(self.scraper.dataset.family, list):
primary_family = pathify(self.scraper.dataset.family[0])
else:
primary_family = pathify(self.scraper.dataset.family)
main_dataset_id = info_json.get("id", Path.cwd().name)
if is_many_to_one:
            # Sanity check, because this isn't as obvious as I'd like / is a bit weird
err_msg = (
"Aborting. Where you are writing multiple cubes to a single output graph, the "
+ "pathified graph specified needs to match you pathified current working directory. "
+ 'Got "{}", expected "{}".'.format(
graph_name, pathify(Path(os.getcwd()).name)
)
)
assert main_dataset_id == graph_name, err_msg
logging.warning(
"Output Scenario 1: Many cubes written to the default output (cwd())"
)
dataset_path = f"gss_data/{primary_family}/{graph_name}"
elif is_multi_cube:
logging.warning(
"Output Scenario 2: Many cubes written to many stated outputs"
)
dataset_path = f"gss_data/{primary_family}/{main_dataset_id}/{graph_name}"
else:
logging.warning(
"Output Scenario 3: A single cube written to the default output (cwd())"
)
dataset_path = f"gss_data/{primary_family}/{main_dataset_id}"
self.scraper.set_dataset_id(dataset_path)
# output the tidy data
self.dataframe.to_csv(
destination_folder / f"{pathify(self.title)}.csv", index=False
)
is_accretive_upload = (
info_json is not None
and "load" in info_json
and "accretiveUpload" in info_json["load"]
and info_json["load"]["accretiveUpload"]
)
# Don't output trig file if we're performing an accretive upload (or we have been asked to suppress it).
# We don't want to duplicate information we already have.
if not is_accretive_upload and not self.suppress_catalog_and_dsd_output:
# Output the trig
trig_to_use = self.scraper.generate_trig()
with open(
destination_folder / f"{pathify(self.title)}.csv-metadata.trig", "wb"
) as metadata:
metadata.write(trig_to_use)
# Output csv and csvw
populated_map_obj = self._populate_csvw_mapping(
destination_folder, pathify(self.title), info_json
)
populated_map_obj.write(
destination_folder / f"{pathify(self.title)}.csv-metadata.json"
)
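# Illustrative sketch (not part of the original module): how a transform script might
# drive the Cubes API defined above. Only Cubes, add_cube and output_all come from this
# module; the Scraper import, its seed= argument and the toy dataframe are assumptions
# used purely for illustration.
if __name__ == "__main__":
    import pandas as pd
    from gssutils import Scraper  # assumed location of the metadata scraper

    scraper = Scraper(seed="info.json")      # assumed constructor signature
    df = pd.DataFrame({"Value": [1, 2, 3]})  # stand-in tidy data

    cubes = Cubes(info_json="info.json", destination_path="out")
    cubes.add_cube(scraper, df, "Example dataset")
    cubes.output_all()  # writes the CSV, csv-metadata.json and (unless suppressed) trig outputs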
| 35.600733
| 112
| 0.605206
|
a51bb9232d03fea7ae412202ac2ce9629d659eb9
| 1,222
|
py
|
Python
|
nova/api/openstack/compute/legacy_v2/contrib/__init__.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/legacy_v2/contrib/__init__.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/legacy_v2/contrib/__init__.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 1
|
2020-03-01T17:04:57.000Z
|
2020-03-01T17:04:57.000Z
|
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contrib contains extensions that are shipped with nova.
It can't be called 'extensions' because that causes namespacing problems.
"""
from oslo_log import log as logging
from nova.api.openstack import extensions
import nova.conf
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def standard_extensions(ext_mgr):
extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__)
def select_extensions(ext_mgr):
extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,
CONF.osapi_compute_ext_list)
| 32.157895
| 78
| 0.738134
|
31563e01e0c37e006ebec92e498e7dd89dd9162b
| 1,052
|
py
|
Python
|
example_nodeview/migrations/0001_initial.py
|
SolarLiner/django-node
|
0d094301c57fdcd9369dfe40a87566c7092a25d9
|
[
"MIT"
] | null | null | null |
example_nodeview/migrations/0001_initial.py
|
SolarLiner/django-node
|
0d094301c57fdcd9369dfe40a87566c7092a25d9
|
[
"MIT"
] | null | null | null |
example_nodeview/migrations/0001_initial.py
|
SolarLiner/django-node
|
0d094301c57fdcd9369dfe40a87566c7092a25d9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-05-11 21:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='House',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60)),
('houses', models.ManyToManyField(related_name='owners', to='example_nodeview.House')),
('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='children', to='example_nodeview.Person')),
],
),
]
| 32.875
| 146
| 0.594106
|
242d312dda357b9c9f9ff472ccf39298ab86ba78
| 8,772
|
py
|
Python
|
alpaca_trade_api/rest.py
|
h55nick/alpaca-trade-api-python
|
b870b0a1896c1373a2ee0ce37179ba21b4f642ee
|
[
"Apache-2.0"
] | null | null | null |
alpaca_trade_api/rest.py
|
h55nick/alpaca-trade-api-python
|
b870b0a1896c1373a2ee0ce37179ba21b4f642ee
|
[
"Apache-2.0"
] | null | null | null |
alpaca_trade_api/rest.py
|
h55nick/alpaca-trade-api-python
|
b870b0a1896c1373a2ee0ce37179ba21b4f642ee
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import requests
from requests.exceptions import HTTPError
import time
from .common import get_base_url, get_credentials
from .entity import (
Account, Asset, Order, Position,
AssetBars, Quote, Fundamental,
Clock, Calendar,
)
from . import polygon
logger = logging.getLogger(__name__)
class RetryException(Exception):
pass
class APIError(Exception):
'''Represent API related error.
error.status_code will have http status code.
'''
def __init__(self, error, http_error=None):
super().__init__(error['message'])
self._error = error
self._http_error = http_error
@property
def code(self):
return self._error['code']
@property
def status_code(self):
http_error = self._http_error
if http_error is not None and hasattr(http_error, 'response'):
return http_error.response.status_code
@property
def request(self):
if self._http_error is not None:
return self._http_error.request
@property
def response(self):
if self._http_error is not None:
return self._http_error.response
class REST(object):
def __init__(self, key_id=None, secret_key=None, base_url=None):
self._key_id, self._secret_key = get_credentials(key_id, secret_key)
self._base_url = base_url or get_base_url()
self._session = requests.Session()
self._retry = int(os.environ.get('APCA_RETRY_MAX', 3))
self._retry_wait = int(os.environ.get('APCA_RETRY_WAIT', 3))
self._retry_codes = [int(o)for o in os.environ.get(
'APCA_RETRY_CODES', '429,504').split(',')]
self.polygon = polygon.REST(
self._key_id, 'staging' in self._base_url)
def _request(self, method, path, data=None, prefix='/v1'):
url = self._base_url + prefix + path
headers = {
'APCA-API-KEY-ID': self._key_id,
'APCA-API-SECRET-KEY': self._secret_key,
}
opts = {
'headers': headers,
}
if method.upper() == 'GET':
opts['params'] = data
else:
opts['json'] = data
retry = self._retry
if retry < 0:
retry = 0
while retry >= 0:
try:
return self._one_request(method, url, opts, retry)
except RetryException:
retry_wait = self._retry_wait
                logger.warning(
'sleep {} seconds and retrying {} '
'{} more time(s)...'.format(
retry_wait, url, retry))
time.sleep(retry_wait)
retry -= 1
continue
def _one_request(self, method, url, opts, retry):
'''
Perform one request, possibly raising RetryException in the case
the response is 429. Otherwise, if error text contain "code" string,
then it decodes to json object and returns APIError.
Returns the body json in the 200 status.
'''
retry_codes = self._retry_codes
resp = self._session.request(method, url, **opts)
try:
resp.raise_for_status()
except HTTPError as http_error:
# retry if we hit Rate Limit
if resp.status_code in retry_codes and retry > 0:
raise RetryException()
if 'code' in resp.text:
error = resp.json()
if 'code' in error:
raise APIError(error, http_error)
else:
raise
if resp.text != '':
return resp.json()
return None
def get(self, path, data=None):
return self._request('GET', path, data)
def post(self, path, data=None):
return self._request('POST', path, data)
def delete(self, path, data=None):
return self._request('DELETE', path, data)
def get_account(self):
'''Get the account'''
resp = self.get('/account')
return Account(resp)
def list_orders(self, status=None):
'''Get a list of orders'''
params = dict()
if status is not None:
params['status'] = status
resp = self.get('/orders', params)
return [Order(o) for o in resp]
def submit_order(self, symbol, qty, side, type, time_in_force,
limit_price=None, stop_price=None, client_order_id=None):
'''Request a new order'''
params = {
'symbol': symbol,
'qty': qty,
'side': side,
'type': type,
'time_in_force': time_in_force,
}
if limit_price is not None:
params['limit_price'] = limit_price
if stop_price is not None:
params['stop_price'] = stop_price
if client_order_id is not None:
params['client_order_id'] = client_order_id
resp = self.post('/orders', params)
return Order(resp)
def get_order_by_client_order_id(self, client_order_id):
'''Get an order by client order id'''
resp = self.get('/orders:by_client_order_id', {
'client_order_id': client_order_id,
})
return Order(resp)
def get_order(self, order_id):
'''Get an order'''
resp = self.get('/orders/{}'.format(order_id))
return Order(resp)
def cancel_order(self, order_id):
'''Cancel an order'''
self.delete('/orders/{}'.format(order_id))
def list_positions(self):
'''Get a list of open positions'''
resp = self.get('/positions')
return [Position(o) for o in resp]
def get_position(self, symbol):
'''Get an open position'''
resp = self.get('/positions/{}'.format(symbol))
return Position(resp)
def list_assets(self, status=None, asset_class=None):
'''Get a list of assets'''
params = {
'status': status,
            'asset_class': asset_class,
}
resp = self.get('/assets', params)
return [Asset(o) for o in resp]
def get_asset(self, symbol):
'''Get an asset'''
resp = self.get('/assets/{}'.format(symbol))
return Asset(resp)
def list_quotes(self, symbols):
'''Get a list of quotes'''
if not isinstance(symbols, str):
symbols = ','.join(symbols)
params = {
'symbols': symbols,
}
resp = self.get('/quotes', params)
return [Quote(o) for o in resp]
def get_quote(self, symbol):
'''Get a quote'''
resp = self.get('/assets/{}/quote'.format(symbol))
return Quote(resp)
def list_fundamentals(self, symbols):
'''Get a list of fundamentals'''
if not isinstance(symbols, str):
symbols = ','.join(symbols)
params = {
'symbols': symbols,
}
resp = self.get('/fundamentals', params)
return [Fundamental(o) for o in resp]
def get_fundamental(self, symbol):
'''Get a fundamental'''
resp = self.get('/assets/{}/fundamental'.format(symbol))
return Fundamental(resp)
def list_bars(
self,
symbols,
timeframe,
start_dt=None,
end_dt=None,
limit=None):
'''Get a list of bars'''
if not isinstance(symbols, str):
symbols = ','.join(symbols)
params = {
'symbols': symbols,
'timeframe': timeframe,
}
if start_dt is not None:
params['start_dt'] = start_dt
if end_dt is not None:
params['end_dt'] = end_dt
if limit is not None:
params['limit'] = limit
resp = self.get('/bars', params)
return [AssetBars(o) for o in resp]
def get_bars(
self,
symbol,
timeframe,
start_dt=None,
end_dt=None,
limit=None):
'''Get bars'''
params = {
'timeframe': timeframe,
}
if start_dt is not None:
params['start_dt'] = start_dt
if end_dt is not None:
params['end_dt'] = end_dt
if limit is not None:
params['limit'] = limit
resp = self.get('/assets/{}/bars'.format(symbol), params)
return AssetBars(resp)
def get_clock(self):
resp = self.get('/clock')
return Clock(resp)
def get_calendar(self, start=None, end=None):
params = {}
if start is not None:
params['start'] = start
if end is not None:
params['end'] = end
resp = self.get('/calendar', data=params)
return [Calendar(o) for o in resp]
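# Illustrative sketch (not part of the original module): minimal use of the REST client
# defined above. Credentials are resolved by get_credentials(), typically from the
# APCA_* environment variables; the symbol and order parameters are arbitrary examples,
# not recommendations.
if __name__ == "__main__":
    api = REST()
    print(api.get_account())
    api.submit_order(symbol='AAPL', qty=1, side='buy',
                     type='market', time_in_force='day')
    print(api.list_orders(status='open'))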
| 30.778947
| 78
| 0.55301
|
fb2c9882bafbd9d4dcec664807375cd42e6d8cd8
| 4,938
|
py
|
Python
|
batchgenerators/generators/spatial_transform_generators.py
|
ORippler/MSD_2018
|
bf2bb158d8718052d0160daa8cce325fbe0728b4
|
[
"Apache-2.0"
] | 15
|
2018-12-03T12:33:30.000Z
|
2021-06-18T21:43:53.000Z
|
batchgenerators/generators/spatial_transform_generators.py
|
BboyT/batchgenerators
|
5ab40f822da52fb2c6bb207060ce3a6d630f4efe
|
[
"Apache-2.0"
] | 1
|
2018-12-09T04:04:14.000Z
|
2019-01-16T16:39:32.000Z
|
batchgenerators/generators/spatial_transform_generators.py
|
BboyT/batchgenerators
|
5ab40f822da52fb2c6bb207060ce3a6d630f4efe
|
[
"Apache-2.0"
] | 2
|
2018-11-10T14:03:03.000Z
|
2019-04-04T04:58:25.000Z
|
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.augmentations.spatial_transformations import augment_channel_translation, augment_mirroring, \
augment_spatial
def mirror_axis_generator(generator, axes=(2, 3, 4)):
'''
yields mirrored data and seg.
iff axes == [2,3,4]:
3D data: 12.5% of each constellation: x only, y only, z only, xy, xz, yz, xzy, none
2D data: 25% of each constellation: x, xy, y, none
'''
for data_dict in generator:
assert "data" in list(
data_dict.keys()), "your data generator needs to return a python dictionary with at least a 'data' key value pair"
data = data_dict["data"]
seg = None
if "seg" in list(data_dict.keys()):
seg = data_dict["seg"]
data, seg = augment_mirroring(data, seg, axes)
data_dict["data"] = data
if seg is not None:
data_dict["seg"] = seg
yield data_dict
def channel_translation_generator(generator, const_channel=0, max_shifts=None):
"""
Translates all channels within an instance of a batch according to randomly drawn shifts from within [-max_shift, max_shift].
One channel is held constant, the others are shifted in the same manner.
:param generator:
:param const_channel:
:param max_shifts:
:return:
"""
if max_shifts is None:
max_shifts = {'z': 2, 'y': 2, 'x': 2}
for data_dict in generator:
data_dict["data"] = augment_channel_translation(data_dict["data"], const_channel, max_shifts)
yield data_dict
def spatial_augmentation_generator(generator, patch_size, patch_center_dist_from_border=30,
do_elastic_deform=True, alpha=(0., 1000.), sigma=(10., 13.),
do_rotation=True, angle_x=(0, 2 * np.pi), angle_y=(0, 2 * np.pi),
angle_z=(0, 2 * np.pi),
do_scale=True, scale=(0.75, 1.25), border_mode_data='nearest', border_cval_data=0,
order_data=3,
border_mode_seg='constant', border_cval_seg=0, order_seg=0, random_crop=True):
'''
THE ultimate generator. It has all you need. It alleviates the problem of having to crop your data to a reasonably sized patch size before plugging it into the
    old ultimate_transform generator (in the old one you would put in patches larger than your final patch size so that rotations and deformations do not introduce black borders).
Before: Large crops = no borders but slow, small crops = black borders (duh).
Here you can just plug in the whole uncropped image and get your desired patch size as output, without performance loss or black borders
:param generator:
:param do_elastic_deform:
:param alpha:
:param sigma:
:param do_rotation:
:param angle_x:
:param angle_y:
:param angle_z:
:param do_scale:
:param scale:
:return:
'''
if not (isinstance(alpha, list) or isinstance(alpha, tuple)):
alpha = [alpha, alpha]
if not (isinstance(sigma, list) or isinstance(sigma, tuple)):
sigma = [sigma, sigma]
for data_dict in generator:
assert "data" in list(
data_dict.keys()), "your data generator needs to return a python dictionary with at least a 'data' key value pair"
data = data_dict["data"]
do_seg = False
seg = None
shape = patch_size
assert len(shape) == len(data.shape[2:]), "dimension of patch_size and data must match!"
if "seg" in list(data_dict.keys()):
seg = data_dict["seg"]
do_seg = True
data_result, seg_result = augment_spatial(data, seg, patch_size, patch_center_dist_from_border,
do_elastic_deform, alpha, sigma, do_rotation, angle_x, angle_y,
angle_z, do_scale, scale, border_mode_data, border_cval_data,
order_data, border_mode_seg, border_cval_seg, order_seg,
random_crop)
if do_seg:
data_dict['seg'] = seg_result
data_dict['data'] = data_result
yield data_dict
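# Illustrative sketch (not part of the original module): wrapping a trivial in-memory
# generator with mirror_axis_generator from above. The (batch, channel, x, y) layout and
# the random arrays are arbitrary choices for the example, not a recommended pipeline.
def _example_mirror_usage():
    import numpy as np

    def dummy_generator():
        while True:
            yield {'data': np.random.rand(2, 1, 32, 32),
                   'seg': np.random.randint(0, 2, size=(2, 1, 32, 32))}

    gen = mirror_axis_generator(dummy_generator())
    batch = next(gen)
    return batch['data'].shape, batch['seg'].shape  # shapes unchanged, content possibly mirrored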
| 45.302752
| 179
| 0.63305
|
4594cf1ac38f0d8aba5a0746d6edada2c33f5284
| 1,216
|
py
|
Python
|
peter_lists/users/views.py
|
pvize1/peter_lists
|
77e9f30cfc45f500e059b7b163db541335180332
|
[
"MIT"
] | null | null | null |
peter_lists/users/views.py
|
pvize1/peter_lists
|
77e9f30cfc45f500e059b7b163db541335180332
|
[
"MIT"
] | 8
|
2021-05-12T05:53:42.000Z
|
2022-03-31T04:08:18.000Z
|
peter_lists/users/views.py
|
pvize1/peter_lists
|
77e9f30cfc45f500e059b7b163db541335180332
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = User
fields = [
"name",
"bio",
]
success_message = _("Information successfully updated")
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return self.request.user
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
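# Illustrative sketch (not part of the original module): URL wiring consistent with the
# reverse("users:detail", ...) calls above. In a real project this would sit in
# users/urls.py with app_name = "users" at module level; the "~update~" and "~redirect~"
# prefixes are assumptions, only the username-based detail route is implied by the views.
def _example_urlpatterns():
    from django.urls import path

    return [
        path("~redirect/", view=user_redirect_view, name="redirect"),
        path("~update/", view=user_update_view, name="update"),
        path("<str:username>/", view=user_detail_view, name="detail"),
    ]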
| 24.816327
| 87
| 0.751645
|
f481a3800eb7f5f868fa8dd0af4beff10f62dc3a
| 2,159
|
py
|
Python
|
labs/lab3/problem5.py
|
ioanabirsan/python
|
5cff2acf36092e450bb269b37a0571ee62ccec31
|
[
"Apache-2.0"
] | null | null | null |
labs/lab3/problem5.py
|
ioanabirsan/python
|
5cff2acf36092e450bb269b37a0571ee62ccec31
|
[
"Apache-2.0"
] | null | null | null |
labs/lab3/problem5.py
|
ioanabirsan/python
|
5cff2acf36092e450bb269b37a0571ee62ccec31
|
[
"Apache-2.0"
] | null | null | null |
# 5. Let the function validate_dict take as parameters a set of tuples representing validation rules for a
# dictionary with string keys and string values, plus the dictionary itself.
# A rule is defined as:
# (key, "prefix", "middle", "suffix").
# A value is considered valid if it starts with "prefix", "middle" occurs
# inside the value (not at the beginning or the end), and it ends with "suffix".
# The function returns True if the dictionary given as a parameter satisfies all the rules, False otherwise.
# Example: the rules [("key1", "", "inside", ""), ("key2", "start", "middle", "winter")] and
# the dictionary {"key2": "starting the engine in the middle of the winter",
# "key1": "come inside, it's too cold outside", "key3": "this is not valid"} => False
# because although the rules are satisfied for "key1" and "key2", "key3" is present but does not appear in the rules.
def is_valid_value(prefix, middle, suffix, value):
return value.startswith(prefix) \
and middle in value \
and value.endswith(suffix) \
and not value.startswith(middle) \
and not value.endswith(middle)
def validate_dict(validation_rules, dictionary):
dictionary_copy = dictionary.copy()
for key, prefix, middle, suffix in validation_rules:
if key not in dictionary:
return False
        if not is_valid_value(prefix, middle, suffix, dictionary[key]):
return False
dictionary_copy.pop(key)
    if len(dictionary_copy) != 0:
return False
return True
print(validate_dict([("key1", "", "inside", ""), ("key2", "start", "middle", "winter")],
{"key2": "starting the engine in the middle of the winter",
"key1": "come inside, it's too cold outside", "key3": "this is not valid"}))
print(validate_dict([("key1", "", "inside", ""), ("key2", "start", "middle", "winter"), ("key3", "this", "valid", "valid")],
{"key2": "starting the engine in the middle of the winter",
"key1": "come inside, it's too cold outside", "key3": "this is not valid"}))
| 46.934783
| 124
| 0.642427
|
5b8e85584196b1a2dac112b1a83a0509471e43e4
| 5,620
|
py
|
Python
|
jina/drivers/helper.py
|
YueLiu-jina/jina
|
f3e860313f26edc6d9f6e6ecc74cf6c2a3c65bff
|
[
"Apache-2.0"
] | null | null | null |
jina/drivers/helper.py
|
YueLiu-jina/jina
|
f3e860313f26edc6d9f6e6ecc74cf6c2a3c65bff
|
[
"Apache-2.0"
] | null | null | null |
jina/drivers/helper.py
|
YueLiu-jina/jina
|
f3e860313f26edc6d9f6e6ecc74cf6c2a3c65bff
|
[
"Apache-2.0"
] | null | null | null |
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import mimetypes
import os
import urllib.parse
import urllib.request
from typing import Dict, Any, Iterable, Tuple
import numpy as np
from ..proto import jina_pb2
def pb2array(blob: 'jina_pb2.NdArray') -> 'np.ndarray':
"""Convert a blob protobuf to a numpy ndarray.
Note if the argument ``quantize`` is specified in :func:`array2pb` then the returned result may be lossy.
Nonetheless, it will always in original ``dtype``, i.e. ``float32`` or ``float64``
:param blob: a blob described in protobuf
"""
x = np.frombuffer(blob.buffer, dtype=blob.dtype)
if blob.quantization == jina_pb2.NdArray.FP16:
x = x.astype(blob.original_dtype)
elif blob.quantization == jina_pb2.NdArray.UINT8:
x = x.astype(blob.original_dtype) * blob.scale + blob.min_val
return x.reshape(blob.shape)
def array2pb(x: 'np.ndarray', quantize: str = None) -> 'jina_pb2.NdArray':
"""Convert a numpy ndarray to blob protobuf.
:param x: the target ndarray
:param quantize: the quantization method used when converting to protobuf.
        Available options are ``fp16`` and ``uint8``; default is None.
Remarks on quantization:
The quantization only works when ``x`` is in ``float32`` or ``float64``. The motivation is to
save the network bandwidth by using less bits to store the numpy array in the protobuf.
- ``fp16`` quantization is lossless, can be used widely. Each float is represented by 16 bits.
- ``uint8`` quantization is lossy. Each float is represented by 8 bits. The algorithm behind is standard scaling.
There is no need to specify the quantization type in :func:`pb2array`,
as the quantize type is stored and the blob is self-contained to recover the original numpy array
"""
blob = jina_pb2.NdArray()
quantize = os.environ.get('JINA_ARRAY_QUANT', quantize)
if quantize == 'fp16' and (x.dtype == np.float32 or x.dtype == np.float64):
blob.quantization = jina_pb2.NdArray.FP16
blob.original_dtype = x.dtype.name
x = x.astype(np.float16)
elif quantize == 'uint8' and (x.dtype == np.float32 or x.dtype == np.float64 or x.dtype == np.float16):
blob.quantization = jina_pb2.NdArray.UINT8
blob.max_val, blob.min_val = x.max(), x.min()
blob.original_dtype = x.dtype.name
blob.scale = (blob.max_val - blob.min_val) / 256
x = ((x - blob.min_val) / blob.scale).astype(np.uint8)
else:
blob.quantization = jina_pb2.NdArray.NONE
blob.buffer = x.tobytes()
blob.shape.extend(list(x.shape))
blob.dtype = x.dtype.str
return blob
def extract_docs(docs: Iterable['jina_pb2.Document'], embedding: bool) -> Tuple:
"""Iterate over a list of protobuf documents and extract chunk-level information from them
:param docs: an iterable of protobuf documents
:param embedding: an indicator of extracting embedding or not.
If ``True`` then all doc-level embedding are extracted.
If ``False`` then ``text``, ``buffer``, ``blob`` info of each doc are extracted
:return: A tuple of 3 pieces:
- a numpy ndarray of extracted info
- the corresponding doc references
- the doc_id list where the doc has no contents, useful for debugging
"""
contents = []
docs_pts = []
bad_doc_ids = []
if embedding:
_extract_fn = lambda doc: doc.embedding.buffer and pb2array(doc.embedding)
else:
_extract_fn = lambda doc: doc.text or doc.buffer or (doc.blob and pb2array(doc.blob))
for doc in docs:
content = _extract_fn(doc)
if content is not None:
contents.append(content)
docs_pts.append(doc)
else:
bad_doc_ids.append((doc.id, doc.parent_id))
contents = np.stack(contents) if contents else None
return contents, docs_pts, bad_doc_ids
def routes2str(msg: 'jina_pb2.Message', flag_current: bool = False) -> str:
"""Get the string representation of the routes in a message.
:param msg: a protobuf message
:param flag_current: flag the current :class:`BasePod` as ``⚐``
"""
route_str = [r.pod for r in msg.envelope.routes]
if flag_current:
route_str.append('⚐')
from ..helper import colored
return colored('▸', 'green').join(route_str)
def add_route(evlp: 'jina_pb2.Envelope', name: str, identity: str) -> None:
"""Add a route to the envelope
:param evlp: the envelope to modify
:param name: the name of the pod service
:param identity: the identity of the pod service
"""
r = evlp.routes.add()
r.pod = name
r.start_time.GetCurrentTime()
r.pod_id = identity
def pb_obj2dict(obj, keys: Iterable[str]) -> Dict[str, Any]:
"""Convert a protobuf object to a Dict by selected keys
:param obj: a protobuf object
:param keys: an iterable of keys for extraction
"""
ret = {k: getattr(obj, k) for k in keys if hasattr(obj, k)}
if 'blob' in ret:
ret['blob'] = pb2array(obj.blob)
return ret
def guess_mime(uri):
# guess when uri points to a local file
m_type = mimetypes.guess_type(uri)[0]
# guess when uri points to a remote file
if not m_type and urllib.parse.urlparse(uri).scheme in {'http', 'https', 'data'}:
page = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
tmp = urllib.request.urlopen(page)
m_type = tmp.info().get_content_type()
return m_type
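# Illustrative sketch (not part of the original module): round-tripping a numpy array
# through array2pb / pb2array defined above. Without quantization the round trip is
# bit-exact (unless the JINA_ARRAY_QUANT environment variable overrides it); with
# quantize='uint8' it is lossy, so only the shape is checked here. The array size is
# an arbitrary example.
def _example_quantization_roundtrip():
    import numpy as np

    x = np.random.rand(4, 8).astype(np.float32)
    exact = pb2array(array2pb(x))
    assert np.array_equal(exact, x)

    lossy = pb2array(array2pb(x, quantize='uint8'))
    assert lossy.shape == x.shape
    return exact, lossy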
| 36.258065
| 125
| 0.659253
|
4b1e4975c7c35e2ab2ee452840d52a076eac9034
| 91,633
|
py
|
Python
|
mne/viz/utils.py
|
vagechirkov/mne-python
|
f20ffa0eb46e76baf4e30437bc03d765da83b38a
|
[
"BSD-3-Clause"
] | 1
|
2020-06-18T14:05:26.000Z
|
2020-06-18T14:05:26.000Z
|
mne/viz/utils.py
|
jaromilfrossard/mne-python
|
8714cda45d0f0269c15026323a9ac689b47722f8
|
[
"BSD-3-Clause"
] | 2
|
2017-01-18T16:29:02.000Z
|
2017-01-19T16:03:25.000Z
|
mne/viz/utils.py
|
jaromilfrossard/mne-python
|
8714cda45d0f0269c15026323a9ac689b47722f8
|
[
"BSD-3-Clause"
] | 1
|
2020-06-18T14:15:16.000Z
|
2020-06-18T14:15:16.000Z
|
# -*- coding: utf-8 -*-
"""Utility functions for plotting M/EEG data."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Clemens Brunner <clemens.brunner@gmail.com>
# Daniel McCloy <dan@mccloy.info>
#
# License: Simplified BSD
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
import difflib
import webbrowser
import tempfile
import math
import numpy as np
from copy import deepcopy
from distutils.version import LooseVersion
import warnings
from datetime import datetime
from ..defaults import _handle_default
from ..fixes import _get_args
from ..io import show_fiff, Info
from ..io.constants import FIFF
from ..io.pick import (channel_type, channel_indices_by_type, pick_channels,
_pick_data_channels, _DATA_CH_TYPES_SPLIT,
_DATA_CH_TYPES_ORDER_DEFAULT, _VALID_CHANNEL_TYPES,
pick_info, _picks_by_type, pick_channels_cov,
_contains_ch_type)
from ..io.meas_info import create_info
from ..rank import compute_rank
from ..io.proj import setup_proj
from ..utils import (verbose, get_config, warn, _check_ch_locs, _check_option,
logger, fill_doc, _pl, _check_sphere, _ensure_int,
_validate_type)
from ..transforms import apply_trans
_channel_type_prettyprint = {'eeg': "EEG channel", 'grad': "Gradiometer",
'mag': "Magnetometer", 'seeg': "sEEG channel",
'dbs': "DBS channel", 'eog': "EOG channel",
'ecg': "ECG sensor", 'emg': "EMG sensor",
'ecog': "ECoG channel",
'misc': "miscellaneous sensor"}
def _setup_vmin_vmax(data, vmin, vmax, norm=False):
"""Handle vmin and vmax parameters for visualizing topomaps.
For the normal use-case (when `vmin` and `vmax` are None), the parameter
`norm` drives the computation. When norm=False, data is supposed to come
from a mag and the output tuple (vmin, vmax) is symmetric range
(-x, x) where x is the max(abs(data)). When norm=True (a.k.a. data is the
L2 norm of a gradiometer pair) the output tuple corresponds to (0, x).
Otherwise, vmin and vmax are callables that drive the operation.
"""
should_warn = False
if vmax is None and vmin is None:
vmax = np.abs(data).max()
vmin = 0. if norm else -vmax
if vmin == 0 and np.min(data) < 0:
should_warn = True
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
vmin = 0. if norm else np.min(data)
if vmin == 0 and np.min(data) < 0:
should_warn = True
if callable(vmax):
vmax = vmax(data)
elif vmax is None:
vmax = np.max(data)
if should_warn:
warn_msg = ("_setup_vmin_vmax output a (min={vmin}, max={vmax})"
" range whereas the minimum of data is {data_min}")
warn_val = {'vmin': vmin, 'vmax': vmax, 'data_min': np.min(data)}
warn(warn_msg.format(**warn_val), UserWarning)
return vmin, vmax
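# Illustrative sketch (not part of the original module): the two main behaviours of
# _setup_vmin_vmax described in its docstring -- a symmetric (-x, x) range for signed
# data and a (0, x) range when norm=True. The toy arrays are arbitrary examples.
def _example_setup_vmin_vmax():
    import numpy as np

    data = np.array([-3., 1., 2.])
    assert _setup_vmin_vmax(data, None, None) == (-3., 3.)                     # symmetric range
    assert _setup_vmin_vmax(np.abs(data), None, None, norm=True) == (0., 3.)   # norm data
    return True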
def plt_show(show=True, fig=None, **kwargs):
"""Show a figure while suppressing warnings.
Parameters
----------
show : bool
Show the figure.
fig : instance of Figure | None
If non-None, use fig.show().
**kwargs : dict
Extra arguments for :func:`matplotlib.pyplot.show`.
"""
from matplotlib import get_backend
import matplotlib.pyplot as plt
if show and get_backend() != 'agg':
(fig or plt).show(**kwargs)
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
"""Adjust subplot parameters to give specified padding.
.. note:: For plotting please use this function instead of
``plt.tight_layout``.
Parameters
----------
pad : float
Padding between the figure edge and the edges of subplots, as a
fraction of the font-size.
h_pad : float
Padding height between edges of adjacent subplots.
Defaults to ``pad_inches``.
w_pad : float
Padding width between edges of adjacent subplots.
Defaults to ``pad_inches``.
fig : instance of Figure
Figure to apply changes to.
Notes
-----
This will not force constrained_layout=False if the figure was created
with that method.
"""
_validate_type(pad, 'numeric', 'pad')
import matplotlib.pyplot as plt
fig = plt.gcf() if fig is None else fig
fig.canvas.draw()
constrained = fig.get_constrained_layout()
if constrained:
return # no-op
try: # see https://github.com/matplotlib/matplotlib/issues/2654
with warnings.catch_warnings(record=True) as ws:
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
except Exception:
try:
with warnings.catch_warnings(record=True) as ws:
fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
except Exception:
warn('Matplotlib function "tight_layout" is not supported.'
' Skipping subplot adjustment.')
return
for w in ws:
w_msg = str(w.message) if hasattr(w, 'message') else w.get_message()
if not w_msg.startswith('This figure includes Axes'):
warn(w_msg, w.category, 'matplotlib')
def _check_delayed_ssp(container):
"""Handle interactive SSP selection."""
if container.proj is True or\
all(p['active'] for p in container.info['projs']):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def _validate_if_list_of_axes(axes, obligatory_len=None):
"""Validate whether input is a list/array of axes."""
from matplotlib.axes import Axes
if obligatory_len is not None and not isinstance(obligatory_len, int):
        raise ValueError('obligatory_len must be None or int, got %s '
                         'instead' % type(obligatory_len))
if not isinstance(axes, (list, np.ndarray)):
raise ValueError('axes must be a list or numpy array of matplotlib '
'axes objects, got %s instead.' % type(axes))
if isinstance(axes, np.ndarray) and axes.ndim > 1:
raise ValueError('if input is a numpy array, it must be '
'one-dimensional. The received numpy array has %d '
'dimensions however. Try using ravel or flatten '
'method of the array.' % axes.ndim)
is_correct_type = np.array([isinstance(x, Axes)
for x in axes])
if not np.all(is_correct_type):
first_bad = np.where(np.logical_not(is_correct_type))[0][0]
raise ValueError('axes must be a list or numpy array of matplotlib '
'axes objects while one of the list elements is '
'%s.' % type(axes[first_bad]))
if obligatory_len is not None and not len(axes) == obligatory_len:
raise ValueError('axes must be a list/array of length %d, while the'
' length is %d' % (obligatory_len, len(axes)))
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
"""Return a colormap similar to that used by mne_analyze.
Parameters
----------
limits : list (or array) of length 3 or 6
Bounds for the colormap, which will be mirrored across zero if length
3, or completely specified (and potentially asymmetric) if length 6.
format : str
Type of colormap to return. If 'matplotlib', will return a
matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
return an RGBA array of shape (256, 4).
Returns
-------
cmap : instance of colormap | array
A teal->blue->gray->red->yellow colormap. See docstring of the 'format'
argument for further details.
Notes
-----
    This will return a colormap that displays correctly for data that are
    scaled by the plotting function to span [-fmax, fmax].
""" # noqa: E501
# Ensure limits is an array
limits = np.asarray(limits, dtype='float')
if len(limits) != 3 and len(limits) != 6:
raise ValueError('limits must have 3 or 6 elements')
if len(limits) == 3 and any(limits < 0.):
raise ValueError('if 3 elements, limits must all be non-negative')
if any(np.diff(limits) <= 0):
raise ValueError('limits must be monotonically increasing')
if format == 'matplotlib':
from matplotlib import colors
if len(limits) == 3:
limits = (np.concatenate((-np.flipud(limits), limits)) +
limits[-1]) / (2 * limits[-1])
else:
limits = (limits - np.min(limits)) / np.max(limits -
np.min(limits))
cdict = {'red': ((limits[0], 0.0, 0.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 1.0, 1.0),
(limits[5], 1.0, 1.0)),
'green': ((limits[0], 1.0, 1.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 1.0, 1.0)),
'blue': ((limits[0], 1.0, 1.0),
(limits[1], 1.0, 1.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 0.0, 0.0)),
'alpha': ((limits[0], 1.0, 1.0),
(limits[1], 1.0, 1.0),
(limits[2], 0.0, 0.0),
(limits[3], 0.0, 0.0),
(limits[4], 1.0, 1.0),
(limits[5], 1.0, 1.0)),
}
return colors.LinearSegmentedColormap('mne_analyze', cdict)
elif format == 'mayavi':
if len(limits) == 3:
limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
limits[-1]
else:
limits = np.concatenate((limits[:3], [0], limits[3:]))
limits /= np.max(np.abs(limits))
r = np.array([0, 0, 0, 0, 1, 1, 1])
g = np.array([1, 0, 0, 0, 0, 0, 1])
b = np.array([1, 1, 1, 0, 0, 0, 0])
a = np.array([1, 1, 0, 0, 0, 1, 1])
xp = (np.arange(256) - 128) / 128.0
colormap = np.r_[[np.interp(xp, limits, 255 * c)
for c in [r, g, b, a]]].T
return colormap
else:
raise ValueError('format must be either matplotlib or mayavi')
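# Illustrative sketch (not part of the original module): the two output formats of
# mne_analyze_colormap above, using its documented default limits. Only shapes and
# basic types are exercised here.
def _example_mne_analyze_colormap():
    import numpy as np

    rgba = mne_analyze_colormap([5, 10, 15], format='mayavi')
    assert rgba.shape == (256, 4)
    cmap = mne_analyze_colormap([5, 10, 15], format='matplotlib')
    return rgba, cmap(np.linspace(0., 1., 5))  # sample five RGBA colours from the cmap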
@contextmanager
def _events_off(obj):
obj.eventson = False
try:
yield
finally:
obj.eventson = True
def _toggle_proj(event, params, all_=False):
"""Perform operations when proj boxes clicked."""
# read options if possible
if 'proj_checks' in params:
bools = list(params['proj_checks'].get_status())
if all_:
new_bools = [not all(bools)] * len(bools)
with _events_off(params['proj_checks']):
for bi, (old, new) in enumerate(zip(bools, new_bools)):
if old != new:
params['proj_checks'].set_active(bi)
bools[bi] = new
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
proj = params.get('apply_proj', True)
bools = [proj] * len(params['projs'])
compute_proj = False
if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _get_channel_plotting_order(order, ch_types, picks=None):
"""Determine channel plotting order for browse-style Raw/Epochs plots."""
if order is None:
# for backward compat, we swap the first two to keep grad before mag
ch_type_order = list(_DATA_CH_TYPES_ORDER_DEFAULT)
ch_type_order = tuple(['grad', 'mag'] + ch_type_order[2:])
order = [pick_idx for order_type in ch_type_order
for pick_idx, pick_type in enumerate(ch_types)
if order_type == pick_type]
elif not isinstance(order, (np.ndarray, list, tuple)):
raise ValueError('order should be array-like; got '
f'"{order}" ({type(order)}).')
if picks is not None:
order = [ch for ch in order if ch in picks]
return np.asarray(order)
def _make_event_color_dict(event_color, events=None, event_id=None):
"""Make or validate a dict mapping event ids to colors."""
from .misc import _handle_event_colors
if isinstance(event_color, dict): # if event_color is a dict, validate it
event_id = dict() if event_id is None else event_id
event_color = {_ensure_int(event_id.get(key, key), 'event_color key'):
value for key, value in event_color.items()}
default = event_color.pop(-1, None)
default_factory = None if default is None else lambda: default
new_dict = defaultdict(default_factory)
for key, value in event_color.items():
if key < 1:
raise KeyError('event_color keys must be strictly positive, '
f'or -1 (cannot use {key})')
new_dict[key] = value
return new_dict
elif event_color is None: # make a dict from color cycle
uniq_events = set() if events is None else np.unique(events[:, 2])
return _handle_event_colors(event_color, uniq_events, event_id)
else: # if event_color is a MPL color-like thing, use it for all events
return defaultdict(lambda: event_color)
def _prepare_trellis(n_cells, ncols, nrows='auto', title=False, colorbar=False,
size=1.3, sharex=False, sharey=False):
from matplotlib.gridspec import GridSpec
from ._mpl_figure import _figure
if n_cells == 1:
nrows = ncols = 1
elif isinstance(ncols, int) and n_cells <= ncols:
nrows, ncols = 1, n_cells
else:
if ncols == 'auto' and nrows == 'auto':
nrows = math.floor(math.sqrt(n_cells))
ncols = math.ceil(n_cells / nrows)
elif ncols == 'auto':
ncols = math.ceil(n_cells / nrows)
elif nrows == 'auto':
nrows = math.ceil(n_cells / ncols)
else:
naxes = ncols * nrows
if naxes < n_cells:
raise ValueError("Cannot plot {} axes in a {} by {} "
"figure.".format(n_cells, nrows, ncols))
if colorbar:
ncols += 1
width = size * ncols
height = (size + max(0, 0.1 * (4 - size))) * nrows + bool(title) * 0.5
height_ratios = None
fig = _figure(toolbar=False, figsize=(width * 1.5, 0.25 + height * 1.5))
gs = GridSpec(nrows, ncols, figure=fig, height_ratios=height_ratios)
axes = []
if colorbar:
# exclude last axis of each row except top row, which is for colorbar
exclude = set(range(2 * ncols - 1, nrows * ncols, ncols))
ax_idxs = sorted(set(range(nrows * ncols)) - exclude)[:n_cells + 1]
else:
ax_idxs = range(n_cells)
for ax_idx in ax_idxs:
subplot_kw = dict()
if ax_idx > 0:
if sharex:
subplot_kw.update(sharex=axes[0])
if sharey:
subplot_kw.update(sharey=axes[0])
axes.append(fig.add_subplot(gs[ax_idx], **subplot_kw))
return fig, axes, ncols, nrows
def _draw_proj_checkbox(event, params, draw_current_state=True):
"""Toggle options (projectors) dialog."""
from matplotlib import widgets
projs = params['projs']
# turn on options dialog
labels = [p['desc'] for p in projs]
actives = ([p['active'] for p in projs] if draw_current_state else
params.get('proj_bools', [params['apply_proj']] * len(projs)))
width = max([4., max([len(p['desc']) for p in projs]) / 6.0 + 0.5])
height = (len(projs) + 1) / 6.0 + 1.5
fig_proj = figure_nobar(figsize=(width, height))
_set_window_title(fig_proj, 'SSP projection vectors')
offset = (1. / 6. / height)
params['fig_proj'] = fig_proj # necessary for proper toggling
ax_temp = fig_proj.add_axes((0, offset, 1, 0.8 - offset), frameon=False)
ax_temp.set_title('Projectors marked with "X" are active')
proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
# make edges around checkbox areas
for rect in proj_checks.rectangles:
rect.set_edgecolor('0.5')
rect.set_linewidth(1.)
# change already-applied projectors to red
for ii, p in enumerate(projs):
if p['active']:
for x in proj_checks.lines[ii]:
x.set_color('#ff0000')
# make minimal size
# pass key presses from option dialog over
proj_checks.on_clicked(partial(_toggle_proj, params=params))
params['proj_checks'] = proj_checks
fig_proj.canvas.mpl_connect('key_press_event', _key_press)
# Toggle all
ax_temp = fig_proj.add_axes((0, 0, 1, offset), frameon=False)
proj_all = widgets.Button(ax_temp, 'Toggle all')
proj_all.on_clicked(partial(_toggle_proj, params=params, all_=True))
params['proj_all'] = proj_all
# this should work for non-test cases
try:
fig_proj.canvas.draw()
plt_show(fig=fig_proj, warn=False)
except Exception:
pass
def _simplify_float(label):
# Heuristic to turn floats to ints where possible (e.g. -500.0 to -500)
if isinstance(label, float) and np.isfinite(label) and \
float(str(label)) != round(label):
label = round(label, 2)
return label
def _get_figsize_from_config():
"""Get default / most recent figure size from config."""
figsize = get_config('MNE_BROWSE_RAW_SIZE')
if figsize is not None:
figsize = figsize.split(',')
figsize = tuple([float(s) for s in figsize])
return figsize
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ',
read_limit=np.inf, max_str=30, verbose=None):
"""Compare the contents of two fiff files using diff and show_fiff.
Parameters
----------
fname_1 : str
First file to compare.
fname_2 : str
Second file to compare.
fname_out : str | None
Filename to store the resulting diff. If None, a temporary
file will be created.
show : bool
If True, show the resulting diff in a new tab in a web browser.
indent : str
How to indent the lines.
read_limit : int
Max number of bytes of data to read from a tag. Can be np.inf
to always read all data (helps test read completion).
max_str : int
Max number of characters of string representation to print for
each tag's data.
%(verbose)s
Returns
-------
fname_out : str
The filename used for storing the diff. Could be useful for
when a temporary file is used.
"""
file_1 = show_fiff(fname_1, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
file_2 = show_fiff(fname_2, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
if fname_out is not None:
f = open(fname_out, 'wb')
else:
f = tempfile.NamedTemporaryFile('wb', delete=False, suffix='.html')
fname_out = f.name
with f as fid:
fid.write(diff.encode('utf-8'))
if show is True:
webbrowser.open_new_tab(fname_out)
return fname_out
def figure_nobar(*args, **kwargs):
"""Make matplotlib figure with no toolbar.
Parameters
----------
*args : list
Arguments to pass to :func:`matplotlib.pyplot.figure`.
**kwargs : dict
Keyword arguments to pass to :func:`matplotlib.pyplot.figure`.
Returns
-------
fig : instance of Figure
The figure.
"""
from matplotlib import rcParams, pyplot as plt
old_val = rcParams['toolbar']
try:
rcParams['toolbar'] = 'none'
fig = plt.figure(*args, **kwargs)
# remove button press catchers (for toolbar)
cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for key in cbs:
fig.canvas.callbacks.disconnect(key)
finally:
rcParams['toolbar'] = old_val
return fig
def _show_help_fig(col1, col2, fig_help, ax, show):
_set_window_title(fig_help, 'Help')
celltext = [[c1, c2] for c1, c2 in zip(col1.strip().split("\n"),
col2.strip().split("\n"))]
table = ax.table(cellText=celltext, loc="center", cellLoc="left")
table.auto_set_font_size(False)
table.set_fontsize(12)
ax.set_axis_off()
for (row, col), cell in table.get_celld().items():
cell.set_edgecolor(None) # remove cell borders
# right justify, following:
# https://stackoverflow.com/questions/48210749/matplotlib-table-assign-different-text-alignments-to-different-columns?rq=1 # noqa: E501
if col == 0:
cell._loc = 'right'
fig_help.canvas.mpl_connect('key_press_event', _key_press)
if show:
# this should work for non-test cases
try:
fig_help.canvas.draw()
plt_show(fig=fig_help, warn=False)
except Exception:
pass
def _show_help(col1, col2, width, height):
fig_help = figure_nobar(figsize=(width, height), dpi=80)
ax = fig_help.add_subplot(111)
_show_help_fig(col1, col2, fig_help, ax, show=True)
def _key_press(event):
"""Handle key press in dialog."""
import matplotlib.pyplot as plt
if event.key == 'escape':
plt.close(event.canvas.figure)
class ClickableImage(object):
"""Display an image so you can click on it and store x/y positions.
Takes as input an image array (can be any array that works with imshow,
but will work best with images. Displays the image and lets you
click on it. Stores the xy coordinates of each click, so now you can
superimpose something on top of it.
Upon clicking, the x/y coordinate of the cursor will be stored in
self.coords, which is a list of (x, y) tuples.
Parameters
----------
imdata : ndarray
The image that you wish to click on for 2-d points.
**kwargs : dict
Keyword arguments. Passed to ax.imshow.
Notes
-----
.. versionadded:: 0.9.0
"""
def __init__(self, imdata, **kwargs):
"""Display the image for clicking."""
import matplotlib.pyplot as plt
self.coords = []
self.imdata = imdata
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.ymax = self.imdata.shape[0]
self.xmax = self.imdata.shape[1]
self.im = self.ax.imshow(imdata,
extent=(0, self.xmax, 0, self.ymax),
picker=True, **kwargs)
self.ax.axis('off')
self.fig.canvas.mpl_connect('pick_event', self.onclick)
plt_show(block=True)
def onclick(self, event):
"""Handle Mouse clicks.
Parameters
----------
event : matplotlib.backend_bases.Event
The matplotlib object that we use to get x/y position.
"""
mouseevent = event.mouseevent
self.coords.append((mouseevent.xdata, mouseevent.ydata))
def plot_clicks(self, **kwargs):
"""Plot the x/y positions stored in self.coords.
Parameters
----------
**kwargs : dict
Arguments are passed to imshow in displaying the bg image.
"""
import matplotlib.pyplot as plt
if len(self.coords) == 0:
raise ValueError('No coordinates found, make sure you click '
'on the image that is first shown.')
f, ax = plt.subplots()
ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
xcoords, ycoords = zip(*self.coords)
ax.scatter(xcoords, ycoords, c='#ff0000')
ann_text = np.arange(len(self.coords)).astype(str)
for txt, coord in zip(ann_text, self.coords):
ax.annotate(txt, coord, fontsize=20, color='#ff0000')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt_show()
def to_layout(self, **kwargs):
"""Turn coordinates into an MNE Layout object.
Normalizes by the image you used to generate clicks
Parameters
----------
**kwargs : dict
Arguments are passed to generate_2d_layout.
Returns
-------
layout : instance of Layout
The layout.
"""
from ..channels.layout import generate_2d_layout
coords = np.array(self.coords)
lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
return lt
def _fake_click(fig, ax, point, xform='ax', button=1, kind='press'):
"""Fake a click at a relative point within axes."""
if xform == 'ax':
x, y = ax.transAxes.transform_point(point)
elif xform == 'data':
x, y = ax.transData.transform_point(point)
else:
assert xform == 'pix'
x, y = point
if kind == 'press':
func = partial(fig.canvas.button_press_event, x=x, y=y, button=button)
elif kind == 'release':
func = partial(fig.canvas.button_release_event, x=x, y=y,
button=button)
elif kind == 'motion':
func = partial(fig.canvas.motion_notify_event, x=x, y=y)
func(guiEvent=None)
def add_background_image(fig, im, set_ratios=None):
"""Add a background image to a plot.
Adds the image specified in ``im`` to the
figure ``fig``. This is generally meant to
be done with topo plots, though it could work
for any plot.
.. note:: This modifies the figure and/or axes in place.
Parameters
----------
fig : Figure
The figure you wish to add a bg image to.
im : array, shape (M, N, {3, 4})
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
set_ratios : None | str
Set the aspect ratio of any axes in fig
to the value in set_ratios. Defaults to None,
which does nothing to axes.
Returns
-------
ax_im : instance of Axes
Axes created corresponding to the image you added.
Notes
-----
.. versionadded:: 0.9.0
"""
if im is None:
# Don't do anything and return nothing
return None
if set_ratios is not None:
for ax in fig.axes:
ax.set_aspect(set_ratios)
ax_im = fig.add_axes([0, 0, 1, 1], label='background')
ax_im.imshow(im, aspect='auto')
ax_im.set_zorder(-1)
return ax_im
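# Illustrative usage sketch for ``add_background_image``: a plain matplotlib
# figure plus a random RGB array standing in for a real photo or anatomical
# image. The helper name and data are made up for demonstration only.
def _example_add_background_image():
    import numpy as np
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    fig, axes = plt.subplots(1, 2)
    for ax in axes:
        ax.plot(rng.randn(10))
    bg = rng.rand(64, 64, 3)  # any array accepted by plt.imshow works
    ax_im = add_background_image(fig, bg, set_ratios='auto')
    return fig, ax_im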
def _find_peaks(evoked, npeaks):
"""Find peaks from evoked data.
Returns ``npeaks`` biggest peaks as a list of time points.
"""
from scipy.signal import argrelmax
gfp = evoked.data.std(axis=0)
order = len(evoked.times) // 30
if order < 1:
order = 1
peaks = argrelmax(gfp, order=order, axis=0)[0]
if len(peaks) > npeaks:
max_indices = np.argsort(gfp[peaks])[-npeaks:]
peaks = np.sort(peaks[max_indices])
times = evoked.times[peaks]
if len(times) == 0:
times = [evoked.times[gfp.argmax()]]
return times
def _process_times(inst, use_times, n_peaks=None, few=False):
"""Return a list of times for topomaps."""
if isinstance(use_times, str):
if use_times == 'interactive':
use_times, n_peaks = 'peaks', 1
if use_times == 'peaks':
if n_peaks is None:
n_peaks = min(3 if few else 7, len(inst.times))
use_times = _find_peaks(inst, n_peaks)
elif use_times == 'auto':
if n_peaks is None:
n_peaks = min(5 if few else 10, len(inst.times))
use_times = np.linspace(inst.times[0], inst.times[-1], n_peaks)
else:
raise ValueError("Got an unrecognized method for `times`. Only "
"'peaks', 'auto' and 'interactive' are supported "
"(or directly passing numbers).")
elif np.isscalar(use_times):
use_times = [use_times]
use_times = np.array(use_times, float)
if use_times.ndim != 1:
raise ValueError('times must be 1D, got %d dimensions'
% use_times.ndim)
if len(use_times) > 25:
warn('More than 25 topomaps plots requested. This might take a while.')
return use_times
@verbose
def plot_sensors(info, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True, axes=None,
block=False, show=True, sphere=None, verbose=None):
"""Plot sensors positions.
Parameters
----------
%(info_not_none)s
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d', 'select'.
If 'select', a set of channels can be selected interactively by using
lasso selector or clicking while holding control key. The selected
channels are returned along with the figure instance. Defaults to
'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag,
grad, eeg, seeg, dbs and ecog channels are plotted. If None (default),
then channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to
``'Sensor positions (%%s)' %% ch_type``.
show_names : bool | array of str
Whether to display all channel names. If an array, only the channel
names in the array are shown. Defaults to False.
ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
Whether to project the 3d locations to a sphere. When False, the
sensor array appears as if viewed from directly above the
subject's head. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an instance
of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed. Defaults
to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_alignment`.
.. versionadded:: 0.12.0
"""
from .evoked import _rgb
_check_option('kind', kind, ['topomap', '3d', 'select'])
if not isinstance(info, Info):
raise TypeError('info must be an instance of Info not %s' % type(info))
ch_indices = channel_indices_by_type(info)
allowed_types = _DATA_CH_TYPES_SPLIT
if ch_type is None:
for this_type in allowed_types:
if _contains_ch_type(info, this_type):
ch_type = this_type
break
picks = ch_indices[ch_type]
elif ch_type == 'all':
picks = list()
for this_type in allowed_types:
picks += ch_indices[this_type]
elif ch_type in allowed_types:
picks = ch_indices[ch_type]
else:
raise ValueError("ch_type must be one of %s not %s!" % (allowed_types,
ch_type))
if len(picks) == 0:
raise ValueError('Could not find any channels of type %s.' % ch_type)
chs = [info['chs'][pick] for pick in picks]
if not _check_ch_locs(chs):
raise RuntimeError('No valid channel positions found')
dev_head_t = info['dev_head_t']
pos = np.empty((len(chs), 3))
for ci, ch in enumerate(chs):
pos[ci] = ch['loc'][:3]
if ch['coord_frame'] == FIFF.FIFFV_COORD_DEVICE:
if dev_head_t is None:
warn('dev_head_t is None, transforming MEG sensors to head '
'coordinate frame using identity transform')
dev_head_t = np.eye(4)
pos[ci] = apply_trans(dev_head_t, pos[ci])
del dev_head_t
ch_names = np.array([ch['ch_name'] for ch in chs])
bads = [idx for idx, name in enumerate(ch_names) if name in info['bads']]
if ch_groups is None:
def_colors = _handle_default('color')
colors = ['red' if i in bads else def_colors[channel_type(info, pick)]
for i, pick in enumerate(picks)]
else:
if ch_groups in ['position', 'selection']:
# Avoid circular import
from ..channels import (read_vectorview_selection, _SELECTIONS,
_EEG_SELECTIONS, _divide_to_regions)
if ch_groups == 'position':
ch_groups = _divide_to_regions(info, add_stim=False)
ch_groups = list(ch_groups.values())
else:
ch_groups, color_vals = list(), list()
for selection in _SELECTIONS + _EEG_SELECTIONS:
channels = pick_channels(
info['ch_names'],
read_vectorview_selection(selection, info=info))
ch_groups.append(channels)
color_vals = np.ones((len(ch_groups), 4))
for idx, ch_group in enumerate(ch_groups):
color_picks = [np.where(picks == ch)[0][0] for ch in ch_group
if ch in picks]
if len(color_picks) == 0:
continue
x, y, z = pos[color_picks].T
color = np.mean(_rgb(x, y, z), axis=0)
color_vals[idx, :3] = color # mean of spatial color
else:
import matplotlib.pyplot as plt
colors = np.linspace(0, 1, len(ch_groups))
color_vals = [plt.cm.jet(colors[i]) for i in range(len(ch_groups))]
if not isinstance(ch_groups, (np.ndarray, list)):
raise ValueError("ch_groups must be None, 'position', "
"'selection', or an array. Got %s." % ch_groups)
colors = np.zeros((len(picks), 4))
for pick_idx, pick in enumerate(picks):
for ind, value in enumerate(ch_groups):
if pick in value:
colors[pick_idx] = color_vals[ind]
break
title = 'Sensor positions (%s)' % ch_type if title is None else title
fig = _plot_sensors(pos, info, picks, colors, bads, ch_names, title,
show_names, axes, show, kind, block,
to_sphere, sphere)
if kind == 'select':
return fig, fig.lasso.selection
return fig
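# Illustrative usage sketch for ``plot_sensors``: assumes an EEG Info built
# from a standard montage via MNE's public ``create_info`` / ``set_montage``
# API. The helper name and channel set are made up for demonstration only.
def _example_plot_sensors():
    import mne
    ch_names = ['Fz', 'Cz', 'Pz', 'Oz', 'C3', 'C4']
    info = mne.create_info(ch_names, sfreq=256., ch_types='eeg')
    info.set_montage('standard_1020')  # attaches 3D sensor positions
    # 2D topomap view with channel names shown
    fig = plot_sensors(info, kind='topomap', show_names=True, show=False)
    return fig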
def _onpick_sensor(event, fig, ax, pos, ch_names, show_names):
"""Pick a channel in plot_sensors."""
if event.mouseevent.inaxes != ax:
return
if event.mouseevent.key == 'control' and fig.lasso is not None:
for ind in event.ind:
fig.lasso.select_one(ind)
return
if show_names:
return # channel names already visible
ind = event.ind[0] # Just take the first sensor.
ch_name = ch_names[ind]
this_pos = pos[ind]
# XXX: Bug in matplotlib won't allow setting the position of existing
# text item, so we create a new one.
ax.texts.pop(0)
if len(this_pos) == 3:
ax.text(this_pos[0], this_pos[1], this_pos[2], ch_name)
else:
ax.text(this_pos[0], this_pos[1], ch_name)
fig.canvas.draw()
def _close_event(event, fig):
"""Listen for sensor plotter close event."""
if getattr(fig, 'lasso', None) is not None:
fig.lasso.disconnect()
def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names,
ax, show, kind, block, to_sphere, sphere):
"""Plot sensors."""
from matplotlib import rcParams
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 analysis:ignore
from .topomap import _get_pos_outlines, _draw_outlines
sphere = _check_sphere(sphere, info)
edgecolors = np.repeat(rcParams['axes.edgecolor'], len(colors))
edgecolors[bads] = 'red'
axes_was_none = ax is None
if axes_was_none:
subplot_kw = dict()
if kind == '3d':
subplot_kw.update(projection='3d')
fig, ax = plt.subplots(
1, figsize=(max(rcParams['figure.figsize']),) * 2,
subplot_kw=subplot_kw)
else:
fig = ax.get_figure()
if kind == '3d':
ax.text(0, 0, 0, '', zorder=1)
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2], picker=True, c=colors,
s=75, edgecolor=edgecolors, linewidth=2)
ax.azim = 90
ax.elev = 0
ax.xaxis.set_label_text('x (m)')
ax.yaxis.set_label_text('y (m)')
ax.zaxis.set_label_text('z (m)')
else: # kind in 'select', 'topomap'
ax.text(0, 0, '', zorder=1)
pos, outlines = _get_pos_outlines(info, picks, sphere,
to_sphere=to_sphere)
_draw_outlines(ax, outlines)
pts = ax.scatter(pos[:, 0], pos[:, 1], picker=True, clip_on=False,
c=colors, edgecolors=edgecolors, s=25, lw=2)
if kind == 'select':
fig.lasso = SelectFromCollection(ax, pts, ch_names)
else:
fig.lasso = None
# Equal aspect for 3D looks bad, so only use for 2D
ax.set(aspect='equal')
if axes_was_none: # we'll show the plot title as the window title
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.axis("off") # remove border around figure
del sphere
connect_picker = True
if show_names:
if isinstance(show_names, (list, np.ndarray)): # only given channels
indices = [list(ch_names).index(name) for name in show_names]
else: # all channels
indices = range(len(pos))
for idx in indices:
this_pos = pos[idx]
if kind == '3d':
ax.text(this_pos[0], this_pos[1], this_pos[2], ch_names[idx])
else:
ax.text(this_pos[0] + 0.0025, this_pos[1], ch_names[idx],
ha='left', va='center')
connect_picker = (kind == 'select')
if connect_picker:
picker = partial(_onpick_sensor, fig=fig, ax=ax, pos=pos,
ch_names=ch_names, show_names=show_names)
fig.canvas.mpl_connect('pick_event', picker)
if axes_was_none:
_set_window_title(fig, title)
closed = partial(_close_event, fig=fig)
fig.canvas.mpl_connect('close_event', closed)
plt_show(show, block=block)
return fig
def _compute_scalings(scalings, inst, remove_dc=False, duration=10):
"""Compute scalings for each channel type automatically.
Parameters
----------
scalings : dict
The scalings for each channel type. If any values are
'auto', this will automatically compute a reasonable
scaling for that channel type. Any values that aren't
'auto' will not be changed.
inst : instance of Raw or Epochs
The data for which you want to compute scalings. If data
is not preloaded, this will read a subset of times / epochs
up to 100mb in size in order to compute scalings.
remove_dc : bool
Whether to remove the mean (DC) before calculating the scalings. If
True, the mean will be computed and subtracted for short epochs in
order to compensate not only for global mean offset, but also for slow
drifts in the signals.
duration : float
If remove_dc is True, the mean will be computed and subtracted on
segments of length ``duration`` seconds.
Returns
-------
scalings : dict
A scalings dictionary with updated values
"""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
scalings = _handle_default('scalings_plot_raw', scalings)
if not isinstance(inst, (BaseRaw, BaseEpochs)):
raise ValueError('Must supply either Raw or Epochs')
ch_types = channel_indices_by_type(inst.info)
ch_types = {i_type: i_ixs
for i_type, i_ixs in ch_types.items() if len(i_ixs) != 0}
scalings = deepcopy(scalings)
if inst.preload is False:
if isinstance(inst, BaseRaw):
# Load a window of data from the center up to 100mb in size
n_times = 1e8 // (len(inst.ch_names) * 8)
n_times = np.clip(n_times, 1, inst.n_times)
n_secs = n_times / float(inst.info['sfreq'])
time_middle = np.mean(inst.times)
tmin = np.clip(time_middle - n_secs / 2., inst.times.min(), None)
tmax = np.clip(time_middle + n_secs / 2., None, inst.times.max())
smin, smax = [
int(round(x * inst.info['sfreq'])) for x in (tmin, tmax)]
data = inst._read_segment(smin, smax)
elif isinstance(inst, BaseEpochs):
# Load a random subset of epochs up to 100mb in size
n_epochs = 1e8 // (len(inst.ch_names) * len(inst.times) * 8)
n_epochs = int(np.clip(n_epochs, 1, len(inst)))
ixs_epochs = np.random.choice(range(len(inst)), n_epochs, False)
inst = inst.copy()[ixs_epochs].load_data()
else:
data = inst._data
if isinstance(inst, BaseEpochs):
data = inst._data.swapaxes(0, 1).reshape([len(inst.ch_names), -1])
# Iterate through ch types and update scaling if 'auto'
for key, value in scalings.items():
if key not in ch_types:
continue
if not (isinstance(value, str) and value == 'auto'):
try:
scalings[key] = float(value)
except Exception:
raise ValueError(
f'scalings must be "auto" or float, got scalings[{key!r}]='
f'{value!r} which could not be converted to float')
continue
this_data = data[ch_types[key]]
if remove_dc and (this_data.shape[1] / inst.info["sfreq"] >= duration):
length = int(duration * inst.info["sfreq"]) # segment length
# truncate data so that we can divide into segments of equal length
this_data = this_data[:, :this_data.shape[1] // length * length]
shape = this_data.shape # original shape
this_data = this_data.T.reshape(-1, length, shape[0]) # segment
this_data -= np.nanmean(this_data, 0) # subtract segment means
this_data = this_data.T.reshape(shape) # reshape into original
this_data = this_data.ravel()
this_data = this_data[np.isfinite(this_data)]
if this_data.size:
iqr = np.diff(np.percentile(this_data, [25, 75]))[0]
else:
iqr = 1.
scalings[key] = iqr
return scalings
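# Illustrative sketch of ``_compute_scalings`` on synthetic data: a small
# RawArray with two EEG channels (built via MNE's public API); 'auto' entries
# are replaced by the interquartile range of the data. Helper name is made up.
def _example_compute_scalings():
    import numpy as np
    import mne
    rng = np.random.RandomState(0)
    info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=100., ch_types='eeg')
    raw = mne.io.RawArray(rng.randn(2, 1000) * 1e-6, info)
    scalings = _compute_scalings(dict(eeg='auto'), raw)
    return scalings['eeg']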
def _setup_cmap(cmap, n_axes=1, norm=False):
"""Set color map interactivity."""
if cmap == 'interactive':
cmap = ('Reds' if norm else 'RdBu_r', True)
elif not isinstance(cmap, tuple):
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
cmap = (cmap, False if n_axes > 2 else True)
return cmap
def _prepare_joint_axes(n_maps, figsize=None):
"""Prepare axes for topomaps and colorbar in joint plot figure.
Parameters
----------
n_maps: int
Number of topomaps to include in the figure
figsize: tuple
Figure size, see plt.figsize
Returns
-------
fig : matplotlib.figure.Figure
Figure with initialized axes
main_ax: matplotlib.axes._subplots.AxesSubplot
Axes in which to put the main plot
map_ax: list
List of axes for each topomap
cbar_ax: matplotlib.axes._subplots.AxesSubplot
Axes for colorbar next to topomaps
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
main_ax = fig.add_subplot(212)
ts = n_maps + 2
map_ax = [plt.subplot(4, ts, x + 2 + ts) for x in range(n_maps)]
# Position topomap subplots on the second row, starting on the
# second column
cbar_ax = plt.subplot(4, 5 * (ts + 1), 10 * (ts + 1))
# Position colorbar at the very end of a more finely divided
# second row of subplots
return fig, main_ax, map_ax, cbar_ax
class DraggableColorbar(object):
"""Enable interactive colorbar.
See http://www.ster.kuleuven.be/~pieterd/python/html/plotting/interactive_colorbar.html
""" # noqa: E501
def __init__(self, cbar, mappable):
import matplotlib.pyplot as plt
self.cbar = cbar
self.mappable = mappable
self.press = None
self.cycle = sorted([i for i in dir(plt.cm) if
hasattr(getattr(plt.cm, i), 'N')])
self.cycle += [mappable.get_cmap().name]
self.index = self.cycle.index(mappable.get_cmap().name)
self.lims = (self.cbar.norm.vmin, self.cbar.norm.vmax)
self.connect()
def connect(self):
"""Connect to all the events we need."""
self.cidpress = self.cbar.ax.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.cbar.ax.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.cbar.ax.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
self.keypress = self.cbar.ax.figure.canvas.mpl_connect(
'key_press_event', self.key_press)
self.scroll = self.cbar.ax.figure.canvas.mpl_connect(
'scroll_event', self.on_scroll)
def on_press(self, event):
"""Handle button press."""
if event.inaxes != self.cbar.ax:
return
self.press = event.y
def key_press(self, event):
"""Handle key press."""
# print(event.key)
scale = self.cbar.norm.vmax - self.cbar.norm.vmin
perc = 0.03
if event.key == 'down':
self.index += 1
elif event.key == 'up':
self.index -= 1
elif event.key == ' ': # space key resets scale
self.cbar.norm.vmin = self.lims[0]
self.cbar.norm.vmax = self.lims[1]
elif event.key == '+':
self.cbar.norm.vmin -= (perc * scale) * -1
self.cbar.norm.vmax += (perc * scale) * -1
elif event.key == '-':
self.cbar.norm.vmin -= (perc * scale) * 1
self.cbar.norm.vmax += (perc * scale) * 1
elif event.key == 'pageup':
self.cbar.norm.vmin -= (perc * scale) * 1
self.cbar.norm.vmax -= (perc * scale) * 1
elif event.key == 'pagedown':
self.cbar.norm.vmin -= (perc * scale) * -1
self.cbar.norm.vmax -= (perc * scale) * -1
else:
return
if self.index < 0:
self.index = len(self.cycle) - 1
elif self.index >= len(self.cycle):
self.index = 0
cmap = self.cycle[self.index]
self.cbar.mappable.set_cmap(cmap)
self.cbar.draw_all()
self.mappable.set_cmap(cmap)
self._update()
def on_motion(self, event):
"""Handle mouse movements."""
if self.press is None:
return
if event.inaxes != self.cbar.ax:
return
yprev = self.press
dy = event.y - yprev
self.press = event.y
scale = self.cbar.norm.vmax - self.cbar.norm.vmin
perc = 0.03
if event.button == 1:
self.cbar.norm.vmin -= (perc * scale) * np.sign(dy)
self.cbar.norm.vmax -= (perc * scale) * np.sign(dy)
elif event.button == 3:
self.cbar.norm.vmin -= (perc * scale) * np.sign(dy)
self.cbar.norm.vmax += (perc * scale) * np.sign(dy)
self._update()
def on_release(self, event):
"""Handle release."""
self.press = None
self._update()
def on_scroll(self, event):
"""Handle scroll."""
scale = 1.1 if event.step < 0 else 1. / 1.1
self.cbar.norm.vmin *= scale
self.cbar.norm.vmax *= scale
self._update()
def _update(self):
from matplotlib.ticker import AutoLocator
self.cbar.set_ticks(AutoLocator())
self.cbar.update_ticks()
self.cbar.draw_all()
self.mappable.set_norm(self.cbar.norm)
self.cbar.ax.figure.canvas.draw()
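# Illustrative interactive sketch for ``DraggableColorbar``: attach it to the
# colorbar of an ``imshow`` image so the color limits can be dragged with the
# mouse and the colormap cycled with the up/down keys. Helper name is made up.
def _example_draggable_colorbar():
    import numpy as np
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    fig, ax = plt.subplots()
    im = ax.imshow(rng.randn(20, 20), cmap='RdBu_r')
    cbar = fig.colorbar(im, ax=ax)
    # keep a reference so the event callbacks stay connected
    dragger = DraggableColorbar(cbar, im)
    return fig, dragger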
class SelectFromCollection(object):
"""Select channels from a matplotlib collection using ``LassoSelector``.
Selected channels are saved in the ``selection`` attribute. This tool
highlights selected points by fading other points out (i.e., reducing their
alpha values).
Parameters
----------
ax : instance of Axes
Axes to interact with.
collection : instance of matplotlib collection
Collection you want to select from.
alpha_other : 0 <= float <= 1
To highlight a selection, this tool sets all selected points to an
alpha value of 1 and non-selected points to ``alpha_other``.
Defaults to 0.3.
linewidth_other : float
Linewidth to use for non-selected sensors. Default is 1.
Notes
-----
This tool selects collection objects based on their *origins*
(i.e., ``offsets``). Emits mpl event 'lasso_event' when selection is ready.
"""
def __init__(self, ax, collection, ch_names, alpha_other=0.5,
linewidth_other=0.5, alpha_selected=1, linewidth_selected=1):
from matplotlib import __version__
if LooseVersion(__version__) < LooseVersion('1.2.1'):
raise ImportError('Interactive selection not possible for '
'matplotlib versions < 1.2.1. Upgrade '
'matplotlib.')
from matplotlib.widgets import LassoSelector
self.canvas = ax.figure.canvas
self.collection = collection
self.ch_names = ch_names
self.alpha_other = alpha_other
self.linewidth_other = linewidth_other
self.alpha_selected = alpha_selected
self.linewidth_selected = linewidth_selected
self.xys = collection.get_offsets()
self.Npts = len(self.xys)
# Ensure that we have separate colors for each object
self.fc = collection.get_facecolors()
self.ec = collection.get_edgecolors()
self.lw = collection.get_linewidths()
if len(self.fc) == 0:
raise ValueError('Collection must have a facecolor')
elif len(self.fc) == 1:
self.fc = np.tile(self.fc, self.Npts).reshape(self.Npts, -1)
self.ec = np.tile(self.ec, self.Npts).reshape(self.Npts, -1)
self.fc[:, -1] = self.alpha_other # deselect in the beginning
self.ec[:, -1] = self.alpha_other
self.lw = np.full(self.Npts, self.linewidth_other)
line_kw = _prop_kw('line', dict(color='red', linewidth=0.5))
self.lasso = LassoSelector(ax, onselect=self.on_select, **line_kw)
self.selection = list()
def on_select(self, verts):
"""Select a subset from the collection."""
from matplotlib.path import Path
if len(verts) <= 3: # Seems to be a good way to exclude single clicks.
return
path = Path(verts)
inds = np.nonzero([path.contains_point(xy) for xy in self.xys])[0]
if self.canvas._key == 'control': # Appending selection.
sels = [np.where(self.ch_names == c)[0][0] for c in self.selection]
inters = set(inds) - set(sels)
inds = list(inters.union(set(sels) - set(inds)))
self.selection[:] = np.array(self.ch_names)[inds].tolist()
self.style_sensors(inds)
self.canvas.callbacks.process('lasso_event')
def select_one(self, ind):
"""Select or deselect one sensor."""
ch_name = self.ch_names[ind]
if ch_name in self.selection:
sel_ind = self.selection.index(ch_name)
self.selection.pop(sel_ind)
else:
self.selection.append(ch_name)
inds = np.in1d(self.ch_names, self.selection).nonzero()[0]
self.style_sensors(inds)
self.canvas.callbacks.process('lasso_event')
def select_many(self, inds):
"""Select many sensors using indices (for predefined selections)."""
self.selection[:] = np.array(self.ch_names)[inds].tolist()
self.style_sensors(inds)
def style_sensors(self, inds):
"""Style selected sensors as "active"."""
# reset
self.fc[:, -1] = self.alpha_other
self.ec[:, -1] = self.alpha_other / 2
self.lw[:] = self.linewidth_other
# style sensors at `inds`
self.fc[inds, -1] = self.alpha_selected
self.ec[inds, -1] = self.alpha_selected
self.lw[inds] = self.linewidth_selected
self.collection.set_facecolors(self.fc)
self.collection.set_edgecolors(self.ec)
self.collection.set_linewidths(self.lw)
self.canvas.draw_idle()
def disconnect(self):
"""Disconnect the lasso selector."""
self.lasso.disconnect_events()
self.fc[:, -1] = self.alpha_selected
self.ec[:, -1] = self.alpha_selected
self.collection.set_facecolors(self.fc)
self.collection.set_edgecolors(self.ec)
self.canvas.draw_idle()
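# Illustrative sketch for ``SelectFromCollection``: lasso-select points of an
# ordinary scatter plot; the selected "channel" names accumulate in
# ``selector.selection``. Helper name and point data are made up.
def _example_select_from_collection():
    import numpy as np
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    xy = rng.rand(10, 2)
    names = np.array(['ch%02d' % ii for ii in range(10)])
    fig, ax = plt.subplots()
    pts = ax.scatter(xy[:, 0], xy[:, 1], c='k', edgecolors='k')
    selector = SelectFromCollection(ax, pts, names)
    return fig, selector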
def _get_color_list(annotations=False):
"""Get the current color list from matplotlib rcParams.
Parameters
----------
annotations : boolean
Has no influence on the function if false. If true, check if color
"red" (#ff0000) is in the cycle and remove it.
Returns
-------
colors : list
"""
from matplotlib import rcParams
color_cycle = rcParams.get('axes.prop_cycle')
if not color_cycle:
# Use deprecated color_cycle to avoid KeyErrors in environments
# with Python 2.7 and Matplotlib < 1.5
# this will already be a list
colors = rcParams.get('axes.color_cycle')
else:
# we were able to use the prop_cycle. Now just convert to list
colors = color_cycle.by_key()['color']
# If we want annotations, red is reserved ... remove if present. This
# checks for the reddish color in MPL dark background style, normal style,
# and MPL "red", and defaults to the last of those if none are present
for red in ('#fa8174', '#d62728', '#ff0000'):
if annotations and red in colors:
colors.remove(red)
break
return (colors, red) if annotations else colors
def _merge_annotations(start, stop, description, annotations, current=()):
"""Handle drawn annotations."""
ends = annotations.onset + annotations.duration
idx = np.intersect1d(np.where(ends >= start)[0],
np.where(annotations.onset <= stop)[0])
idx = np.intersect1d(idx,
np.where(annotations.description == description)[0])
new_idx = np.setdiff1d(idx, current) # don't include modified annotation
end = max(np.append((annotations.onset[new_idx] +
annotations.duration[new_idx]), stop))
onset = min(np.append(annotations.onset[new_idx], start))
duration = end - onset
annotations.delete(idx)
annotations.append(onset, duration, description)
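# Worked sketch of ``_merge_annotations``: two overlapping 'BAD_seg' spans
# plus a newly drawn span from 1.5-4.5 s collapse into one annotation covering
# 0.5-5.0 s. Helper name and values are made up for demonstration only.
def _example_merge_annotations():
    from ..annotations import Annotations
    annot = Annotations(onset=[0.5, 3.0], duration=[2.0, 2.0],
                        description=['BAD_seg', 'BAD_seg'])
    _merge_annotations(1.5, 4.5, 'BAD_seg', annot)
    # annot now holds a single entry: onset=0.5, duration=4.5
    return annot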
def _connection_line(x, fig, sourceax, targetax, y=1.,
y_source_transform="transAxes"):
"""Connect source and target plots with a line.
Connect source and target plots with a line, such as time series
(source) and topolots (target). Primarily used for plot_joint
functions.
"""
from matplotlib.lines import Line2D
trans_fig = fig.transFigure
trans_fig_inv = fig.transFigure.inverted()
xt, yt = trans_fig_inv.transform(targetax.transAxes.transform([.5, 0.]))
xs, _ = trans_fig_inv.transform(sourceax.transData.transform([x, 0.]))
_, ys = trans_fig_inv.transform(getattr(sourceax, y_source_transform
).transform([0., y]))
return Line2D((xt, xs), (yt, ys), transform=trans_fig, color='grey',
linestyle='-', linewidth=1.5, alpha=.66, zorder=1,
clip_on=False)
class DraggableLine(object):
"""Custom matplotlib line for moving around by drag and drop.
Parameters
----------
line : instance of matplotlib Line2D
Line to add interactivity to.
callback : function
Callback to call when line is released.
"""
def __init__(self, line, modify_callback, drag_callback):
self.line = line
self.press = None
self.x0 = line.get_xdata()[0]
self.modify_callback = modify_callback
self.drag_callback = drag_callback
self.cidpress = self.line.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.line.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.line.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
def set_x(self, x):
"""Repoisition the line."""
self.line.set_xdata([x, x])
self.x0 = x
def on_press(self, event):
"""Store button press if on top of the line."""
if event.inaxes != self.line.axes or not self.line.contains(event)[0]:
return
x0 = self.line.get_xdata()
y0 = self.line.get_ydata()
self.press = x0, y0, event.xdata, event.ydata
def on_motion(self, event):
"""Move the line on drag."""
if self.press is None:
return
if event.inaxes != self.line.axes:
return
x0, y0, xpress, ypress = self.press
dx = event.xdata - xpress
self.line.set_xdata(x0 + dx)
self.drag_callback((x0 + dx)[0])
self.line.figure.canvas.draw()
def on_release(self, event):
"""Handle release."""
if event.inaxes != self.line.axes or self.press is None:
return
self.press = None
self.line.figure.canvas.draw()
self.modify_callback(self.x0, event.xdata)
self.x0 = event.xdata
def remove(self):
"""Remove the line."""
self.line.figure.canvas.mpl_disconnect(self.cidpress)
self.line.figure.canvas.mpl_disconnect(self.cidrelease)
self.line.figure.canvas.mpl_disconnect(self.cidmotion)
self.line.remove()
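# Illustrative sketch for ``DraggableLine``: make a vertical cursor draggable
# and report its old/new x position on release. The callbacks and helper name
# are made up for demonstration only.
def _example_draggable_line():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 10], [0, 1])
    line = ax.axvline(5., color='r')

    def on_modify(old_x, new_x):
        print('moved from %.2f to %.2f' % (old_x, new_x))

    def on_drag(x):
        pass  # could live-update a linked plot here

    cursor = DraggableLine(line, on_modify, on_drag)
    return fig, cursor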
def _setup_ax_spines(axes, vlines, xmin, xmax, ymin, ymax, invert_y=False,
unit=None, truncate_xaxis=True, truncate_yaxis=True,
skip_axlabel=False, hline=True):
# don't show zero line if it coincides with x-axis (even if hline=True)
if hline and ymin != 0.:
axes.spines['top'].set_position('zero')
else:
axes.spines['top'].set_visible(False)
# the axes can become very small with topo plotting. This prevents the
# x-axis from shrinking to length zero if truncate_xaxis=True, by adding
# new ticks that are nice round numbers close to (but less extreme than)
# xmin and xmax
vlines = [] if vlines is None else vlines
xticks = _trim_ticks(axes.get_xticks(), xmin, xmax)
xticks = np.array(sorted(set([x for x in xticks] + vlines)))
if len(xticks) < 2:
def log_fix(tval):
exp = np.log10(np.abs(tval))
return np.sign(tval) * 10 ** (np.fix(exp) - (exp < 0))
xlims = np.array([xmin, xmax])
temp_ticks = log_fix(xlims)
closer_idx = np.argmin(np.abs(xlims - temp_ticks))
further_idx = np.argmax(np.abs(xlims - temp_ticks))
start_stop = [temp_ticks[closer_idx], xlims[further_idx]]
step = np.sign(np.diff(start_stop)) * np.max(np.abs(temp_ticks))
tts = np.arange(*start_stop, step)
xticks = np.array(sorted(xticks + [tts[0], tts[-1]]))
axes.set_xticks(xticks)
# y-axis is simpler
yticks = _trim_ticks(axes.get_yticks(), ymin, ymax)
axes.set_yticks(yticks)
# truncation case 1: truncate both
if truncate_xaxis and truncate_yaxis:
axes.spines['bottom'].set_bounds(*xticks[[0, -1]])
axes.spines['left'].set_bounds(*yticks[[0, -1]])
# case 2: truncate only x (only right side; connect to y at left)
elif truncate_xaxis:
xbounds = np.array(axes.get_xlim())
xbounds[1] = axes.get_xticks()[-1]
axes.spines['bottom'].set_bounds(*xbounds)
# case 3: truncate only y (only top; connect to x at bottom)
elif truncate_yaxis:
ybounds = np.array(axes.get_ylim())
if invert_y:
ybounds[0] = axes.get_yticks()[0]
else:
ybounds[1] = axes.get_yticks()[-1]
axes.spines['left'].set_bounds(*ybounds)
# handle axis labels
if skip_axlabel:
axes.set_yticklabels([''] * len(yticks))
axes.set_xticklabels([''] * len(xticks))
else:
if unit is not None:
axes.set_ylabel(unit, rotation=90)
axes.set_xlabel('Time (s)')
# plot vertical lines
if vlines:
_ymin, _ymax = axes.get_ylim()
axes.vlines(vlines, _ymax, _ymin, linestyles='--', colors='k',
linewidth=1., zorder=1)
# invert?
if invert_y:
axes.invert_yaxis()
# changes we always make:
axes.tick_params(direction='out')
axes.tick_params(right=False)
axes.spines['right'].set_visible(False)
axes.spines['left'].set_zorder(0)
def _handle_decim(info, decim, lowpass):
"""Handle decim parameter for plotters."""
from ..evoked import _check_decim
from ..utils import _ensure_int
if isinstance(decim, str) and decim == 'auto':
lp = info['sfreq'] if info['lowpass'] is None else info['lowpass']
lp = min(lp, info['sfreq'] if lowpass is None else lowpass)
info['lowpass'] = lp
decim = max(int(info['sfreq'] / (lp * 3) + 1e-6), 1)
decim = _ensure_int(decim, 'decim', must_be='an int or "auto"')
if decim <= 0:
raise ValueError('decim must be "auto" or a positive integer, got %s'
% (decim,))
decim = _check_decim(info, decim, 0)[0]
data_picks = _pick_data_channels(info, exclude=())
return decim, data_picks
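# Worked sketch of the 'auto' rule in ``_handle_decim``: with sfreq=1000 Hz and
# a 40 Hz low-pass, decimation keeps roughly 3 samples per low-pass period,
# i.e. decim = max(int(1000 / (40 * 3)), 1) = 8. The helper below simply
# mirrors that formula for illustration.
def _example_auto_decim(sfreq=1000., lowpass=40.):
    return max(int(sfreq / (lowpass * 3) + 1e-6), 1)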
def _setup_plot_projector(info, noise_cov, proj=True, use_noise_cov=True,
nave=1):
from ..cov import compute_whitener
projector = np.eye(len(info['ch_names']))
whitened_ch_names = []
if noise_cov is not None and use_noise_cov:
# any channels in noise_cov['bads'] but not in info['bads'] get
# set to nan, which means that they are not plotted.
data_picks = _pick_data_channels(info, with_ref_meg=False, exclude=())
data_names = {info['ch_names'][pick] for pick in data_picks}
# these can be toggled by the user
bad_names = set(info['bads'])
# these can't in standard pipelines be enabled (we always take the
# union), so pretend they're not in cov at all
cov_names = ((set(noise_cov['names']) & set(info['ch_names'])) -
set(noise_cov['bads']))
# Actually compute the whitener only using the difference
whiten_names = cov_names - bad_names
whiten_picks = pick_channels(info['ch_names'], whiten_names)
whiten_info = pick_info(info, whiten_picks)
rank = _triage_rank_sss(whiten_info, [noise_cov])[1][0]
whitener, whitened_ch_names = compute_whitener(
noise_cov, whiten_info, rank=rank, verbose=False)
whitener *= np.sqrt(nave) # proper scaling for Evoked data
assert set(whitened_ch_names) == whiten_names
projector[whiten_picks, whiten_picks[:, np.newaxis]] = whitener
# Now we need to change the set of "whitened" channels to include
# all data channel names so that they are properly italicized.
whitened_ch_names = data_names
# We would need to set "bad_picks" to identity to show the traces
# (but in gray), but here we don't need to because "projector"
# starts out as identity. So all that is left to do is take any
# *good* data channels that are not in the noise cov to be NaN
nan_names = data_names - (bad_names | cov_names)
# XXX conditional necessary because of annoying behavior of
# pick_channels where an empty list means "all"!
if len(nan_names) > 0:
nan_picks = pick_channels(info['ch_names'], nan_names)
projector[nan_picks] = np.nan
elif proj:
projector, _ = setup_proj(info, add_eeg_ref=False, verbose=False)
return projector, whitened_ch_names
def _check_sss(info):
"""Check SSS history in info."""
ch_used = [ch for ch in _DATA_CH_TYPES_SPLIT
if _contains_ch_type(info, ch)]
has_meg = 'mag' in ch_used and 'grad' in ch_used
has_sss = (has_meg and len(info['proc_history']) > 0 and
info['proc_history'][0].get('max_info') is not None)
return ch_used, has_meg, has_sss
def _triage_rank_sss(info, covs, rank=None, scalings=None):
rank = dict() if rank is None else rank
scalings = _handle_default('scalings_cov_rank', scalings)
# Only look at good channels
picks = _pick_data_channels(info, with_ref_meg=False, exclude='bads')
info = pick_info(info, picks)
ch_used, has_meg, has_sss = _check_sss(info)
if has_sss:
if 'mag' in rank or 'grad' in rank:
raise ValueError('When using SSS, pass "meg" to set the rank '
'(separate rank values for "mag" or "grad" are '
'meaningless).')
elif 'meg' in rank:
raise ValueError('When not using SSS, pass separate rank values '
'for "mag" and "grad" (do not use "meg").')
picks_list = _picks_by_type(info, meg_combined=has_sss)
if has_sss:
# reduce ch_used to combined mag grad
ch_used = list(zip(*picks_list))[0]
# order pick list by ch_used (required for compat with plot_evoked)
picks_list = [x for x, y in sorted(zip(picks_list, ch_used))]
n_ch_used = len(ch_used)
# make sure we use the same rank estimates for GFP and whitening
picks_list2 = [k for k in picks_list]
# add meg picks if needed.
if has_meg:
# append ("meg", picks_meg)
picks_list2 += _picks_by_type(info, meg_combined=True)
rank_list = [] # rank dict for each cov
for cov in covs:
# We need to add the covariance projectors, compute the projector,
# and apply it, just like we will do in prepare_noise_cov, otherwise
# we risk the rank estimates being incorrect (i.e., if the projectors
# do not match).
info_proj = info.copy()
info_proj['projs'] += cov['projs']
this_rank = {}
# assemble rank dict for this cov, such that we have meg
for ch_type, this_picks in picks_list2:
# if we have already estimates / values for mag/grad but not
# a value for meg, combine grad and mag.
if ('mag' in this_rank and 'grad' in this_rank and
'meg' not in rank):
this_rank['meg'] = this_rank['mag'] + this_rank['grad']
# and we're done here
break
if rank.get(ch_type) is None:
ch_names = [info['ch_names'][pick] for pick in this_picks]
this_C = pick_channels_cov(cov, ch_names)
this_estimated_rank = compute_rank(
this_C, scalings=scalings, info=info_proj)[ch_type]
this_rank[ch_type] = this_estimated_rank
elif rank.get(ch_type) is not None:
this_rank[ch_type] = rank[ch_type]
rank_list.append(this_rank)
return n_ch_used, rank_list, picks_list, has_sss
def _check_cov(noise_cov, info):
"""Check the noise_cov for whitening and issue an SSS warning."""
from ..cov import read_cov, Covariance
if noise_cov is None:
return None
if isinstance(noise_cov, str):
noise_cov = read_cov(noise_cov)
if not isinstance(noise_cov, Covariance):
raise TypeError('noise_cov must be a str or Covariance, got %s'
% (type(noise_cov),))
if _check_sss(info)[2]: # has_sss
warn('Data have been processed with SSS, which changes the relative '
'scaling of magnetometers and gradiometers when viewing data '
'whitened by a noise covariance')
return noise_cov
def _set_title_multiple_electrodes(title, combine, ch_names, max_chans=6,
all=False, ch_type=None):
"""Prepare a title string for multiple electrodes."""
if title is None:
title = ", ".join(ch_names[:max_chans])
ch_type = _channel_type_prettyprint.get(ch_type, ch_type)
if ch_type is None:
ch_type = "sensor"
if len(ch_names) > 1:
ch_type += "s"
if all is True and isinstance(combine, str):
combine = combine.capitalize()
title = "{} of {} {}".format(
combine, len(ch_names), ch_type)
elif len(ch_names) > max_chans and combine != "gfp":
logger.info("More than {} channels, truncating title ...".format(
max_chans))
title += ", ...\n({} of {} {})".format(
combine, len(ch_names), ch_type,)
return title
def _check_time_unit(time_unit, times):
if not isinstance(time_unit, str):
raise TypeError('time_unit must be str, got %s' % (type(time_unit),))
if time_unit == 's':
pass
elif time_unit == 'ms':
times = 1e3 * times
else:
raise ValueError("time_unit must be 's' or 'ms', got %r" % time_unit)
return time_unit, times
def _plot_masked_image(ax, data, times, mask=None, yvals=None,
cmap="RdBu_r", vmin=None, vmax=None, ylim=None,
mask_style="both", mask_alpha=.25, mask_cmap="Greys",
yscale="linear"):
"""Plot a potentially masked (evoked, TFR, ...) 2D image."""
from matplotlib import ticker, __version__ as mpl_version
if mask_style is None and mask is not None:
mask_style = "both" # default
draw_mask = mask_style in {"both", "mask"}
draw_contour = mask_style in {"both", "contour"}
if cmap is None:
mask_cmap = cmap
# mask param check and preparation
if draw_mask is None:
if mask is not None:
draw_mask = True
else:
draw_mask = False
if draw_contour is None:
if mask is not None:
draw_contour = True
else:
draw_contour = False
if mask is None:
if draw_mask:
warn("`mask` is None, not masking the plot ...")
draw_mask = False
if draw_contour:
warn("`mask` is None, not adding contour to the plot ...")
draw_contour = False
if draw_mask:
if mask.shape != data.shape:
raise ValueError(
"The mask must have the same shape as the data, "
"i.e., %s, not %s" % (data.shape, mask.shape))
if draw_contour and yscale == "log":
warn("Cannot draw contours with linear yscale yet ...")
if yvals is None: # for e.g. Evoked images
yvals = np.arange(data.shape[0])
# else, if TFR plot, yvals will be freqs
# test yscale
if yscale == 'log' and not yvals[0] > 0:
raise ValueError('Using log scale for frequency axis requires all your'
' frequencies to be positive (you cannot include'
' the DC component (0 Hz) in the TFR).')
if len(yvals) < 2 or yvals[0] == 0:
yscale = 'linear'
elif yscale != 'linear':
ratio = yvals[1:] / yvals[:-1]
if yscale == 'auto':
if yvals[0] > 0 and np.allclose(ratio, ratio[0]):
yscale = 'log'
else:
yscale = 'linear'
# https://github.com/matplotlib/matplotlib/pull/9477
if yscale == "log" and mpl_version == "2.1.0":
warn("With matplotlib version 2.1.0, lines may not show up in "
"`AverageTFR.plot_joint`. Upgrade to a more recent version.")
if yscale == "log": # pcolormesh for log scale
# compute bounds between time samples
time_lims, = centers_to_edges(times)
log_yvals = np.concatenate([[yvals[0] / ratio[0]], yvals,
[yvals[-1] * ratio[0]]])
yval_lims = np.sqrt(log_yvals[:-1] * log_yvals[1:])
# construct a time-yvalue bounds grid
time_mesh, yval_mesh = np.meshgrid(time_lims, yval_lims)
if mask is not None:
ax.pcolormesh(time_mesh, yval_mesh, data, cmap=mask_cmap,
vmin=vmin, vmax=vmax, alpha=mask_alpha)
im = ax.pcolormesh(time_mesh, yval_mesh,
np.ma.masked_where(~mask, data), cmap=cmap,
vmin=vmin, vmax=vmax, alpha=1)
else:
im = ax.pcolormesh(time_mesh, yval_mesh, data, cmap=cmap,
vmin=vmin, vmax=vmax)
if ylim is None:
ylim = yval_lims[[0, -1]]
if yscale == 'log':
ax.set_yscale('log')
ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
# get rid of minor ticks
ax.yaxis.set_minor_locator(ticker.NullLocator())
tick_vals = yvals[np.unique(np.linspace(
0, len(yvals) - 1, 12).round().astype('int'))]
ax.set_yticks(tick_vals)
else:
# imshow for linear because the y ticks are nicer
# and the masked areas look better
dt = np.median(np.diff(times)) / 2. if len(times) > 1 else 0.1
dy = np.median(np.diff(yvals)) / 2. if len(yvals) > 1 else 0.5
extent = [times[0] - dt, times[-1] + dt,
yvals[0] - dy, yvals[-1] + dy]
im_args = dict(interpolation='nearest', origin='lower',
extent=extent, aspect='auto', vmin=vmin, vmax=vmax)
if draw_mask:
ax.imshow(data, alpha=mask_alpha, cmap=mask_cmap, **im_args)
im = ax.imshow(
np.ma.masked_where(~mask, data), cmap=cmap, **im_args)
else:
ax.imshow(data, cmap=cmap, **im_args) # see #6481
im = ax.imshow(data, cmap=cmap, **im_args)
if draw_contour and np.unique(mask).size == 2:
big_mask = np.kron(mask, np.ones((10, 10)))
ax.contour(big_mask, colors=["k"], extent=extent,
linewidths=[.75], corner_mask=False,
antialiased=False, levels=[.5])
time_lims = [extent[0], extent[1]]
if ylim is None:
ylim = [extent[2], extent[3]]
ax.set_xlim(time_lims[0], time_lims[-1])
ax.set_ylim(ylim)
if (draw_mask or draw_contour) and mask is not None:
if mask.all():
t_end = ", all points masked)"
else:
fraction = 1 - (np.float64(mask.sum()) / np.float64(mask.size))
t_end = ", %0.3g%% of points masked)" % (fraction * 100,)
else:
t_end = ")"
return im, t_end
@fill_doc
def _make_combine_callable(combine):
"""Convert None or string values of ``combine`` into callables.
Params
------
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_epochs, n_channels, n_times)`` or ``(n_evokeds, n_channels,
n_times)``) and return an :class:`array <numpy.ndarray>` of shape
``(n_epochs, n_times)`` or ``(n_evokeds, n_times)``.
"""
if combine is None:
combine = partial(np.squeeze, axis=1)
elif isinstance(combine, str):
combine_dict = {key: partial(getattr(np, key), axis=1)
for key in ('mean', 'median', 'std')}
combine_dict['gfp'] = lambda data: np.sqrt((data ** 2).mean(axis=1))
try:
combine = combine_dict[combine]
except KeyError:
raise ValueError('"combine" must be None, a callable, or one of '
'"mean", "median", "std", or "gfp"; got {}'
''.format(combine))
return combine
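# Illustrative sketch of ``_make_combine_callable``: 'mean' averages across the
# channel axis while 'gfp' takes the root-mean-square, both mapping
# (n_epochs, n_channels, n_times) data to (n_epochs, n_times). Helper name and
# data shape are made up.
def _example_combine():
    import numpy as np
    rng = np.random.RandomState(0)
    data = rng.randn(5, 3, 100)  # epochs x channels x times
    mean_fun = _make_combine_callable('mean')
    gfp_fun = _make_combine_callable('gfp')
    return mean_fun(data).shape, gfp_fun(data).shape  # both (5, 100)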
def center_cmap(cmap, vmin, vmax, name="cmap_centered"):
"""Center given colormap (ranging from vmin to vmax) at value 0.
Parameters
----------
cmap : matplotlib.colors.Colormap
The colormap to center around 0.
vmin : float
Minimum value in the data to map to the lower end of the colormap.
vmax : float
Maximum value in the data to map to the upper end of the colormap.
name : str
Name of the new colormap. Defaults to 'cmap_centered'.
Returns
-------
cmap_centered : matplotlib.colors.Colormap
The new colormap centered around 0.
Notes
-----
This function can be used in situations where vmin and vmax are not
symmetric around zero. Normally, this results in the value zero not being
mapped to white anymore in many colormaps. Using this function, the value
zero will be mapped to white even for asymmetric positive and negative
value ranges. Note that this could also be achieved by re-normalizing a
given colormap by subclassing matplotlib.colors.Normalize as described
here:
https://matplotlib.org/users/colormapnorms.html#custom-normalization-two-linear-ranges
""" # noqa: E501
from matplotlib.colors import LinearSegmentedColormap
vzero = abs(vmin) / float(vmax - vmin)
index_old = np.linspace(0, 1, cmap.N)
index_new = np.hstack([np.linspace(0, vzero, cmap.N // 2, endpoint=False),
np.linspace(vzero, 1, cmap.N // 2)])
colors = "red", "green", "blue", "alpha"
cdict = {name: [] for name in colors}
for old, new in zip(index_old, index_new):
for color, name in zip(cmap(old), colors):
cdict[name].append((new, color, color))
return LinearSegmentedColormap(name, cdict)
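# Illustrative sketch of ``center_cmap``: with vmin=-1 and vmax=3 the value 0
# sits a quarter of the way into the data range, so the remapped colormap puts
# its midpoint (white for RdBu_r) there. Helper name and data are made up.
def _example_center_cmap():
    import numpy as np
    import matplotlib.pyplot as plt
    data = np.linspace(-1, 3, 100).reshape(10, 10)
    cmap = center_cmap(plt.get_cmap('RdBu_r'), vmin=-1, vmax=3)
    fig, ax = plt.subplots()
    im = ax.imshow(data, cmap=cmap, vmin=-1, vmax=3)
    fig.colorbar(im, ax=ax)
    return fig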
def _convert_psds(psds, dB, estimate, scaling, unit, ch_names=None,
first_dim='channel'):
"""Convert PSDs to dB (if necessary) and appropriate units.
The following table summarizes the relationship between the value of
parameters ``dB`` and ``estimate``, and the type of plot and corresponding
units.
| dB | estimate | plot | units |
|-------+-------------+------+-------------------|
| True | 'power' | PSD | amp**2/Hz (dB) |
| True | 'amplitude' | ASD | amp/sqrt(Hz) (dB) |
| True | 'auto' | PSD | amp**2/Hz (dB) |
| False | 'power' | PSD | amp**2/Hz |
| False | 'amplitude' | ASD | amp/sqrt(Hz) |
| False | 'auto' | ASD | amp/sqrt(Hz) |
where amp are the units corresponding to the variable, as specified by
``unit``.
"""
_check_option('first_dim', first_dim, ['channel', 'epoch'])
where = np.where(psds.min(1) <= 0)[0]
if len(where) > 0:
# Construct a helpful error message, depending on whether the first
# dimension of `psds` are channels or epochs.
if dB:
bad_value = 'Infinite'
else:
bad_value = 'Zero'
if first_dim == 'channel':
bads = ', '.join(ch_names[ii] for ii in where)
else:
bads = ', '.join(str(ii) for ii in where)
msg = f'{bad_value} value in PSD for {first_dim}{_pl(where)} {bads}.'
if first_dim == 'channel':
msg += '\nThese channels might be dead.'
warn(msg, UserWarning)
if estimate == 'auto':
estimate = 'power' if dB else 'amplitude'
if estimate == 'amplitude':
np.sqrt(psds, out=psds)
psds *= scaling
ylabel = r'$\mathrm{%s/\sqrt{Hz}}$' % unit
else:
psds *= scaling * scaling
if '/' in unit:
unit = '(%s)' % unit
ylabel = r'$\mathrm{%s²/Hz}$' % unit
if dB:
np.log10(np.maximum(psds, np.finfo(float).tiny), out=psds)
psds *= 10
ylabel += r'$\ \mathrm{(dB)}$'
return ylabel
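# Illustrative sketch of ``_convert_psds``: converting a synthetic EEG PSD (in
# V**2/Hz) to dB-scaled power in µV²/Hz. The conversion happens in place and
# the matching axis label is returned. Helper name and data are made up.
def _example_convert_psds():
    import numpy as np
    rng = np.random.RandomState(0)
    psds = rng.rand(2, 50) * 1e-12 + 1e-14  # strictly positive, in V**2/Hz
    ylabel = _convert_psds(psds, dB=True, estimate='power', scaling=1e6,
                           unit='µV', ch_names=['EEG 001', 'EEG 002'])
    return psds, ylabel  # psds now hold 10*log10(µV²/Hz) values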
def _plot_psd(inst, fig, freqs, psd_list, picks_list, titles_list,
units_list, scalings_list, ax_list, make_label, color, area_mode,
area_alpha, dB, estimate, average, spatial_colors, xscale,
line_alpha, sphere, xlabels_list):
# helper function for plot_raw_psd and plot_epochs_psd
from matplotlib.ticker import ScalarFormatter
from .evoked import _plot_lines
for key, ls in zip(['lowpass', 'highpass', 'line_freq'],
['--', '--', '-.']):
if inst.info[key] is not None:
for ax in ax_list:
ax.axvline(inst.info[key], color='k', linestyle=ls,
alpha=0.25, linewidth=2, zorder=2)
if line_alpha is None:
line_alpha = 1.0 if average else 0.75
line_alpha = float(line_alpha)
ylabels = list()
for ii, (psd, picks, title, ax, scalings, units) in enumerate(zip(
psd_list, picks_list, titles_list, ax_list,
scalings_list, units_list)):
ylabel = _convert_psds(psd, dB, estimate, scalings, units,
[inst.ch_names[pi] for pi in picks])
ylabels.append(ylabel)
del ylabel
if average:
# mean across channels
psd_mean = np.mean(psd, axis=0)
if area_mode == 'std':
# std across channels
psd_std = np.std(psd, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(psd, axis=0),
np.max(psd, axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color, alpha=line_alpha,
linewidth=0.5)
if hyp_limits is not None:
ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
facecolor=color, alpha=area_alpha)
if not average:
picks = np.concatenate(picks_list)
psd_list = np.concatenate(psd_list)
types = np.array(inst.get_channel_types(picks=picks))
# Needed because the data do not match the info anymore.
info = create_info([inst.ch_names[p] for p in picks],
inst.info['sfreq'], types)
info['chs'] = [inst.info['chs'][p] for p in picks]
info['dev_head_t'] = inst.info['dev_head_t']
ch_types_used = list()
for this_type in _VALID_CHANNEL_TYPES:
if this_type in types:
ch_types_used.append(this_type)
assert len(ch_types_used) == len(ax_list)
unit = ''
units = {t: yl for t, yl in zip(ch_types_used, ylabels)}
titles = {c: t for c, t in zip(ch_types_used, titles_list)}
picks = np.arange(len(psd_list))
if not spatial_colors:
spatial_colors = color
_plot_lines(psd_list, info, picks, fig, ax_list, spatial_colors,
unit, units=units, scalings=None, hline=None, gfp=False,
types=types, zorder='std', xlim=(freqs[0], freqs[-1]),
ylim=None, times=freqs, bad_ch_idx=[], titles=titles,
ch_types_used=ch_types_used, selectable=True, psd=True,
line_alpha=line_alpha, nave=None, time_unit='ms',
sphere=sphere)
for ii, (ax, xlabel) in enumerate(zip(ax_list, xlabels_list)):
ax.grid(True, linestyle=':')
if xscale == 'log':
ax.set(xscale='log')
ax.set(xlim=[freqs[1] if freqs[0] == 0 else freqs[0], freqs[-1]])
ax.get_xaxis().set_major_formatter(ScalarFormatter())
else: # xscale == 'linear'
ax.set(xlim=(freqs[0], freqs[-1]))
if make_label:
ax.set(ylabel=ylabels[ii], title=titles_list[ii])
if xlabel:
ax.set_xlabel('Frequency (Hz)')
if make_label:
fig.align_ylabels(axs=ax_list)
return fig
def _trim_ticks(ticks, _min, _max):
"""Remove ticks that are more extreme than the given limits."""
keep = np.where(np.logical_and(ticks >= _min, ticks <= _max))
return ticks[keep]
def _set_window_title(fig, title):
if fig.canvas.manager is not None:
fig.canvas.manager.set_window_title(title)
def _shorten_path_from_middle(fpath, max_len=60, replacement='...'):
"""Truncate a path from the middle by omitting complete path elements."""
from os.path import sep
if len(fpath) > max_len:
pathlist = fpath.split(sep)
# indices starting from middle, alternating sides, omitting final elem:
# range(8) → 3, 4, 2, 5, 1, 6; range(7) → 2, 3, 1, 4, 0, 5
ixs_to_trunc = list(zip(range(len(pathlist) // 2 - 1, -1, -1),
range(len(pathlist) // 2, len(pathlist) - 1)))
ixs_to_trunc = np.array(ixs_to_trunc).flatten()
for ix in ixs_to_trunc:
pathlist[ix] = replacement
truncs = (np.array(pathlist) == replacement).nonzero()[0]
newpath = sep.join(pathlist[:truncs[0]] + pathlist[truncs[-1]:])
if len(newpath) < max_len:
break
return newpath
return fpath
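# Illustrative sketch of ``_shorten_path_from_middle``: long paths lose whole
# directory components from the middle until they fit ``max_len``. The path
# below is made up for demonstration only.
def _example_shorten_path():
    fpath = '/home/user/projects/very/deep/directory/tree/sample_raw.fif'
    # e.g. '/home/.../directory/tree/sample_raw.fif'
    return _shorten_path_from_middle(fpath, max_len=40)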
def centers_to_edges(*arrays):
"""Convert center points to edges.
Parameters
----------
*arrays : list of ndarray
Each input array should be 1D monotonically increasing,
and will be cast to float.
Returns
-------
arrays : list of ndarray
Given each input of shape (N,), the output will have shape (N+1,).
Examples
--------
>>> x = [0., 0.1, 0.2, 0.3]
>>> y = [20, 30, 40]
>>> centers_to_edges(x, y) # doctest: +SKIP
[array([-0.05, 0.05, 0.15, 0.25, 0.35]), array([15., 25., 35., 45.])]
"""
out = list()
for ai, arr in enumerate(arrays):
arr = np.asarray(arr, dtype=float)
_check_option(f'arrays[{ai}].ndim', arr.ndim, (1,))
if len(arr) > 1:
arr_diff = np.diff(arr) / 2.
else:
arr_diff = [abs(arr[0]) * 0.001] if arr[0] != 0 else [0.001]
out.append(np.concatenate([
[arr[0] - arr_diff[0]],
arr[:-1] + arr_diff,
[arr[-1] + arr_diff[-1]]]))
return out
def _figure_agg(**kwargs):
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
fig = Figure(**kwargs)
FigureCanvasAgg(fig)
return fig
def _ndarray_to_fig(img):
"""Convert to MPL figure, adapted from matplotlib.image.imsave."""
dpi = 100
figsize = np.array(img.shape[:2][::-1]) / dpi
fig = _figure_agg(dpi=dpi, figsize=figsize, frameon=False)
fig.figimage(img, resize=True)
return fig
def _save_ndarray_img(fname, img):
"""Save an image to disk."""
from PIL import Image
Image.fromarray(img).save(fname)
def concatenate_images(images, axis=0, bgcolor='black', centered=True,
n_channels=3):
"""Concatenate a list of images.
Parameters
----------
images : list of ndarray
The list of images to concatenate.
axis : 0 or 1
The array axis along which the images are stacked: vertically if 0
(heights add up) and horizontally if 1 (widths add up). Defaults to 0.
bgcolor : str | list
The color of the background. The name of the color is accepted
(e.g 'red') or a list of RGB values between 0 and 1. Defaults to
'black'.
centered : bool
If True, the images are centered. Defaults to True.
n_channels : int
Number of color channels. Can be 3 or 4. The default value is 3.
Returns
-------
img : ndarray
The concatenated image.
"""
from matplotlib.colors import colorConverter
if isinstance(bgcolor, str):
func_name = 'to_rgb' if n_channels == 3 else 'to_rgba'
bgcolor = getattr(colorConverter, func_name)(bgcolor)
bgcolor = np.asarray(bgcolor) * 255
funcs = [np.sum, np.max]
ret_shape = np.asarray([
funcs[axis]([image.shape[0] for image in images]),
funcs[1 - axis]([image.shape[1] for image in images]),
])
ret = np.zeros((ret_shape[0], ret_shape[1], n_channels), dtype=np.uint8)
ret[:, :, :] = bgcolor
ptr = np.array([0, 0])
sec = np.array([0 == axis, 1 == axis]).astype(int)
for image in images:
shape = image.shape[:-1]
dec = ptr
dec += ((ret_shape - shape) // 2) * (1 - sec) if centered else 0
ret[dec[0]:dec[0] + shape[0], dec[1]:dec[1] + shape[1], :] = image
ptr += shape * sec
return ret
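# Illustrative sketch of ``concatenate_images``: two 8x10 RGB frames stacked
# along axis 0 give a 16x10 canvas (heights add) on a black background.
# Helper name and data are made up.
def _example_concatenate_images():
    import numpy as np
    rng = np.random.RandomState(0)
    imgs = [rng.randint(0, 255, (8, 10, 3), dtype=np.uint8) for _ in range(2)]
    out = concatenate_images(imgs, axis=0)
    return out.shape  # (16, 10, 3)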
def _generate_default_filename(ext=".png"):
now = datetime.now()
dt_string = now.strftime("_%Y-%m-%d_%H-%M-%S")
return "MNE" + dt_string + ext
def _prop_kw(kind, val):
# Can be removed in when we depend on matplotlib 3.4.3+
# https://github.com/matplotlib/matplotlib/pull/20585
from matplotlib.widgets import SpanSelector
pre = '' if 'props' in _get_args(SpanSelector) else kind
return {pre + 'props': val}
| 38.663713
| 144
| 0.593716
|
f5493ef1568865b4ce69891b4511017a569d3321
| 2,916
|
py
|
Python
|
murano/openstack/common/sslutils.py
|
chenyujie/hybrid-murano
|
5577bee4bd636d0de794ca928897fe6b1b69b1a4
|
[
"Apache-2.0"
] | null | null | null |
murano/openstack/common/sslutils.py
|
chenyujie/hybrid-murano
|
5577bee4bd636d0de794ca928897fe6b1b69b1a4
|
[
"Apache-2.0"
] | null | null | null |
murano/openstack/common/sslutils.py
|
chenyujie/hybrid-murano
|
5577bee4bd636d0de794ca928897fe6b1b69b1a4
|
[
"Apache-2.0"
] | 1
|
2016-04-30T07:27:52.000Z
|
2016-04-30T07:27:52.000Z
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import ssl
from oslo_config import cfg
from murano.openstack.common._i18n import _
ssl_opts = [
cfg.StrOpt('ca_file',
help="CA certificate file to use to verify "
"connecting clients."),
cfg.StrOpt('cert_file',
help="Certificate file to use when starting "
"the server securely."),
cfg.StrOpt('key_file',
help="Private key file to use when starting "
"the server securely."),
]
CONF = cfg.CONF
config_section = 'ssl'
CONF.register_opts(ssl_opts, config_section)
def list_opts():
"""Entry point for oslo.config-generator."""
return [(config_section, copy.deepcopy(ssl_opts))]
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23,
"sslv3": ssl.PROTOCOL_SSLv3
}
try:
_SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
pass
def validate_ssl_version(version):
key = version.lower()
try:
return _SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
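# Illustrative usage sketch: version names are matched case-insensitively
# against the table above; unknown names raise RuntimeError. Helper name is
# made up for demonstration only.
def _example_validate_ssl_version():
    assert validate_ssl_version('TLSv1') == ssl.PROTOCOL_TLSv1
    try:
        validate_ssl_version('sslv9')
    except RuntimeError:
        pass  # unsupported versions are rejected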
| 28.588235
| 78
| 0.653978
|
7da99970aefd207e2010bf7c73c1c7647dc9b012
| 4,663
|
py
|
Python
|
pebbles/views/import_export.py
|
CSCfi/pebbles
|
24b32e8fc538cc8095fda62c892a8221346c2bce
|
[
"MIT"
] | 4
|
2017-05-11T14:50:32.000Z
|
2020-01-10T09:02:27.000Z
|
pebbles/views/import_export.py
|
CSCfi/pebbles
|
24b32e8fc538cc8095fda62c892a8221346c2bce
|
[
"MIT"
] | 145
|
2017-04-07T11:01:58.000Z
|
2019-12-11T15:30:23.000Z
|
pebbles/views/import_export.py
|
CSCfi/pebbles
|
24b32e8fc538cc8095fda62c892a8221346c2bce
|
[
"MIT"
] | 3
|
2017-10-25T12:36:16.000Z
|
2018-04-26T08:49:34.000Z
|
from flask import g
from flask_restful import fields, marshal_with
from flask import Blueprint as FlaskBlueprint
import logging
from pebbles.models import db, Blueprint, BlueprintTemplate, Plugin, Group
from pebbles.server import restful
from pebbles.views.commons import auth, requires_group_manager_or_admin
from pebbles.views.blueprint_templates import blueprint_schemaform_config
from pebbles.utils import requires_admin
from pebbles.rules import apply_rules_export_blueprints
from pebbles.forms import BlueprintImportForm, BlueprintTemplateImportForm
import_export = FlaskBlueprint('import_export', __name__)
template_export_fields = {
'name': fields.String,
'is_enabled': fields.Boolean,
'plugin_name': fields.String,
'config': fields.Raw,
'allowed_attrs': fields.Raw
}
blueprint_export_fields = {
'maximum_lifetime': fields.Integer,
'name': fields.String,
'is_enabled': fields.Boolean,
'template_name': fields.String,
'config': fields.Raw,
'group_name': fields.String
}
class ImportExportBlueprintTemplates(restful.Resource):
@auth.login_required
@requires_admin
@marshal_with(template_export_fields)
def get(self):
query = BlueprintTemplate.query
templates = query.all()
results = []
for template in templates:
plugin = Plugin.query.filter_by(id=template.plugin).first()
obj = {
'name': template.name,
'is_enabled': template.is_enabled,
'config': template.config,
'allowed_attrs': template.allowed_attrs,
'plugin_name': plugin.name
}
results.append(obj)
return results
@auth.login_required
@requires_admin
def post(self):
form = BlueprintTemplateImportForm()
if not form.validate_on_submit():
logging.warn(form.errors)
logging.warn("validation error on create blueprint")
return form.errors, 422
plugin_name = form.plugin_name.data
plugin = Plugin.query.filter_by(name=plugin_name).first()
if not plugin:
logging.warn('no plugins found with name %s', plugin_name)
return {"error": "No plugins found"}, 404
template = BlueprintTemplate()
template.name = form.name.data
template.plugin = plugin.id
template.config = form.config.data
if isinstance(form.allowed_attrs.data, dict): # WTForms can only fetch a dict
template.allowed_attrs = form.allowed_attrs.data['allowed_attrs']
template = blueprint_schemaform_config(template)
db.session.add(template)
db.session.commit()
class ImportExportBlueprints(restful.Resource):
@auth.login_required
@requires_group_manager_or_admin
@marshal_with(blueprint_export_fields)
def get(self):
user = g.user
query = apply_rules_export_blueprints(user)
blueprints = query.all()
results = []
for blueprint in blueprints:
template = BlueprintTemplate.query.filter_by(id=blueprint.template_id).first()
obj = {
'name': blueprint.name,
'maximum_lifetime': blueprint.maximum_lifetime,
'is_enabled': blueprint.is_enabled,
'config': blueprint.config,
'template_name': template.name,
'group_name': blueprint.group.name
}
results.append(obj)
return results
@auth.login_required
@requires_group_manager_or_admin
def post(self):
form = BlueprintImportForm()
if not form.validate_on_submit():
logging.warn(form.errors)
logging.warn("validation error on creating blueprints with import")
return form.errors, 422
template_name = form.template_name.data
template = BlueprintTemplate.query.filter_by(name=template_name).first()
if not template:
logging.warn('no blueprint template found with name %s', template_name)
return {"error": "No blueprint template found"}, 404
group_name = form.group_name.data
group = Group.query.filter_by(name=group_name).first()
if not group:
logging.warn('no group found with name %s', group_name)
return {"error": "No group found"}, 404
blueprint = Blueprint()
blueprint.name = form.name.data
blueprint.template_id = template.id
blueprint.group_id = group.id
blueprint.config = form.config.data
db.session.add(blueprint)
db.session.commit()
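# Hedged wiring sketch (assumption, not in the original file): how these two
# resources could be exposed through Flask-RESTful. The Api construction and
# URL paths below are illustrative only; the pebbles server registers its API
# routes elsewhere and may use different paths.
# api = restful.Api(import_export)
# api.add_resource(ImportExportBlueprintTemplates, '/import_export/blueprint_templates')
# api.add_resource(ImportExportBlueprints, '/import_export/blueprints')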
| 33.546763
| 90
| 0.655372
|
11feb2573477c189a4c64565514bbdadb9cc0144
| 6,335
|
py
|
Python
|
src/mips/instruction.py
|
IOverflow/cool-compiler-2020
|
65ee19af19973e25b79de70e0dca7721b93c82b6
|
[
"MIT"
] | null | null | null |
src/mips/instruction.py
|
IOverflow/cool-compiler-2020
|
65ee19af19973e25b79de70e0dca7721b93c82b6
|
[
"MIT"
] | null | null | null |
src/mips/instruction.py
|
IOverflow/cool-compiler-2020
|
65ee19af19973e25b79de70e0dca7721b93c82b6
|
[
"MIT"
] | null | null | null |
"""
This module offers wrappers over the MIPS instructions so that they can be
parametrized and are easy to use when writing assembly.
"""
# ********************** REGISTERS *********************************
zero = 0  # Always holds the constant 0.
at = 1  # Reserved for the assembler.
# Registers for return values
v0 = 2
v1 = 3
# Registers for arguments
a0 = 4
a1 = 5
a2 = 6
a3 = 7
# Temporary registers
t0 = 8
t1 = 9
t2 = 10
t3 = 11
t4 = 12
t5 = 13
t6 = 14
t7 = 15
t8 = 24
t9 = 25
# Saved Registers
s0 = 16
s1 = 17
s2 = 18
s3 = 19
s4 = 20
s5 = 21
s6 = 22
s7 = 23
# Kernel registers
k0 = 26
k1 = 27
# Global Data Pointer
gp = 28
# Stack Pointer
sp = 29
# Frame Pointer
fp = 30
# Return address
ra = 31
TEMP_REGISTERS = (t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s2, s3, s4, s5, s6, s7, v1)
ARGS_REGISTERS = (a0, a1, a2, a3)
REG_TO_STR = {
zero: "zero",
v0: "v0",
v1: "v1",
s0: "s0",
s1: "s1",
s2: "s2",
s3: "s3",
s4: "s4",
s5: "s5",
s6: "s6",
s7: "s7",
sp: "sp",
fp: "fp",
ra: "ra",
gp: "gp",
t0: "t0",
t1: "t1",
t2: "t2",
t3: "t3",
t4: "t4",
t5: "t5",
t6: "t6",
t7: "t7",
t8: "t8",
t9: "t9",
a0: "a0",
a1: "a1",
a2: "a2",
a3: "a3",
at: "at"
}
class MipsNode:
pass
# ********************** ARITHMETIC INSTRUCTIONS *****************************
class ArithmeticNode(MipsNode):
def __init__(self, dest: int, src1: int, src2: int, const_src2=False):
self.dest = dest
self.src1 = src1
self.src2 = src2
self.action = self.__class__.__name__.lower()
self.const_src2 = const_src2
if '_' in self.action:
self.action = self.action.replace('_', "")
def __str__(self):
if self.const_src2:
return f'{self.action} ${REG_TO_STR[self.dest]}, ${REG_TO_STR[self.src1]}, {self.src2}'
return f'{self.action} ${REG_TO_STR[self.dest]}, ${REG_TO_STR[self.src1]}, ${REG_TO_STR[self.src2]}'
class BinaryNode(MipsNode):
def __init__(self, dest: int, src1: int):
self.dest = dest
self.src1 = src1
self.action = self.__class__.__name__.lower().replace('_', "")
def __str__(self):
return f'{self.action} ${REG_TO_STR[self.dest]}, ${REG_TO_STR[self.src1]}'
# ************************* COMPARISON INSTRUCTIONS ******************
class ComparisonNode(MipsNode):
def __init__(self, dest: int, src1: int, src2: int, const_src2=False):
self.dest = dest
self.src1 = src1
self.src2 = src2
self.action = self.__class__.__name__.lower()
self.const_src2 = const_src2
def __str__(self):
if self.const_src2:
return f'{self.action} ${REG_TO_STR[self.dest]}, ${REG_TO_STR[self.src1]}, {self.src2}'
return f'{self.action} ${REG_TO_STR[self.dest]}, ${REG_TO_STR[self.src1]}, ${REG_TO_STR[self.src2]}'
# ************************ JUMP INSTRUCTIONS *********************
class UnconditionalJumpNode(MipsNode):
def __init__(self, label: str):
self.label = label
self.action = self.__class__.__name__.lower()
def __str__(self):
return f'{self.action} {self.label}'
class UnconditionalJumpRegisterNode(MipsNode):
def __init__(self, src1: int):
self.src1 = src1
self.action = self.__class__.__name__.lower()
def __str__(self):
return f'{self.action} ${REG_TO_STR[self.src1]}'
class UnaryJumpNode(MipsNode):
def __init__(self, src1: int, label: str):
self.src1 = src1
self.label = label
self.action = self.__class__.__name__.lower()
def __str__(self):
return f'{self.action} ${REG_TO_STR[self.src1]}, {self.label}'
class BinaryJumpNode(MipsNode):
def __init__(self, src1: int, src2: int, label: str, const_src2=False):
self.src1 = src1
self.src2 = src2
self.label = label
self.const_src2 = const_src2
self.action = self.__class__.__name__.lower()
def __str__(self):
if self.const_src2:
return f'{self.action} ${REG_TO_STR[self.src1]}, {self.src2}, {self.label}'
return f'{self.action} ${REG_TO_STR[self.src1]}, ${REG_TO_STR[self.src2]}, {self.label}'
# ******************** LOAD AND STORE INSTRUCTIONS ********************
class AbstractLoadNode(MipsNode):
def __init__(self, dest: int, src):
self.dest = dest
self.src = src
self.action = self.__class__.__name__.lower()
def __str__(self):
return f'{self.action} ${REG_TO_STR[self.dest]}, {self.src}'
class MOVE(BinaryNode):
"""
    Copies the contents of $src1 into $dest.
"""
pass
# ******************** EXCEPTION HANDLING *************************
class RFE(MipsNode):
"""
    Returns from an exception.
"""
def __str__(self):
return "rfe"
class SYSCALL(MipsNode):
"""
    Performs a system call.
"""
def __str__(self):
return "syscall"
class BREAK(MipsNode):
"""
    Used by the debugger.
"""
def __init__(self, const: int):
self.const = const
def __str__(self):
return f'break {self.const}'
class NOP(MipsNode):
"""
    An instruction that does nothing except consume one clock cycle.
"""
def __str__(self):
return "nop"
class LineComment(MipsNode):
"""
    Represents a single-line comment.
"""
def __init__(self, string: str):
self.text = string
def __str__(self):
return f'# {self.text}'
class Label(MipsNode):
"""
    Represents a label (it stores a memory address that can be referenced).
"""
def __init__(self, label: str):
self.label = label
def __str__(self):
return f"{self.label}: "
class FixedData(MipsNode):
"""
    Represents a datum in the .data section.
    For example:
msg: .asciiz "Hello World!\n"
"""
def __init__(self, name: str, value, type_="word"):
assert type_ in ("word", "asciiz", "byte", "space")
self.name = name
self.value = value
self.type = type_
def __str__(self):
return f"{self.name}: .{self.type} {self.value}"
| 23.120438
| 108
| 0.569061
|
0ac108acc327f2b80a43dbd9cd37a167c4a80f9d
| 22,956
|
py
|
Python
|
kge/job/entity_ranking.py
|
pbloem/kge
|
9dcba730cf85acab02cccd49e5ffbd74c8774230
|
[
"MIT"
] | 1
|
2021-06-03T10:48:43.000Z
|
2021-06-03T10:48:43.000Z
|
kge/job/entity_ranking.py
|
pbloem/kge
|
9dcba730cf85acab02cccd49e5ffbd74c8774230
|
[
"MIT"
] | null | null | null |
kge/job/entity_ranking.py
|
pbloem/kge
|
9dcba730cf85acab02cccd49e5ffbd74c8774230
|
[
"MIT"
] | null | null | null |
import math
import time
import torch
import kge.job
from kge.job import EvaluationJob, Job
from kge import Config, Dataset
from collections import defaultdict
class EntityRankingJob(EvaluationJob):
""" Entity ranking evaluation protocol """
def __init__(self, config: Config, dataset: Dataset, parent_job, model):
super().__init__(config, dataset, parent_job, model)
self.is_prepared = False
if self.__class__ == EntityRankingJob:
for f in Job.job_created_hooks:
f(self)
def _prepare(self):
"""Construct all indexes needed to run."""
if self.is_prepared:
return
# create data and precompute indexes
self.triples = self.dataset.split(self.config.get("eval.split"))
for split in self.filter_splits:
self.dataset.index(f"{split}_sp_to_o")
self.dataset.index(f"{split}_po_to_s")
if "test" not in self.filter_splits and self.filter_with_test:
self.dataset.index("test_sp_to_o")
self.dataset.index("test_po_to_s")
# and data loader
self.loader = torch.utils.data.DataLoader(
self.triples,
collate_fn=self._collate,
shuffle=False,
batch_size=self.batch_size,
num_workers=self.config.get("eval.num_workers"),
pin_memory=self.config.get("eval.pin_memory"),
)
# let the model add some hooks, if it wants to do so
self.model.prepare_job(self)
self.is_prepared = True
def _collate(self, batch):
"Looks up true triples for each triple in the batch"
label_coords = []
for split in self.filter_splits:
split_label_coords = kge.job.util.get_sp_po_coords_from_spo_batch(
batch,
self.dataset.num_entities(),
self.dataset.index(f"{split}_sp_to_o"),
self.dataset.index(f"{split}_po_to_s"),
)
label_coords.append(split_label_coords)
label_coords = torch.cat(label_coords)
if "test" not in self.filter_splits and self.filter_with_test:
test_label_coords = kge.job.util.get_sp_po_coords_from_spo_batch(
batch,
self.dataset.num_entities(),
self.dataset.index("test_sp_to_o"),
self.dataset.index("test_po_to_s"),
)
else:
test_label_coords = torch.zeros([0, 2], dtype=torch.long)
batch = torch.cat(batch).reshape((-1, 3))
return batch, label_coords, test_label_coords
@torch.no_grad()
def run(self) -> dict:
self._prepare()
was_training = self.model.training
self.model.eval()
self.config.log(
"Evaluating on "
+ self.eval_split
+ " data (epoch {})...".format(self.epoch)
)
num_entities = self.dataset.num_entities()
# we also filter with test data if requested
filter_with_test = (
"test" not in self.filter_splits and self.filter_with_test
)
# which rankings to compute (DO NOT REORDER; code assumes the order given here)
rankings = (
["_raw", "_filt", "_filt_test"]
if filter_with_test
else ["_raw", "_filt"]
)
# dictionary that maps entry of rankings to a sparse tensor containing the
# true labels for this option
labels_for_ranking = defaultdict(lambda: None)
        # Initialize dictionaries that hold the overall histogram of ranks of true
# answers. These histograms are used to compute relevant metrics. The dictionary
# entry with key 'all' collects the overall statistics and is the default.
hists = dict()
hists_filt = dict()
hists_filt_test = dict()
# let's go
epoch_time = -time.time()
for batch_number, batch_coords in enumerate(self.loader):
# construct a sparse label tensor of shape batch_size x 2*num_entities
# entries are either 0 (false) or infinity (true)
# TODO add timing information
batch = batch_coords[0].to(self.device)
s, p, o = batch[:, 0], batch[:, 1], batch[:, 2]
label_coords = batch_coords[1].to(self.device)
if filter_with_test:
test_label_coords = batch_coords[2].to(self.device)
# create sparse labels tensor
test_labels = kge.job.util.coord_to_sparse_tensor(
len(batch),
2 * num_entities,
test_label_coords,
self.device,
float("Inf"),
)
labels_for_ranking["_filt_test"] = test_labels
# create sparse labels tensor
labels = kge.job.util.coord_to_sparse_tensor(
len(batch), 2 * num_entities, label_coords, self.device, float("Inf")
)
labels_for_ranking["_filt"] = labels
# compute true scores beforehand, since we can't get them from a chunked
# score table
o_true_scores = self.model.score_spo(s, p, o, "o").view(-1)
s_true_scores = self.model.score_spo(s, p, o, "s").view(-1)
# default dictionary storing rank and num_ties for each key in rankings
# as list of len 2: [rank, num_ties]
ranks_and_ties_for_ranking = defaultdict(
lambda: [
torch.zeros(s.size(0), dtype=torch.long).to(self.device),
torch.zeros(s.size(0), dtype=torch.long).to(self.device),
]
)
# calculate scores in chunks to not have the complete score matrix in memory
# a chunk here represents a range of entity_values to score against
if self.config.get("eval.chunk_size") > -1:
chunk_size = self.config.get("eval.chunk_size")
else:
chunk_size = self.dataset.num_entities()
# process chunk by chunk
for chunk_number in range(math.ceil(num_entities / chunk_size)):
chunk_start = chunk_size * chunk_number
chunk_end = min(chunk_size * (chunk_number + 1), num_entities)
# compute scores of chunk
scores = self.model.score_sp_po(
s, p, o, torch.arange(chunk_start, chunk_end).to(self.device)
)
scores_sp = scores[:, : chunk_end - chunk_start]
scores_po = scores[:, chunk_end - chunk_start :]
# replace the precomputed true_scores with the ones occurring in the
# scores matrix to avoid floating point issues
s_in_chunk_mask = (chunk_start <= s) & (s < chunk_end)
o_in_chunk_mask = (chunk_start <= o) & (o < chunk_end)
o_in_chunk = (o[o_in_chunk_mask] - chunk_start).long()
s_in_chunk = (s[s_in_chunk_mask] - chunk_start).long()
scores_sp[o_in_chunk_mask, o_in_chunk] = o_true_scores[o_in_chunk_mask]
scores_po[s_in_chunk_mask, s_in_chunk] = s_true_scores[s_in_chunk_mask]
# now compute the rankings (assumes order: None, _filt, _filt_test)
for ranking in rankings:
if labels_for_ranking[ranking] is None:
labels_chunk = None
else:
# densify the needed part of the sparse labels tensor
labels_chunk = self._densify_chunk_of_labels(
labels_for_ranking[ranking], chunk_start, chunk_end
)
# remove current example from labels
labels_chunk[o_in_chunk_mask, o_in_chunk] = 0
labels_chunk[
s_in_chunk_mask, s_in_chunk + (chunk_end - chunk_start)
] = 0
# compute partial ranking and filter the scores (sets scores of true
# labels to infinity)
(
s_rank_chunk,
s_num_ties_chunk,
o_rank_chunk,
o_num_ties_chunk,
scores_sp_filt,
scores_po_filt,
) = self._filter_and_rank(
scores_sp, scores_po, labels_chunk, o_true_scores, s_true_scores
)
# from now on, use filtered scores
scores_sp = scores_sp_filt
scores_po = scores_po_filt
# update rankings
ranks_and_ties_for_ranking["s" + ranking][0] += s_rank_chunk
ranks_and_ties_for_ranking["s" + ranking][1] += s_num_ties_chunk
ranks_and_ties_for_ranking["o" + ranking][0] += o_rank_chunk
ranks_and_ties_for_ranking["o" + ranking][1] += o_num_ties_chunk
# we are done with the chunk
# We are done with all chunks; calculate final ranks from counts
s_ranks = self._get_ranks(
ranks_and_ties_for_ranking["s_raw"][0],
ranks_and_ties_for_ranking["s_raw"][1],
)
o_ranks = self._get_ranks(
ranks_and_ties_for_ranking["o_raw"][0],
ranks_and_ties_for_ranking["o_raw"][1],
)
s_ranks_filt = self._get_ranks(
ranks_and_ties_for_ranking["s_filt"][0],
ranks_and_ties_for_ranking["s_filt"][1],
)
o_ranks_filt = self._get_ranks(
ranks_and_ties_for_ranking["o_filt"][0],
ranks_and_ties_for_ranking["o_filt"][1],
)
            # Update the histograms of raw ranks and filtered ranks
batch_hists = dict()
batch_hists_filt = dict()
for f in self.hist_hooks:
f(batch_hists, s, p, o, s_ranks, o_ranks, job=self)
f(batch_hists_filt, s, p, o, s_ranks_filt, o_ranks_filt, job=self)
# and the same for filtered_with_test ranks
if filter_with_test:
batch_hists_filt_test = dict()
s_ranks_filt_test = self._get_ranks(
ranks_and_ties_for_ranking["s_filt_test"][0],
ranks_and_ties_for_ranking["s_filt_test"][1],
)
o_ranks_filt_test = self._get_ranks(
ranks_and_ties_for_ranking["o_filt_test"][0],
ranks_and_ties_for_ranking["o_filt_test"][1],
)
for f in self.hist_hooks:
f(
batch_hists_filt_test,
s,
p,
o,
s_ranks_filt_test,
o_ranks_filt_test,
job=self,
)
# optionally: trace ranks of each example
if self.trace_examples:
entry = {
"type": "entity_ranking",
"scope": "example",
"split": self.eval_split,
"filter_splits": self.filter_splits,
"size": len(batch),
"batches": len(self.loader),
"epoch": self.epoch,
}
for i in range(len(batch)):
entry["batch"] = i
entry["s"], entry["p"], entry["o"] = (
s[i].item(),
p[i].item(),
o[i].item(),
)
if filter_with_test:
entry["rank_filtered_with_test"] = (
o_ranks_filt_test[i].item() + 1
)
self.trace(
event="example_rank",
task="sp",
rank=o_ranks[i].item() + 1,
rank_filtered=o_ranks_filt[i].item() + 1,
**entry,
)
if filter_with_test:
entry["rank_filtered_with_test"] = (
s_ranks_filt_test[i].item() + 1
)
self.trace(
event="example_rank",
task="po",
rank=s_ranks[i].item() + 1,
rank_filtered=s_ranks_filt[i].item() + 1,
**entry,
)
# Compute the batch metrics for the full histogram (key "all")
metrics = self._compute_metrics(batch_hists["all"])
metrics.update(
self._compute_metrics(batch_hists_filt["all"], suffix="_filtered")
)
if filter_with_test:
metrics.update(
self._compute_metrics(
batch_hists_filt_test["all"], suffix="_filtered_with_test"
)
)
# optionally: trace batch metrics
if self.trace_batch:
self.trace(
event="batch_completed",
type="entity_ranking",
scope="batch",
split=self.eval_split,
                    filter_splits=self.filter_splits,
epoch=self.epoch,
batch=batch_number,
size=len(batch),
batches=len(self.loader),
**metrics,
)
# output batch information to console
print(
(
"\r" # go back
+ "{} batch:{: "
+ str(1 + int(math.ceil(math.log10(len(self.loader)))))
+ "d}/{}, mrr (filt.): {:4.3f} ({:4.3f}), "
+ "hits@1: {:4.3f} ({:4.3f}), "
+ "hits@{}: {:4.3f} ({:4.3f})"
+ "\033[K" # clear to right
).format(
self.config.log_prefix,
batch_number,
len(self.loader) - 1,
metrics["mean_reciprocal_rank"],
metrics["mean_reciprocal_rank_filtered"],
metrics["hits_at_1"],
metrics["hits_at_1_filtered"],
self.hits_at_k_s[-1],
metrics["hits_at_{}".format(self.hits_at_k_s[-1])],
metrics["hits_at_{}_filtered".format(self.hits_at_k_s[-1])],
),
end="",
flush=True,
)
# merge batch histograms into global histograms
def merge_hist(target_hists, source_hists):
for key, hist in source_hists.items():
if key in target_hists:
target_hists[key] = target_hists[key] + hist
else:
target_hists[key] = hist
merge_hist(hists, batch_hists)
merge_hist(hists_filt, batch_hists_filt)
if filter_with_test:
merge_hist(hists_filt_test, batch_hists_filt_test)
# we are done; compute final metrics
print("\033[2K\r", end="", flush=True) # clear line and go back
for key, hist in hists.items():
name = "_" + key if key != "all" else ""
metrics.update(self._compute_metrics(hists[key], suffix=name))
metrics.update(
self._compute_metrics(hists_filt[key], suffix="_filtered" + name)
)
if filter_with_test:
metrics.update(
self._compute_metrics(
hists_filt_test[key], suffix="_filtered_with_test" + name
)
)
epoch_time += time.time()
# compute trace
trace_entry = dict(
type="entity_ranking",
scope="epoch",
split=self.eval_split,
filter_splits=self.filter_splits,
epoch=self.epoch,
batches=len(self.loader),
size=len(self.triples),
epoch_time=epoch_time,
event="eval_completed",
**metrics,
)
for f in self.post_epoch_trace_hooks:
f(self, trace_entry)
# if validation metric is not present, try to compute it
metric_name = self.config.get("valid.metric")
if metric_name not in trace_entry:
trace_entry[metric_name] = eval(
self.config.get("valid.metric_expr"),
None,
dict(config=self.config, **trace_entry),
)
# write out trace
trace_entry = self.trace(**trace_entry, echo=True, echo_prefix=" ", log=True)
# reset model and return metrics
if was_training:
self.model.train()
self.config.log("Finished evaluating on " + self.eval_split + " split.")
for f in self.post_valid_hooks:
f(self, trace_entry)
return trace_entry
def _densify_chunk_of_labels(
self, labels: torch.Tensor, chunk_start: int, chunk_end: int
) -> torch.Tensor:
"""Creates a dense chunk of a sparse label tensor.
A chunk here is a range of entity values with 'chunk_start' being the lower
bound and 'chunk_end' the upper bound.
The resulting tensor contains the labels for the sp chunk and the po chunk.
:param labels: sparse tensor containing the labels corresponding to the batch
for sp and po
:param chunk_start: int start index of the chunk
:param chunk_end: int end index of the chunk
:return: batch_size x chunk_size*2 dense tensor with labels for the sp chunk and
the po chunk.
"""
num_entities = self.dataset.num_entities()
indices = labels._indices()
mask_sp = (chunk_start <= indices[1, :]) & (indices[1, :] < chunk_end)
mask_po = ((chunk_start + num_entities) <= indices[1, :]) & (
indices[1, :] < (chunk_end + num_entities)
)
indices_sp_chunk = indices[:, mask_sp]
indices_sp_chunk[1, :] = indices_sp_chunk[1, :] - chunk_start
indices_po_chunk = indices[:, mask_po]
indices_po_chunk[1, :] = (
indices_po_chunk[1, :] - num_entities - chunk_start * 2 + chunk_end
)
indices_chunk = torch.cat((indices_sp_chunk, indices_po_chunk), dim=1)
dense_labels = torch.sparse.LongTensor(
indices_chunk,
labels._values()[mask_sp | mask_po],
torch.Size([labels.size()[0], (chunk_end - chunk_start) * 2]),
).to_dense()
return dense_labels
def _filter_and_rank(
self,
scores_sp: torch.Tensor,
scores_po: torch.Tensor,
labels: torch.Tensor,
o_true_scores: torch.Tensor,
s_true_scores: torch.Tensor,
):
"""Filters the current examples with the given labels and returns counts rank and
num_ties for each true score.
:param scores_sp: batch_size x chunk_size tensor of scores
:param scores_po: batch_size x chunk_size tensor of scores
:param labels: batch_size x 2*chunk_size tensor of scores
:param o_true_scores: batch_size x 1 tensor containing the scores of the actual
objects in batch
:param s_true_scores: batch_size x 1 tensor containing the scores of the actual
subjects in batch
:return: batch_size x 1 tensors rank and num_ties for s and o and filtered
scores_sp and scores_po
"""
chunk_size = scores_sp.shape[1]
if labels is not None:
# remove current example from labels
labels_sp = labels[:, :chunk_size]
labels_po = labels[:, chunk_size:]
scores_sp = scores_sp - labels_sp
scores_po = scores_po - labels_po
o_rank, o_num_ties = self._get_ranks_and_num_ties(scores_sp, o_true_scores)
s_rank, s_num_ties = self._get_ranks_and_num_ties(scores_po, s_true_scores)
return s_rank, s_num_ties, o_rank, o_num_ties, scores_sp, scores_po
@staticmethod
def _get_ranks_and_num_ties(
scores: torch.Tensor, true_scores: torch.Tensor
) -> (torch.Tensor, torch.Tensor):
"""Returns rank and number of ties of each true score in scores.
:param scores: batch_size x entities tensor of scores
:param true_scores: batch_size x 1 tensor containing the actual scores of the batch
:return: batch_size x 1 tensors rank and num_ties
"""
# process NaN values
scores = scores.clone()
scores[torch.isnan(scores)] = float("-Inf")
true_scores = true_scores.clone()
true_scores[torch.isnan(true_scores)] = float("-Inf")
# Determine how many scores are greater than / equal to each true answer (in its
# corresponding row of scores)
rank = torch.sum(scores > true_scores.view(-1, 1), dim=1, dtype=torch.long)
num_ties = torch.sum(scores == true_scores.view(-1, 1), dim=1, dtype=torch.long)
return rank, num_ties
@staticmethod
def _get_ranks(rank: torch.Tensor, num_ties: torch.Tensor) -> torch.Tensor:
"""Calculates the final rank from (minimum) rank and number of ties.
        :param rank: batch_size x 1 tensor with the number of scores greater than
            that of the true score
        :param num_ties: batch_size x 1 tensor with the number of scores equal to
            that of the true score
:return: batch_size x 1 tensor of ranks
"""
ranks = rank + num_ties // 2
return ranks
def _compute_metrics(self, rank_hist, suffix=""):
"""Computes desired matrix from rank histogram"""
metrics = {}
n = torch.sum(rank_hist).item()
ranks = torch.arange(1, self.dataset.num_entities() + 1).float().to(self.device)
metrics["mean_rank" + suffix] = (
(torch.sum(rank_hist * ranks).item() / n) if n > 0.0 else 0.0
)
reciprocal_ranks = 1.0 / ranks
metrics["mean_reciprocal_rank" + suffix] = (
(torch.sum(rank_hist * reciprocal_ranks).item() / n) if n > 0.0 else 0.0
)
hits_at_k = (
(torch.cumsum(rank_hist[: max(self.hits_at_k_s)], dim=0) / n).tolist()
if n > 0.0
else [0.0] * max(self.hits_at_k_s)
)
for i, k in enumerate(self.hits_at_k_s):
metrics["hits_at_{}{}".format(k, suffix)] = hits_at_k[k - 1]
return metrics
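# Worked example (illustrative only, not part of the evaluation job): the two
# static helpers above implement a tie-aware rank. For one row of scores with
# true score 0.7, three scores are strictly greater and three are tied, so the
# reported 0-based rank is 3 + 3 // 2 = 4 (traced as rank 5 after the +1 shift
# used in the example traces).
if __name__ == "__main__":
    _scores = torch.tensor([[0.9, 0.7, 0.7, 0.7, 0.5, 0.9, 0.95]])
    _true_scores = torch.tensor([0.7])
    _rank, _ties = EntityRankingJob._get_ranks_and_num_ties(_scores, _true_scores)
    print(EntityRankingJob._get_ranks(_rank, _ties))  # tensor([4])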
| 39.923478
| 91
| 0.540164
|
c5c4ea1ffb37584d2eb03c9b8db1c0e875c8c409
| 6,329
|
py
|
Python
|
aiocloudpayments/dispatcher/aiohttp_dispatcher.py
|
drforse/aiocloudpayments
|
25b8827250279335d037754dca6978bc79c9b18d
|
[
"MIT"
] | null | null | null |
aiocloudpayments/dispatcher/aiohttp_dispatcher.py
|
drforse/aiocloudpayments
|
25b8827250279335d037754dca6978bc79c9b18d
|
[
"MIT"
] | null | null | null |
aiocloudpayments/dispatcher/aiohttp_dispatcher.py
|
drforse/aiocloudpayments
|
25b8827250279335d037754dca6978bc79c9b18d
|
[
"MIT"
] | null | null | null |
import logging
from typing import Optional, Callable, Awaitable
from aiohttp import web
from aiohttp.abc import Application
from .callback import Result
from .. import AioCpClient
from ..types.notifications import CancelNotification, CheckNotification, ConfirmNotification, \
FailNotification, PayNotification, RecurrentNotification, RefundNotification
from ..utils.hmac_check import hmac_check
from .base_dispatcher import BaseDispatcher
logger = logging.getLogger("aiocloudpayments.dispatcher")
NOTIFICATION_TYPES = {
"pay": PayNotification, "cancel": CancelNotification, "check": CheckNotification,
"confirm": ConfirmNotification, "fail": FailNotification,
"recurrent": RecurrentNotification, "refund": RefundNotification
}
class AiohttpDispatcher(BaseDispatcher):
def __init__(self, index: int = None):
self._web_paths = {}
self._ip_whitelist = None
self._check_hmac = True
self._cp_client: Optional[AioCpClient] = None
super().__init__(index)
async def process_request(self, request: web.Request) -> web.Response:
if self.ip_whitelist and request.remote not in self.ip_whitelist and "0.0.0.0" not in self.ip_whitelist:
logger.warning(f"skip request from ip {request.remote} because it is not in ip_whitelist")
return web.json_response(status=401)
if self.check_hmac is True and hmac_check(
await request.read(),
self.cp_client._api_secret,
request.headers.get("Content-HMAC")) is False:
logger.warning(f"skip request from because hmac check failed: {request} from {request.remote}")
return web.json_response(status=401)
name = self._web_paths[request.url.name]
notification_type = NOTIFICATION_TYPES.get(name)
if notification_type is None:
logger.error(f"notification type {name} not supported")
return web.json_response(status=500)
notification = notification_type(**(await request.post()))
result = await self.process_notification(notification)
if result == Result.INTERNAL_ERROR:
return web.json_response(status=500)
if result:
return web.json_response({"result": result.value})
def register_app(
self,
app: Application,
path: str,
pay_path: str = None,
cancel_path: str = None,
check_path: str = None,
confirm_path: str = None,
fail_path: str = None,
recurrent_path: str = None,
refund_path: str = None):
"""
Register route
        If path doesn't end with "/", sub-paths should start with it, and vice versa
Only not-null paths are registered :)
:param app: instance of aiohttp Application
:param path: route main path
:param pay_path: sub-path for pay notifications
:param cancel_path:
:param check_path:
:param confirm_path:
:param fail_path:
:param recurrent_path:
:param refund_path:
:param kwargs:
"""
paths = {
"pay": pay_path, "cancel": cancel_path, "check": check_path,
"confirm": confirm_path, "fail": fail_path,
"recurrent": recurrent_path, "refund": refund_path
}
paths = {k: v for k, v in paths.items() if v is not None}
for name, path_ in paths.items():
if path_ is None:
continue
self._web_paths[path_.replace("/", "")] = name
app.router.add_route(
"POST", path + path_, self.process_request
)
def run_app(
self,
cp_client: AioCpClient,
path: str,
pay_path: str = None,
cancel_path: str = None,
check_path: str = None,
confirm_path: str = None,
fail_path: str = None,
recurrent_path: str = None,
refund_path: str = None,
allow_ips: Optional[set[str]] = frozenset({"127.0.0.1", "130.193.70.192",
"185.98.85.109", "91.142.84.0/27",
"87.251.91.160/27", "185.98.81.0/28"}),
check_hmac: bool = True,
on_startup: Callable[[web.Application], Awaitable[None]] = None,
on_shutdown: Callable[[web.Application], Awaitable[None]] = None,
**kwargs
):
"""
Create aiohttp app and run it
        If path doesn't end with "/", sub-paths should start with it, and vice versa
Only not-null paths are registered :)
:param cp_client: AioCpClient
:param path: route main path
:param pay_path: sub-path for pay notifications
:param cancel_path:
:param check_path:
:param confirm_path:
:param fail_path:
:param recurrent_path:
:param refund_path:
:param allow_ips: only allow requests from this ips
:param check_hmac: pass False to disable hmac check
:param on_startup:
:param on_shutdown:
:param kwargs: aiohttp run_app parameters
"""
self.ip_whitelist = allow_ips
self.cp_client = cp_client
self.check_hmac = check_hmac
app = web.Application()
if on_startup:
app.on_startup.append(on_startup)
if on_shutdown:
app.on_shutdown.append(on_shutdown)
self.register_app(
app, path, pay_path, cancel_path, check_path, confirm_path,
fail_path, recurrent_path, refund_path
)
web.run_app(app, **kwargs)
@property
def ip_whitelist(self) -> Optional[set]:
return self._ip_whitelist
@ip_whitelist.setter
def ip_whitelist(self, value: Optional[set]):
self._ip_whitelist = value
@property
def check_hmac(self) -> bool:
return self._check_hmac
@check_hmac.setter
def check_hmac(self, value: bool):
self._check_hmac = value
@property
def cp_client(self) -> Optional[AioCpClient]:
return self._cp_client
@cp_client.setter
def cp_client(self, value: Optional[AioCpClient]):
self._cp_client = value
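# Hedged usage sketch (assumption, not part of the original module): starting
# the dispatcher as a standalone aiohttp app. The AioCpClient arguments, the
# URL paths and the port below are illustrative; notification handlers are
# registered through BaseDispatcher, which lives in another module.
# dispatcher = AiohttpDispatcher()
# dispatcher.run_app(
#     AioCpClient("public_id", "api_secret"),
#     path="/cloudpayments",
#     pay_path="/pay",
#     check_path="/check",
#     port=8080,
# )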
| 36.373563
| 112
| 0.609417
|
3dce706ded547ef250abc6494a0668d11fb368c7
| 886
|
py
|
Python
|
gilbo_experiments_trace_diag/explore.py
|
BachiLi/autodiff_comp
|
d2e33db4bd4ac7630b6ee6cfc56b3411cc37e14d
|
[
"MIT"
] | 2
|
2020-04-15T21:02:55.000Z
|
2020-05-07T00:37:21.000Z
|
gilbo_experiments_trace_diag/explore.py
|
BachiLi/autodiff_comp
|
d2e33db4bd4ac7630b6ee6cfc56b3411cc37e14d
|
[
"MIT"
] | null | null | null |
gilbo_experiments_trace_diag/explore.py
|
BachiLi/autodiff_comp
|
d2e33db4bd4ac7630b6ee6cfc56b3411cc37e14d
|
[
"MIT"
] | 1
|
2020-05-06T17:13:56.000Z
|
2020-05-06T17:13:56.000Z
|
import numpy as np
import tensorflow as tf
import time
import math
tf.compat.v1.enable_eager_execution()
def run_exp_0(N):
@tf.function(
experimental_compile=True,
input_signature=[tf.TensorSpec(shape=[N,N], dtype=tf.float32)]
)
def f(A):
grads = None
with tf.GradientTape() as tape:
tape.watch(A)
AAt = tf.matmul( A, tf.transpose(A) )
trAA = tf.linalg.trace(AAt)
grads = tape.gradient(trAA, A)
return trAA, grads/2.0
A = tf.random.uniform([N,N])
f(A)
start_time = time.perf_counter()
f(A)
stop_time = time.perf_counter()
return stop_time - start_time
for N in (100,200,400,800,1200,1600):
timing = run_exp_0(N)
print(f"time for N = {N}: {timing}")
print(f"sqrt(time) for N = {N}: {math.sqrt(timing)}")
print(f"cube_root(time) for N = {N}: {math.pow(timing,1.0/3.0)}")
| 22.15
| 76
| 0.61851
|
4e92a5839f506fe3ea2a43bba6d66216f1deae57
| 668
|
py
|
Python
|
creator/migrations/0009_auto_20210621_1300.py
|
MeTeoRise/chatbot_automation
|
6dfbbdbf8b71219b35052c6549ff32347a6248be
|
[
"MIT"
] | 3
|
2022-03-04T10:18:29.000Z
|
2022-03-23T20:16:01.000Z
|
creator/migrations/0009_auto_20210621_1300.py
|
MeTeoRise/chatbot_automation
|
6dfbbdbf8b71219b35052c6549ff32347a6248be
|
[
"MIT"
] | null | null | null |
creator/migrations/0009_auto_20210621_1300.py
|
MeTeoRise/chatbot_automation
|
6dfbbdbf8b71219b35052c6549ff32347a6248be
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-06-21 13:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('creator', '0008_steps_story'),
]
operations = [
migrations.AlterField(
model_name='steps',
name='action',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='creator.responses'),
),
migrations.AlterField(
model_name='steps',
name='intent',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='creator.intent'),
),
]
| 26.72
| 105
| 0.61976
|
e417ec9ff7b4c7766695217d77aa9bad8cfcfc08
| 397
|
py
|
Python
|
projectenv/projectenv/asgi.py
|
rzsaglam/project-env
|
f4c02b15cf924ba5d69d8a4a89efcc686b73aa9c
|
[
"MIT"
] | null | null | null |
projectenv/projectenv/asgi.py
|
rzsaglam/project-env
|
f4c02b15cf924ba5d69d8a4a89efcc686b73aa9c
|
[
"MIT"
] | null | null | null |
projectenv/projectenv/asgi.py
|
rzsaglam/project-env
|
f4c02b15cf924ba5d69d8a4a89efcc686b73aa9c
|
[
"MIT"
] | null | null | null |
"""
ASGI config for projectenv project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projectenv.settings')
application = get_asgi_application()
| 23.352941
| 78
| 0.788413
|