hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5256f8c0d04ee6af4a341be08c61fb9e7dcd7730 | 165 | py | Python | study_notes/proga_mod.py | chicolucio/python_useful_tips | 3a17a77f0f89923ba087188d4acfaadd1d1bfeea | [
"MIT"
] | null | null | null | study_notes/proga_mod.py | chicolucio/python_useful_tips | 3a17a77f0f89923ba087188d4acfaadd1d1bfeea | [
"MIT"
] | null | null | null | study_notes/proga_mod.py | chicolucio/python_useful_tips | 3a17a77f0f89923ba087188d4acfaadd1d1bfeea | [
"MIT"
] | null | null | null | print('Begin', __name__)
# Demonstrates module execution order: top-level statements run in sequence
# when the module is imported or executed.
print('Defines fA')


def fA():
    """Print a marker showing that the function body executed."""
    print('Inside fA')


# fA() is only called when this file is run directly, not when imported.
if __name__ == "__main__":
    print('Calls fA')
    fA()
print('End', __name__)
| 11 | 26 | 0.593939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.333333 |
52575caa64a642674717cd72e3bc72c2b83b3b27 | 2,238 | py | Python | wdae/wdae/common_reports_api/tests/test_common_reports_api.py | iossifovlab/gpf | e556243d29666179dbcb72859845b4d6c011af2b | [
"MIT"
] | null | null | null | wdae/wdae/common_reports_api/tests/test_common_reports_api.py | iossifovlab/gpf | e556243d29666179dbcb72859845b4d6c011af2b | [
"MIT"
] | 82 | 2019-07-22T11:44:23.000Z | 2022-01-13T15:27:33.000Z | wdae/wdae/common_reports_api/tests/test_common_reports_api.py | iossifovlab/gpf | e556243d29666179dbcb72859845b4d6c011af2b | [
"MIT"
] | null | null | null | import pytest
from rest_framework import status
pytestmark = pytest.mark.usefixtures(
"wdae_gpf_instance", "dae_calc_gene_sets", "use_common_reports"
)
def test_variant_reports(admin_client):
    """An admin client can fetch the common report for study4 (HTTP 200)."""
    url = "/api/v3/common_reports/studies/study4"
    response = admin_client.get(url)

    assert response
    assert response.status_code == status.HTTP_200_OK

    data = response.data
    assert data
def test_variant_reports_no_permissions(user_client):
    """A regular user without access rights gets HTTP 403 for the report."""
    url = "/api/v3/common_reports/studies/study4"
    response = user_client.get(url)

    assert response
    assert response.status_code == status.HTTP_403_FORBIDDEN

    data = response.data
    assert data
@pytest.mark.xfail(reason="this test is flipping; should be investigated")
def test_variant_reports_not_found(admin_client):
    """Requesting a report for an unknown study yields 404 plus an error message."""
    url = "/api/v3/common_reports/studies/Study3"
    response = admin_client.get(url)

    assert response
    assert response.data["error"] == "Common report Study3 not found"
    assert response.status_code == status.HTTP_404_NOT_FOUND
def test_families_data_download(admin_client):
    """Families data for Study1 streams as TSV with the expected header and rows."""
    url = "/api/v3/common_reports/families_data/Study1"
    response = admin_client.get(url)

    assert response
    assert response.status_code == status.HTTP_200_OK

    streaming_content = list(response.streaming_content)
    assert streaming_content
    # 1 header line + 30 person rows expected from the fixture study
    assert len(streaming_content) == 31

    header = streaming_content[0].decode("utf8")
    assert header[-1] == "\n"
    header = header[:-1].split("\t")
    assert len(header) == 8
    assert header == [
        "familyId",
        "personId",
        "dadId",
        "momId",
        "sex",
        "status",
        "role",
        "genotype_data_study",
    ]

    first_person = streaming_content[1].decode("utf8")
    assert first_person[-1] == "\n"
    first_person = first_person[:-1].split("\t")
    assert len(first_person) == 8
    # last column carries the study the person belongs to
    assert first_person[-1] == "Study1"
@pytest.mark.xfail(reason="this test is flipping; should be investigated")
def test_families_data_download_no_permissions(user_client):
    """A regular user may not download families data for study4 (HTTP 403)."""
    url = "/api/v3/common_reports/families_data/study4"
    response = user_client.get(url)

    assert response
    assert response.status_code == status.HTTP_403_FORBIDDEN
| 26.329412 | 74 | 0.7042 | 0 | 0 | 0 | 0 | 672 | 0.300268 | 0 | 0 | 509 | 0.227435 |
5257bb106f5486659c2e12b6df0c6a213ab4a9ac | 8,000 | py | Python | ckan/tests/logic/action/test_create.py | rossjones/ckan-mini | cb39b3de7dcc9d12a64c5f37bcafe6306e9e3412 | [
"Apache-2.0"
] | 3 | 2015-09-30T20:07:52.000Z | 2015-10-01T04:16:31.000Z | ckan/tests/logic/action/test_create.py | rossjones/ckan-mini | cb39b3de7dcc9d12a64c5f37bcafe6306e9e3412 | [
"Apache-2.0"
] | 4 | 2015-09-29T18:29:56.000Z | 2015-10-04T14:55:22.000Z | ckan/tests/logic/action/test_create.py | rossjones/ckan-mini | cb39b3de7dcc9d12a64c5f37bcafe6306e9e3412 | [
"Apache-2.0"
] | null | null | null | '''Unit tests for ckan/logic/auth/create.py.
'''
from pylons import config
import mock
import nose.tools
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
import ckan.model as model
import ckan.logic as logic
import ckan.plugins as p
assert_equals = nose.tools.assert_equals
assert_raises = nose.tools.assert_raises
class TestResourceCreate(object):
    """Tests for the ``resource_create`` action."""

    @classmethod
    def setup_class(cls):
        # start the class from a clean database
        helpers.reset_db()

    def setup(self):
        # rebuild the DB before each test so state does not leak between tests
        model.repo.rebuild_db()

    def test_resource_create(self):
        """Creating a resource returns its fields plus a generated id."""
        context = {}
        params = {
            'package_id': factories.Dataset()['id'],
            'url': 'http://data',
            'name': 'A nice resource',
        }

        result = helpers.call_action('resource_create', context, **params)

        id = result.pop('id')
        assert id
        # the remaining input params must round-trip unchanged
        params.pop('package_id')
        for key in params.keys():
            assert_equals(params[key], result[key])

    def test_it_requires_package_id(self):
        """``package_id`` is mandatory; omitting it raises ValidationError."""
        data_dict = {
            'url': 'http://data',
        }
        assert_raises(logic.ValidationError, helpers.call_action,
                      'resource_create', **data_dict)

    def test_it_requires_url(self):
        """``url`` is mandatory; omitting it raises ValidationError."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        data_dict = {
            'package_id': dataset['id']
        }
        assert_raises(logic.ValidationError, helpers.call_action,
                      'resource_create', **data_dict)
class TestMemberCreate(object):
    """Tests for ``group_member_create`` and ``organization_member_create``."""

    @classmethod
    def setup_class(cls):
        helpers.reset_db()

    def setup(self):
        # rebuild the DB before each test so memberships do not leak
        model.repo.rebuild_db()

    def test_group_member_creation(self):
        """Adding a user to a group records the expected membership row."""
        user = factories.User()
        group = factories.Group()

        new_membership = helpers.call_action(
            'group_member_create',
            id=group['id'],
            username=user['name'],
            role='member',
        )

        assert_equals(new_membership['group_id'], group['id'])
        assert_equals(new_membership['table_name'], 'user')
        assert_equals(new_membership['table_id'], user['id'])
        assert_equals(new_membership['capacity'], 'member')

    def test_organization_member_creation(self):
        """Adding a user to an organization records the expected membership row."""
        user = factories.User()
        organization = factories.Organization()

        new_membership = helpers.call_action(
            'organization_member_create',
            id=organization['id'],
            username=user['name'],
            role='member',
        )

        assert_equals(new_membership['group_id'], organization['id'])
        assert_equals(new_membership['table_name'], 'user')
        assert_equals(new_membership['table_id'], user['id'])
        assert_equals(new_membership['capacity'], 'member')
class TestDatasetCreate(helpers.FunctionalTestBase):
    """Tests for id handling in the ``package_create`` action."""

    def test_normal_user_cant_set_id(self):
        """A non-sysadmin user may not choose an explicit dataset id."""
        user = factories.User()
        context = {
            'user': user['name'],
            'ignore_auth': False,
        }
        assert_raises(
            logic.ValidationError,
            helpers.call_action,
            'package_create',
            context=context,
            id='1234',
            name='test-dataset',
        )

    def test_sysadmin_can_set_id(self):
        """A sysadmin may choose an explicit dataset id."""
        user = factories.Sysadmin()
        context = {
            'user': user['name'],
            'ignore_auth': False,
        }
        dataset = helpers.call_action(
            'package_create',
            context=context,
            id='1234',
            name='test-dataset',
        )
        assert_equals(dataset['id'], '1234')

    def test_id_cant_already_exist(self):
        """Creating a dataset with an already-used id fails even for sysadmins."""
        dataset = factories.Dataset()
        user = factories.Sysadmin()

        assert_raises(
            logic.ValidationError,
            helpers.call_action,
            'package_create',
            id=dataset['id'],
            name='test-dataset',
        )
class TestGroupCreate(helpers.FunctionalTestBase):
    """Tests for the ``group_create`` action."""

    def test_create_group(self):
        """A created group has the expected defaults and the creator as a member."""
        user = factories.User()
        context = {
            'user': user['name'],
            'ignore_auth': True,
        }

        group = helpers.call_action(
            'group_create',
            context=context,
            name='test-group',
        )

        assert len(group['users']) == 1
        assert group['display_name'] == u'test-group'
        assert group['package_count'] == 0
        assert not group['is_organization']
        assert group['type'] == 'group'

    @nose.tools.raises(logic.ValidationError)
    def test_create_group_validation_fail(self):
        """An empty group name is rejected with ValidationError."""
        user = factories.User()
        context = {
            'user': user['name'],
            'ignore_auth': True,
        }

        helpers.call_action(
            'group_create',
            context=context,
            name='',
        )

    def test_create_group_return_id(self):
        """With ``return_id_only`` the action returns just the UUID string."""
        import re

        user = factories.User()
        context = {
            'user': user['name'],
            'ignore_auth': True,
            'return_id_only': True
        }

        group = helpers.call_action(
            'group_create',
            context=context,
            name='test-group',
        )

        assert isinstance(group, str)
        # raw string: a plain '\d' literal is an invalid escape in newer Python
        assert re.match(r'([a-f\d]{8}(-[a-f\d]{4}){3}-[a-f\d]{12}?)', group)

    def test_create_matches_show(self):
        """The ``*_create`` output matches the ``*_show`` output key for key."""
        # NOTE(review): this test calls organization_create/show although the
        # class covers groups -- looks like a copy-paste; confirm the intent.
        user = factories.User()
        context = {
            'user': user['name'],
            'ignore_auth': True,
        }

        created = helpers.call_action(
            'organization_create',
            context=context,
            name='test-organization',
        )

        shown = helpers.call_action(
            'organization_show',
            context=context,
            id='test-organization',
        )

        assert sorted(created.keys()) == sorted(shown.keys())
        for k in created.keys():
            assert created[k] == shown[k], k
class TestOrganizationCreate(helpers.FunctionalTestBase):
    """Tests for the ``organization_create`` action."""

    def test_create_organization(self):
        """A created organization has the expected defaults and the creator as a member."""
        user = factories.User()
        context = {
            'user': user['name'],
            'ignore_auth': True,
        }

        org = helpers.call_action(
            'organization_create',
            context=context,
            name='test-organization',
        )

        assert len(org['users']) == 1
        assert org['display_name'] == u'test-organization'
        assert org['package_count'] == 0
        assert org['is_organization']
        assert org['type'] == 'organization'

    @nose.tools.raises(logic.ValidationError)
    def test_create_organization_validation_fail(self):
        """An empty organization name is rejected with ValidationError."""
        user = factories.User()
        context = {
            'user': user['name'],
            'ignore_auth': True,
        }

        helpers.call_action(
            'organization_create',
            context=context,
            name='',
        )

    def test_create_organization_return_id(self):
        """With ``return_id_only`` the action returns just the UUID string."""
        import re

        user = factories.User()
        context = {
            'user': user['name'],
            'ignore_auth': True,
            'return_id_only': True
        }

        org = helpers.call_action(
            'organization_create',
            context=context,
            name='test-organization',
        )

        assert isinstance(org, str)
        # raw string: a plain '\d' literal is an invalid escape in newer Python
        assert re.match(r'([a-f\d]{8}(-[a-f\d]{4}){3}-[a-f\d]{12}?)', org)

    def test_create_matches_show(self):
        """``organization_create`` output matches ``organization_show`` key for key."""
        user = factories.User()
        context = {
            'user': user['name'],
            'ignore_auth': True,
        }

        created = helpers.call_action(
            'organization_create',
            context=context,
            name='test-organization',
        )

        shown = helpers.call_action(
            'organization_show',
            context=context,
            id='test-organization',
        )

        assert sorted(created.keys()) == sorted(shown.keys())
        for k in created.keys():
            assert created[k] == shown[k], k
| 26.40264 | 75 | 0.550375 | 7,637 | 0.954625 | 0 | 0 | 832 | 0.104 | 0 | 0 | 1,403 | 0.175375 |
52580d1080b455dd0e2a017e5435bfca15bd5e74 | 232 | py | Python | CodeWars/7 Kyu/Broken sequence.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | CodeWars/7 Kyu/Broken sequence.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | CodeWars/7 Kyu/Broken sequence.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | def find_missing_number(sequence):
try:
numbers = sorted(int(word) for word in sequence.split(" ") if word)
except ValueError:
return 1
return next((i + 1 for i, n in enumerate(numbers) if i + 1 != n), 0) | 38.666667 | 75 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.012931 |
5258259e0cc916706a7010aa6cabfb31332042c1 | 4,075 | py | Python | Topsis_Samarth_102083050/t_102083050.py | Samarth-Gupta-2909/Topsis_Samarth_102083050 | 1cd91125108bc4887ec0aebf5041ae2763cc4a31 | [
"MIT"
] | null | null | null | Topsis_Samarth_102083050/t_102083050.py | Samarth-Gupta-2909/Topsis_Samarth_102083050 | 1cd91125108bc4887ec0aebf5041ae2763cc4a31 | [
"MIT"
] | null | null | null | Topsis_Samarth_102083050/t_102083050.py | Samarth-Gupta-2909/Topsis_Samarth_102083050 | 1cd91125108bc4887ec0aebf5041ae2763cc4a31 | [
"MIT"
] | null | null | null | import pandas as pd
import os
import sys
def main():
    """Validate CLI arguments, load the input CSV and run the TOPSIS ranking.

    Usage: ``python topsis.py inputfile.csv '1,1,1,1' '+,+,-,+' result.csv``
    Exits with status 1 on any validation problem.
    """
    # Arguments not equal to 5
    if len(sys.argv) != 5:
        print("ERROR : NUMBER OF PARAMETERS")
        print("USAGE : python topsis.py inputfile.csv '1,1,1,1' '+,+,-,+' result.csv ")
        exit(1)

    # File Not Found error
    elif not os.path.isfile(sys.argv[1]):
        print(f"ERROR : {sys.argv[1]} Don't exist!!")
        exit(1)

    # File extension not csv
    elif ".csv" != (os.path.splitext(sys.argv[1]))[1]:
        print(f"ERROR : {sys.argv[1]} is not csv!!")
        exit(1)

    else:
        dataset, tem_dataset = pd.read_csv(sys.argv[1]), pd.read_csv(sys.argv[1])
        nCol = len(tem_dataset.columns.values)

        # less than 3 columns in input dataset
        if nCol < 3:
            print("ERROR : Input file have less then 3 columns")
            exit(1)

        # Handling non-numeric values: coerce to NaN, then fill with the
        # column mean.  BUGFIX: the original discarded the result of
        # pd.to_numeric (it returns a new Series), so the coercion never
        # took effect -- assign it back to the column.
        for i in range(1, nCol):
            dataset.iloc[:, i] = pd.to_numeric(dataset.iloc[:, i], errors='coerce')
            dataset.iloc[:, i].fillna((dataset.iloc[:, i].mean()), inplace=True)

        # Handling errors in the weights array (only int() can fail here)
        try:
            weights = [int(i) for i in sys.argv[2].split(',')]
        except ValueError:
            print("ERROR : In weights array please check again")
            exit(1)

        # Each impact must be either '+' (benefit) or '-' (cost)
        impact = sys.argv[3].split(',')
        for i in impact:
            if not (i == '+' or i == '-'):
                print("ERROR : In impact array please check again")
                exit(1)

        # Number of weights/impacts must match the number of criterion columns
        if nCol != len(weights)+1 or nCol != len(impact)+1:
            print(
                "ERROR : Number of weights, number of impacts and number of columns not same")
            exit(1)

        if (".csv" != (os.path.splitext(sys.argv[4]))[1]):
            print("ERROR : Output file extension is wrong")
            exit(1)
        # remove a stale result file so the new output starts fresh
        if os.path.isfile(sys.argv[4]):
            os.remove(sys.argv[4])

        topsis_samarth(tem_dataset, dataset, nCol, weights, impact)
def Normalisation(tem_dataset, nCol, weights):
    """Vector-normalise each criterion column in place and apply its weight.

    For every column ``col`` in ``1..nCol-1`` each entry is divided by the
    Euclidean norm of that column and multiplied by ``weights[col - 1]``.
    The (mutated) DataFrame is also returned for convenience.
    """
    for col in range(1, nCol):
        norm = sum(tem_dataset.iloc[row, col] ** 2
                   for row in range(len(tem_dataset))) ** 0.5
        for row in range(len(tem_dataset)):
            tem_dataset.iat[row, col] = (tem_dataset.iloc[row, col] / norm) * weights[col - 1]
    return tem_dataset
def Calculation(tem_dataset, nCol, impact):
    """Return the ideal-best and ideal-worst value arrays per criterion.

    Starts from the per-column max/min (skipping the identifier column) and
    swaps the pair for cost criteria (impact '-'), where smaller is better.
    """
    best = (tem_dataset.max().values)[1:]
    worst = (tem_dataset.min().values)[1:]
    for col in range(1, nCol):
        if impact[col - 1] == '-':
            best[col - 1], worst[col - 1] = worst[col - 1], best[col - 1]
    return best, worst
def topsis_samarth(tem_dataset, dataset, nCol, weights, impact):
    """Apply the TOPSIS algorithm and write the ranked result to sys.argv[4].

    ``tem_dataset`` is used as a working copy (normalised in place by
    Normalisation); ``dataset`` keeps the original values and receives the
    'Topsis Score' and 'Rank' columns before being written to CSV.
    """
    # normalise and weight the criterion columns
    tem_dataset = Normalisation(tem_dataset, nCol, weights)
    # ideal-best / ideal-worst vectors per criterion
    p_sln, n_sln = Calculation(tem_dataset, nCol, impact)

    # TOPSIS score: relative closeness to the ideal solution
    score = []
    for i in range(len(tem_dataset)):
        tem_p, tem_n = 0, 0
        for j in range(1, nCol):
            # squared distances to the ideal best / worst points
            tem_p = tem_p + (p_sln[j-1] - tem_dataset.iloc[i, j])**2
            tem_n = tem_n + (n_sln[j-1] - tem_dataset.iloc[i, j])**2
        tem_p, tem_n = tem_p**0.5, tem_n**0.5
        score.append(tem_n/(tem_p + tem_n))
    dataset['Topsis Score'] = score

    # rank 1 = highest TOPSIS score
    dataset['Rank'] = (dataset['Topsis Score'].rank(method='max', ascending=False))
    dataset = dataset.astype({"Rank": int})

    # write the result CSV named on the command line
    dataset.to_csv(sys.argv[4], index=False)
if __name__ == "__main__":
main() | 34.82906 | 95 | 0.561963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,221 | 0.299632 |
525a2422bd8be6f7826ece79661f8b6cc564ee5b | 6,337 | py | Python | pyrichlet/weight_models/_pitman_yor_process.py | cabo40/pyrichlet | 941b84e3fcf3c92de2239767e96ee235ff165adb | [
"Apache-2.0"
] | 13 | 2022-02-13T00:33:47.000Z | 2022-03-08T16:28:27.000Z | pyrichlet/weight_models/_pitman_yor_process.py | cabo40/pyrichlet | 941b84e3fcf3c92de2239767e96ee235ff165adb | [
"Apache-2.0"
] | null | null | null | pyrichlet/weight_models/_pitman_yor_process.py | cabo40/pyrichlet | 941b84e3fcf3c92de2239767e96ee235ff165adb | [
"Apache-2.0"
] | null | null | null | from ._base import BaseWeight
from ..exceptions import NotFittedError
from ..utils.functions import mean_log_beta
import numpy as np
from scipy.special import loggamma
class PitmanYorProcess(BaseWeight):
def __init__(self, pyd=0, alpha=1, truncation_length=-1, rng=None):
super().__init__(rng=rng)
assert -pyd < alpha, "alpha param must be greater than -pyd"
self.pyd = pyd
self.alpha = alpha
self.v = np.array([], dtype=np.float64)
self.truncation_length = truncation_length
def random(self, size=None):
if size is None and len(self.d) == 0:
raise ValueError("Weight structure not fitted and `n` not passed.")
if size is not None:
if type(size) is not int:
raise TypeError("size parameter must be integer or None")
if len(self.d) == 0:
pitman_yor_bias = np.arange(size)
self.v = self.rng.beta(a=1 - self.pyd,
b=self.alpha + pitman_yor_bias * self.pyd,
size=size)
self.w = self.v * np.cumprod(np.concatenate(([1],
1 - self.v[:-1])))
else:
a_c = np.bincount(self.d)
b_c = np.concatenate((np.cumsum(a_c[::-1])[-2::-1], [0]))
if size is not None and size < len(a_c):
a_c = a_c[:size]
b_c = b_c[:size]
pitman_yor_bias = np.arange(len(a_c))
self.v = self.rng.beta(
a=1 - self.pyd + a_c,
b=self.alpha + pitman_yor_bias * self.pyd + b_c
)
self.w = self.v * np.cumprod(np.concatenate(([1],
1 - self.v[:-1])))
if size is not None:
self.complete(size)
return self.w
def complete(self, size):
if type(size) is not int:
raise TypeError("size parameter must be integer or None")
if self.get_size() < size:
pitman_yor_bias = np.arange(self.get_size(), size)
self.v = np.concatenate(
(
self.v,
self.rng.beta(a=1 - self.pyd,
b=self.alpha + pitman_yor_bias * self.pyd)
)
)
self.w = self.v * np.cumprod(np.concatenate(([1],
1 - self.v[:-1])))
return self.w
def fit_variational(self, variational_d):
self.variational_d = variational_d
self.variational_k = len(self.variational_d)
self.variational_params = np.empty((self.variational_k, 2),
dtype=np.float64)
a_c = np.sum(self.variational_d, 1)
b_c = np.concatenate((np.cumsum(a_c[::-1])[-2::-1], [0]))
self.variational_params[:, 0] = 1 - self.pyd + a_c
self.variational_params[:, 1] = self.alpha + (
1 + np.arange(self.variational_params.shape[0])
) * self.pyd + b_c
def variational_mean_log_w_j(self, j):
if self.variational_d is None:
raise NotFittedError
res = 0
for jj in range(j):
res += mean_log_beta(self.variational_params[jj][1],
self.variational_params[jj][0])
res += mean_log_beta(self.variational_params[j, 0],
self.variational_params[j, 1]
)
return res
def variational_mean_log_p_d__w(self, variational_d=None):
if variational_d is None:
_variational_d = self.variational_d
if _variational_d is None:
raise NotFittedError
else:
_variational_d = variational_d
res = 0
for j, nj in enumerate(np.sum(_variational_d, 1)):
res += nj * self.variational_mean_log_w_j(j)
return res
def variational_mean_log_p_w(self):
if self.variational_d is None:
raise NotFittedError
res = 0
for j, params in enumerate(self.variational_params):
res += mean_log_beta(params[0], params[1]) * -self.pyd
res += mean_log_beta(params[1], params[0]) * (
self.alpha + (j + 1) * self.pyd - 1
)
res += loggamma(self.alpha + j * self.pyd + 1)
res -= loggamma(self.alpha + (j + 1) * self.pyd + 1)
res -= loggamma(1 - self.pyd)
return res
def variational_mean_log_q_w(self):
if self.variational_d is None:
raise NotFittedError
res = 0
for params in self.variational_params:
res += (params[0] - 1) * mean_log_beta(params[0], params[1])
res += (params[1] - 1) * mean_log_beta(params[1], params[0])
res += loggamma(params[0] + params[1])
res -= loggamma(params[0]) + loggamma(params[1])
return res
def variational_mean_w(self, j):
if j > self.variational_k:
return 0
res = 1
for jj in range(j):
res *= (self.variational_params[jj][1] /
self.variational_params[jj].sum())
res *= self.variational_params[j, 0] / self.variational_params[j].sum()
return res
def variational_mode_w(self, j):
if j > self.variational_k:
return 0
res = 1
for jj in range(j):
if self.variational_params[jj, 1] <= 1:
if self.variational_params[jj, 0] <= 1:
raise ValueError('multimodal distribution')
else:
return 0
elif self.variational_params[jj, 0] <= 1:
continue
res *= ((self.variational_params[jj, 1] - 1) /
(self.variational_params[jj].sum() - 2))
if self.variational_params[j, 0] <= 1:
if self.variational_params[j, 1] <= 1:
raise ValueError('multimodal distribution')
else:
return 0
elif self.variational_params[j, 1] <= 1:
return res
res *= ((self.variational_params[j, 0] - 1) /
(self.variational_params[j].sum() - 2))
return res
| 39.117284 | 79 | 0.515544 | 6,165 | 0.972858 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.034401 |
525a8b00826a337a4c293642d7c027ab056d2b82 | 2,259 | py | Python | nlp/router.py | kirollosHossam/MachineLearningTask | 3780513af04cf7bb97432436b4714c32d1c271e6 | [
"MIT"
] | null | null | null | nlp/router.py | kirollosHossam/MachineLearningTask | 3780513af04cf7bb97432436b4714c32d1c271e6 | [
"MIT"
] | null | null | null | nlp/router.py | kirollosHossam/MachineLearningTask | 3780513af04cf7bb97432436b4714c32d1c271e6 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Dict, List, Optional, Union
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from nlp.nlp import Trainer
app = FastAPI()
trainer = Trainer()
# BaseModel is used as a data validator with FastAPI: it takes care of
# exception handling and validates the incoming JSON against the declared schema.
class TestingData(BaseModel):
    """Request body for batch prediction: a list of raw texts."""
    texts: List[str]
class QueryText(BaseModel):
    """Request body for single prediction: one raw text."""
    text: str
class StatusObject(BaseModel):
    """Response schema describing the trained model's state."""
    status: str
    timestamp: str
    classes: List[str]
    evaluation: Dict
class PredictionObject(BaseModel):
    """Prediction result for a single text."""
    text: str
    predictions: Dict
class PredictionsObject(BaseModel):
    """Prediction results for a batch of texts."""
    predictions: List[PredictionObject]
@app.get("/status", summary="Get current status of the system")
def get_status():
    """Return the trainer's current status as a StatusObject."""
    status = trainer.get_status()
    return StatusObject(**status)
@app.get("/trainMachineLearning", summary="Train a new Machine Learning model")
def train():
    """Train the classic ML model on the merged dataset.

    Returns the post-training status; any failure is mapped to HTTP 503.
    """
    try:
        # merge once instead of twice -- the original called trainer.merge()
        # separately for the texts and for the labels
        merged = trainer.merge()
        trainer.trainMachineLearning(merged.text, merged.dialect)
        status = trainer.get_status()
        return StatusObject(**status)
    except Exception as e:
        raise HTTPException(status_code=503, detail=str(e))
@app.get("/trainDeepLearning", summary="Train a new Deep Learning model")
def train_deep_learning():
    """Train the deep-learning model on the merged dataset.

    Renamed from ``train`` -- the original redefined the same function name
    as the ML endpoint, shadowing it at module level.  Also merges once
    instead of twice.  Any failure is mapped to HTTP 503.
    """
    try:
        merged = trainer.merge()
        trainer.trainDeepLearning(merged.text, merged.dialect)
        status = trainer.get_status()
        return StatusObject(**status)
    except Exception as e:
        raise HTTPException(status_code=503, detail=str(e))
@app.post("/predict", summary="Predict single input")
def predict(query_text: QueryText):
    """Predict the class of a single text; failures map to HTTP 503."""
    try:
        prediction = trainer.predict([query_text.text])[0]
        return PredictionObject(**prediction)
    except Exception as e:
        raise HTTPException(status_code=503, detail=str(e))
@app.post("/predict-batch", summary="predict a batch of sentences")
def predict_batch(testing_data: TestingData):
    """Predict the classes of a batch of texts; failures map to HTTP 503."""
    try:
        predictions = trainer.predict(testing_data.texts)
        return PredictionsObject(predictions=predictions)
    except Exception as e:
        raise HTTPException(status_code=503, detail=str(e))
@app.get("/")
def home():
    """Health-check endpoint."""
    return({"message": "System is up"})
| 30.527027 | 107 | 0.715803 | 345 | 0.152722 | 0 | 0 | 1,515 | 0.670651 | 0 | 0 | 412 | 0.182382 |
525ce95ea9b89a99563ce003f523651a65ff4155 | 2,359 | py | Python | alibabacloud/endpoint/default_endpoint_resolver.py | wallisyan/alibabacloud-python-sdk-v2 | 6e024c97cded2403025a7dd8fea8261e41872156 | [
"Apache-2.0"
] | null | null | null | alibabacloud/endpoint/default_endpoint_resolver.py | wallisyan/alibabacloud-python-sdk-v2 | 6e024c97cded2403025a7dd8fea8261e41872156 | [
"Apache-2.0"
] | null | null | null | alibabacloud/endpoint/default_endpoint_resolver.py | wallisyan/alibabacloud-python-sdk-v2 | 6e024c97cded2403025a7dd8fea8261e41872156 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Alibaba Cloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from alibabacloud.endpoint import EndpointResolver
from alibabacloud.endpoint.chained_endpoint_resolver import ChainedEndpointResolver
from alibabacloud.endpoint.local_config_global_endpoint_resolver \
import LocalConfigGlobalEndpointResolver
from alibabacloud.endpoint.local_config_regional_endpoint_resolver \
import LocalConfigRegionalEndpointResolver
from alibabacloud.endpoint.location_service_endpoint_resolver \
import LocationServiceEndpointResolver
from alibabacloud.endpoint.user_customized_endpoint_resolver import UserCustomizedEndpointResolver
class DefaultEndpointResolver(EndpointResolver):
    """Default endpoint resolver chain for the Alibaba Cloud Python SDK.

    Resolution is delegated to a chain, tried in order: predefined user
    endpoints, user-customized endpoints, local regional config, local
    global config, then the remote location service.

    .. note::
        Deprecated use for add_endpoint and modify_endpoint
        Not recommended
    """
    # class-level resolver shared by all instances for endpoints registered
    # ahead of construction
    predefined_endpoint_resolver = UserCustomizedEndpointResolver()

    def __init__(self, config, credentials_provider, user_config=None):
        self._user_customized_endpoint_resolver = UserCustomizedEndpointResolver()
        endpoint_resolvers = [
            self.predefined_endpoint_resolver,
            self._user_customized_endpoint_resolver,
            LocalConfigRegionalEndpointResolver(user_config),
            LocalConfigGlobalEndpointResolver(user_config),
            LocationServiceEndpointResolver(config, credentials_provider),
        ]
        self._resolver = ChainedEndpointResolver(endpoint_resolvers)

    def resolve(self, request):
        """Resolve the endpoint for ``request`` via the chained resolvers."""
        return self._resolver.resolve(request)

    def put_endpoint_entry(self, region_id, product_code, endpoint):
        """Register a custom endpoint for (region_id, product_code) on this instance."""
        self._user_customized_endpoint_resolver.put_endpoint_entry(region_id, product_code,
                                                                   endpoint)
| 41.385965 | 98 | 0.757948 | 1,166 | 0.493023 | 0 | 0 | 0 | 0 | 0 | 0 | 773 | 0.32685 |
525db5cd98632fab60967c35aa4c83a509cd65b6 | 1,891 | py | Python | src/experiment_collector.py | Context-Aware-Monitoring/Efficient-Stream-Monitoring | f08faaa87ac2ffe74014a9a6e864b641e4a160f5 | [
"MIT"
] | null | null | null | src/experiment_collector.py | Context-Aware-Monitoring/Efficient-Stream-Monitoring | f08faaa87ac2ffe74014a9a6e864b641e4a160f5 | [
"MIT"
] | null | null | null | src/experiment_collector.py | Context-Aware-Monitoring/Efficient-Stream-Monitoring | f08faaa87ac2ffe74014a9a6e864b641e4a160f5 | [
"MIT"
] | null | null | null | import pandas as pd
import collections
import collections.abc
import glob
import os
import sys
import time

import numpy as np
import yaml

import global_config
def flatten(d, parent_key='', sep='_'):
    """Recursively flatten a nested mapping into a single-level dict.

    Nested keys are joined with ``sep``, e.g. ``{'a': {'b': 1}}`` becomes
    ``{'a_b': 1}``.  ``parent_key`` is the prefix for all keys at this level.
    """
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        # collections.abc.MutableMapping -- the bare collections.MutableMapping
        # alias was removed in Python 3.10
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def collect_yaml_config(config):
    """Turn one experiment config dict into a DataFrame, one row per policy.

    Every top-level key except 'policies' and 'seed' is copied onto each row.
    The 'baseline' policy supplies the reference regret; each other policy's
    row gets ``improvement = baseline_regret / regret`` (0 if regret is 0).

    BUGFIX: uses a list of rows plus a single DataFrame construction instead
    of ``DataFrame.append``, which was removed in pandas 2.0.
    """
    keys = list(filter(lambda key: key not in ['policies', 'seed'], config.keys()))
    global_setting = {key: config[key] for key in keys}

    baseline_regret = 0
    for pol in config['policies']:
        if pol.get('identifier') == 'baseline':
            baseline_regret = pol['regret']

    rows = []
    for pol in config['policies']:
        if pol.get('identifier') == 'baseline':
            continue
        flat_dict = flatten(pol)
        regret = flat_dict.pop('regret')
        if regret != 0.0:
            flat_dict['improvement'] = baseline_regret / regret
        else:
            flat_dict['improvement'] = 0
        rows.append(flat_dict | global_setting)

    return pd.DataFrame(rows)
if __name__ == "__main__":
    # CLI: first argument is the filename prefix selecting which experiment
    # YAML files to aggregate into one CSV.
    filestart = sys.argv[1]
    os.chdir(global_config.EXPERIMENT_SERIALIZATION_DIR)
    experiment_files = glob.glob("*.yml")

    # Collect per-file frames and concatenate once at the end --
    # DataFrame.append was removed in pandas 2.0.
    frames = []
    for i, ef in enumerate(experiment_files):
        print('%d/%d' % (i, len(experiment_files)))
        with open(ef, 'r') as ymlfile:
            if filestart == ef[:len(filestart)]:
                experiment_data = yaml.safe_load(ymlfile)
                frames.append(collect_yaml_config(experiment_data))

    df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    df.to_csv('collected_%s.csv' % filestart, index=False)
| 28.651515 | 87 | 0.601269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.095188 |
525e027715d3144b67819590485c36cad49673b9 | 1,757 | py | Python | coord2vec/pipelines/lagoon_utils/auto_stage.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | null | null | null | coord2vec/pipelines/lagoon_utils/auto_stage.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | null | null | null | coord2vec/pipelines/lagoon_utils/auto_stage.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | 1 | 2021-01-25T09:21:17.000Z | 2021-01-25T09:21:17.000Z | from typing import Union, List
from lagoon import Stage, Task
from coord2vec.pipelines.lagoon_utils.lambda_task import LambdaTask
class AutoStage(Stage):
    """A lagoon Stage that auto-wires LambdaTask dependencies.

    Dependencies are inferred by matching a task's input parameter names to
    the output parameter names of tasks added earlier.
    """

    def __init__(self, name: str, **kwargs):
        super().__init__(name, **kwargs)
        # maps an output parameter name -> the LambdaTask that produces it
        self.output_param_to_task = dict()

    def update_output_params(self, task):
        """Record which task produces each output parameter name.

        Only LambdaTasks not already in the DAG are registered.
        """
        # TODO: kind-of ugly, uses internal _dict_graph
        if isinstance(task, LambdaTask) and task not in self._dict_graph:
            for output_param in (task.pass_input_names + task.func_output_names):
                self.output_param_to_task[output_param] = task

    def add_auto(self, task: LambdaTask):
        """Add ``task`` with dependencies on every producer of its inputs.

        Inputs with no known producer are assumed to come from pipeline
        variables rather than from an earlier task.
        """
        relevant_connections = set()
        for input_param in task.func_input_names:
            if input_param in self.output_param_to_task:
                relevant_connections.add(self.output_param_to_task[input_param])
            else:
                pass  # can come from pipelines variable
                # raise AssertionError(f"input {input_param} not presented before")

        if len(relevant_connections) == 0:
            self.add(task)
        else:
            self.add_dependency(list(relevant_connections), task)

    def add_dependency(
            self, current_task: Union[Task, List[Task]], next_task: Union[Task, List[Task]]
    ) -> "Stage":
        """Normalise arguments to lists, register every task's outputs, delegate."""
        if not isinstance(current_task, list):
            current_task = [current_task]
        if not isinstance(next_task, list):
            next_task = [next_task]

        for task in (next_task + current_task):
            self.update_output_params(task)  # will try for all NEW tasks

        return super(AutoStage, self).add_dependency(current_task, next_task)
def add_to_DAG(task: LambdaTask, s: AutoStage):
    """Convenience wrapper: add ``task`` to stage ``s`` with auto-wired deps."""
    s.add_auto(task)
525e67e026d94931f78e27fc1fb5b2d70f989a43 | 19,045 | py | Python | recipes/Python/576708_DVMVersusCAPM/recipe-576708.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/576708_DVMVersusCAPM/recipe-576708.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/576708_DVMVersusCAPM/recipe-576708.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | #On the name of ALLAH and may the blessing and peace of Allah
#be upon the Messenger of Allah Mohamed Salla Allahu Aliahi Wassalam.
#Author : Fouad Teniou
#Date : 07/03/09
#version :2.6.1
"""
collections module's extras in python 2.6.1 were used in my program, DVMextrapolating
DVMgordonsModel and CAPM subclasses of namedtuple Python class provide the cost of equity
the calculation of the dividend growth g in two different ways, and the value of the company
if the cost of equity Ke is known.
I used an utility method and the try/exceptions statements to raise errors
"""
import math as m
from collections import namedtuple
class MyError:
    """ Mixin providing validation of numeric arguments (must be non-negative)"""

    def _negativeNumberException(self, *args):
        """ Utility method to raise a negative number exception.

        Raises ValueError as soon as any of *args* is negative; returns None
        when all arguments are non-negative.
        """
        for item in args:
            if item < 0:
                # Parenthesized raise is valid in both Python 2 and Python 3,
                # unlike the legacy "raise ValueError, msg" statement (Py2-only)
                raise ValueError(" <The value %s should be a positive number " % item)
class DVMextrapolating(namedtuple('DVMextrapolating','dividend_just_paid,dividend_n_years,n,share_price,Ke'),MyError):
    """ Dividend Valuation Model with growth g extrapolated from dividend history.

    Fields: dividend_just_paid (D0), dividend_n_years (dividend paid n years
    ago), n (number of years), share_price (Po; None to compute it) and
    Ke (cost of equity; None to compute it).
    """

    #set __slots__ to an empty tuple keep memory requirements low
    __slots__ = ()

    #Pick Myerror method
    _negativeNumberException = MyError._negativeNumberException

    @property
    def g_extrapolatingModel(self):
        """ Compute g by extrapolation: g = (D0/D-n)**(1/n) - 1, formatted to 2 decimals """

        try:
            #Test for negative numbers input and raise the exception
            self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.n)
            return "%2.2f" % ((float(m.pow((self.dividend_just_paid/self.dividend_n_years),(1/float(self.n)))) -1))

        #Raise TypeError if input is not numerical
        except TypeError:
            # print() with a single argument behaves identically in Py2 and Py3
            print("\n<The entered value is not a number")

        #division by zero raises ZeroDivisionError exception
        except ZeroDivisionError:
            # Parenthesized raise works in both Python 2 and 3 (the legacy
            # "raise Exc, msg" statement is Python-2 only)
            raise ZeroDivisionError("\n<Please check and re-enter the values")

    @property
    def valueOfShare(self):
        """ Compute the share value Po = D0*(1+g)/(Ke-g), formatted to 2 decimals """

        try:
            #Test for negative numbers input and raise the exception
            self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.Ke)
            return "%2.2f" % (((self.dividend_just_paid*
                           (1+float(self.g_extrapolatingModel)))/(self.Ke-float(self.g_extrapolatingModel))))

        #Raise TypeError if input is not numerical
        except TypeError:
            print("\n<The entered value is not a number")

        #division by zero raises ZeroDivisionError exception
        except ZeroDivisionError:
            raise ZeroDivisionError("\n<Please check and re-enter the values")

    @property
    def costOfEquity(self):
        """ Compute cost of equity Ke = (D0*(1+g)/Po + g)*100 as a percentage, 1 decimal """

        try:
            #Test for negative numbers input and raise the exception
            self._negativeNumberException(self.dividend_just_paid,self.share_price)
            return "%2.1f" % ((((self.dividend_just_paid*
                            (1+float(self.g_extrapolatingModel))/self.share_price))+ float(self.g_extrapolatingModel))*100)

        #Raise TypeError if input is not numerical
        except TypeError:
            print("\n<The entered value is not a number")

        #division by zero raises ZeroDivisionError exception
        except ZeroDivisionError:
            raise ZeroDivisionError("\n<Please check and re-enter the values")

    def __str__(self):
        """ String representation of DVMextrapolating"""

        if self.Ke == None:
            return "\n< Extrapolating Growth Model g = %s\n \
            \n< Cost of equity Ke = %s \n\
            \n< Market value of the share Po = %s" % \
            (self.g_extrapolatingModel,(self.costOfEquity+'%'),('$'+ str(self.share_price)))
        else:
            return "\n< Extrapolating Growth Model g = %s\n \
            \n< Cost of equity Ke = %s \n\
            \n< Market value of the share Po = %s" % \
            (self.g_extrapolatingModel,self.Ke,('$'+ str(self.valueOfShare)))
class DVMgordonsModel(namedtuple('DVMgordonsModel','dividend_just_paid,return_on_equity,dividend_payout,share_price,Ke'),MyError):
    """ Dividend Valuation Model with growth g from Gordon's growth model.

    Fields: dividend_just_paid (D0), return_on_equity, dividend_payout,
    share_price (Po; None to compute it) and Ke (cost of equity; None to
    compute it).
    """

    #set __slots__ to an empty tuple keep memory requirements low
    __slots__ = ()

    #Pick Myerror method
    _negativeNumberException = MyError._negativeNumberException

    @property
    def g_gordonsModel(self):
        """ Compute g using Gordon's growth Model: g = ROE * (1 - payout ratio) """

        try:
            #Test for negative numbers input and raise the exception
            self._negativeNumberException(self.return_on_equity,self.dividend_payout)
            return self.return_on_equity * (1-self.dividend_payout)
        #Raise TypeError if input is not numerical
        except TypeError:
            # print() with a single argument behaves identically in Py2 and Py3
            print("\n<The entered value is not a number")

    @property
    def valueOfShare(self):
        """ Compute the share value Po = D0*(1+g)/(Ke-g), formatted to 2 decimals """
        try:
            #Test for negative numbers input and raise the exception
            self._negativeNumberException(self.dividend_just_paid,self.Ke)
            return "%2.2f" % (((self.dividend_just_paid*
                           (1+float(self.g_gordonsModel)))/(self.Ke-self.g_gordonsModel)))

        #Raise TypeError if input is not numerical
        except TypeError:
            print("\n<The entered value is not a number")

        #division by zero raises ZeroDivisionError exception
        except ZeroDivisionError:
            # Parenthesized raise works in both Python 2 and 3 (the legacy
            # "raise Exc, msg" statement is Python-2 only)
            raise ZeroDivisionError("\n<Please check and re-enter the values")

    @property
    def costOfEquity(self):
        """ Compute cost of equity Ke = (D0*(1+g)/Po + g)*100 as a percentage, 1 decimal """

        try:
            #Test for negative numbers input and raise the exception
            self._negativeNumberException(self.dividend_just_paid,self.share_price)
            return "%2.1f" % ((((self.dividend_just_paid*
                             (1+float(self.g_gordonsModel)))/(self.share_price))+ float(self.g_gordonsModel))*100 )

        #Raise TypeError if input is not numerical
        except TypeError:
            print("\n<The entered value is not a number")

        #division by zero raises ZeroDivisionError exception
        except ZeroDivisionError:
            raise ZeroDivisionError("\n<Please check and re-enter the values")

    def __str__(self):
        """ String representation of DVMgordonsModel"""

        if self.Ke == None:
            return "\n< Gordon's Growth Model g = %s\n \
            \n< Cost of equity Ke = %s \n\
            \n< Market value of the share Po = %s" % \
            (self.g_gordonsModel,(self.costOfEquity+'%'),('$'+ str(self.share_price)))
        else:
            return "\n< Gordon's Growth Model g = %s\n \
            \n< Cost of equity Ke = %s \n\
            \n< Market value of the share Po = %s" % \
            (self.g_gordonsModel,self.Ke,('$'+ str(self.valueOfShare)))
class CAPM(namedtuple('CAPM','Rf,Beta,Rm'),MyError):
    """ Capital Asset Pricing Model: Ke = Rf + Beta*(Rm - Rf).

    Fields: Rf (risk-free rate), Beta, Rm (market return), all in percent.
    """

    #set __slots__ to an empty tuple keep memory requirements low
    __slots__ = ()

    #Pick Myerror method
    _negativeNumberException = MyError._negativeNumberException

    @property
    def Ke(self):
        """ Compute cost of equity using the CAPM model """

        try:
            #Test for negative numbers input and raise the exception
            self._negativeNumberException(self.Rf,self.Beta,self.Rm)
            return self.Rf + self.Beta*(self.Rm - self.Rf)

        #Raise TypeError if input is not numerical
        except TypeError:
            # print() with a single argument behaves identically in Py2 and Py3
            print("\n<The entered value is not a number")

    def __str__(self):
        """ String representation of CAPM"""

        return "\n< Ke = %s" % self.Ke+"%"
if __name__ == '__main__':
    # Demo run. print() with a single argument behaves identically under
    # Python 2 and Python 3, so the transcript below is unchanged.
    a = CAPM('Rf','Beta','Rm')
    b = [7,0.7,17]
    a = a._make(b)
    print("\n"+"\4"*43)
    print(a)
    print("\n"+"\4"*43)
    c = DVMextrapolating('dividend_just_paid','dividend_n_years','n','share_price','Ke')
    d = [0.24,0.1525,4,None,a.Ke/100]
    c = c._make(d)
    print(c)

    print("\n"+"\4"*43)
    e = DVMgordonsModel('dividend_just_paid','return_on_equity','dividend_payout','share_price','Ke')

    f = [0.18,0.2,0.72,None,0.127]
    e = e._make(f)
    print(e)
    print("\n"+"\4"*43)
    g = [0.25,0.17,7,17.50,None]
    c = c._make(g)
    print(c)

    print("\n"+"\4"*43)
    h = [0.17,0.3,0.37,1.77,None]
    e = e._make(h)
    print(e)

    print("\n"+"\4"*43)
    print("")  # equivalent to the bare Py2 "print": emits a single blank line
    print(c.g_extrapolatingModel)
    print(c.costOfEquity)
    print(e.g_gordonsModel)
    print(e.costOfEquity)
    print("\n"+"\5"*43)
    # Renamed from "m": the original name shadowed the module-level
    # "import math as m" alias used by g_extrapolatingModel
    vals = [None,0.5,0.57,None,None]
    e = e._make(vals)
    print(e.g_gordonsModel)
##########################################################################################
# c:\Python26>python "C:\Users\Fouad Teniou\Documents\python\DVM_Versus_CAPM7.py"
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Ke = 14.0%
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Extrapolating Growth Model g = 0.12
#< Cost of equity Ke = 0.14
#< Market value of the share Po = $13.44
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Gordon's Growth Model g = 0.056
#< Cost of equity Ke = 0.127
#< Market value of the share Po = $2.68
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Extrapolating Growth Model g = 0.06
#< Cost of equity Ke = 7.5%
#< Market value of the share Po = $17.5
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Gordon's Growth Model g = 0.189
#< Cost of equity Ke = 30.3%
#< Market value of the share Po = $1.77
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#0.06
#7.5
#0.189
#30.3
#♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣
#0.215
#c:\Python26>
##########################################################################################
#Version : Python 3.2
#import math as m
#from collections import namedtuple
#class MyError:
# """ Demonstrate imporper operation on negative number"""
# def _negativeNumberException(self,*args):
# """ Utility method to raise a negative number exception"""
#
# for item in args:
# if item <0:
#
# raise ValueError(" <The value %s should be a positive number " % item)
#
#class DVMextrapolating(namedtuple('DVMextrapolating','dividend_just_paid,dividend_n_years,n,share_price,Ke'),MyError):
# """ DVMeModel class inherits from tuple and MyError class """
#
# #set __slots__ to an empty tuple keep memory requirements low
# __slots__ = ()
#
# #Pick Myerror method
# _negativeNumberException =MyError._negativeNumberException
#
# @property
# def g_extrapolatingModel(self):
# """ Compute g using extrapolating """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.n)
# return "%2.2f" % ((float(m.pow((self.dividend_just_paid/self.dividend_n_years),(1/float(self.n)))) -1))
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# @property
# def valueOfShare(self):
# """ Compute the share value """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.Ke)
# return "%2.2f" % (((self.dividend_just_paid*
# (1+float(self.g_extrapolatingModel)))/(self.Ke-float(self.g_extrapolatingModel))))
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# @property
# def costOfEquity(self):
# """ Compute cost of equity using DVM Model """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.share_price)
# return "%2.1f" % ((((self.dividend_just_paid*
# (1+float(self.g_extrapolatingModel))/self.share_price))+ float(self.g_extrapolatingModel))*100)
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# def __str__(self):
# """ String representation of DVMeModel"""
#
# if self.Ke == None:
# return "\n< Extrapolating Growth Model g = %s\n \
# \n< Cost of equity Ke = %s \n\
# \n< Market value of the share Po = %s" % \
# (self.g_extrapolatingModel,(self.costOfEquity+'%'),('$'+ str(self.share_price)))
# else:
# return "\n< Extrapolating Growth Model g = %s\n \
# \n< Cost of equity Ke = %s \n\
# \n< Market value of the share Po = %s" % \
# (self.g_extrapolatingModel,self.Ke,('$'+ str(self.valueOfShare)))
#
#class DVMgordonsModel(namedtuple('DVMgordonsModel','dividend_just_paid,return_on_equity,dividend_payout,share_price,Ke'),MyError):
# """ DVMgModel class inherits from tuple and MyError classes """
#
# #set __slots__ to an empty tuple keep memory requirements low
# __slots__ = ()
#
# #Pick Myerror method
# _negativeNumberException =MyError._negativeNumberException
#
# @property
# def g_gordonsModel(self):
# """ Compute g using Gordons growth Model """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.return_on_equity,self.dividend_payout)
# return self.return_on_equity * (1-self.dividend_payout)
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# @property
# def valueOfShare(self):
# """ Compute the share value """
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.Ke)
# return "%2.2f" % (((self.dividend_just_paid*
# (1+float(self.g_gordonsModel)))/(self.Ke-self.g_gordonsModel)))
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# @property
# def costOfEquity(self):
# """ Compute cost of equity using DVM Model """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.share_price)
# return "%2.1f" % ((((self.dividend_just_paid*
# (1+float(self.g_gordonsModel)))/(self.share_price))+ float(self.g_gordonsModel))*100 )
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# def __str__(self):
# """ String representation of DVMgModel"""
#
# if self.Ke == None:
#
# return "\n< Gordon's Growth Model g = %s\n \
# \n< Cost of equity Ke = %s \n\
# \n< Market value of the share Po = %s" % \
# (self.g_gordonsModel,(self.costOfEquity+'%'),('$'+ str(self.share_price)))
#
# else:
# return "\n< Gordon's Growth Model g = %s\n \
# \n< Cost of equity Ke = %s \n\
# \n< Market value of the share Po = %s" % \
# (self.g_gordonsModel,self.Ke,('$'+ str(self.valueOfShare)))
#
#class CAPM(namedtuple('CAPM','Rf,Beta,Rm'),MyError):
# """ CAPM class inherits from tuple and MyError class """
#
# #set __slots__ to an empty tuple keep memory requirements low
# __slots__ = ()
#
# #Pick Myerror method
# _negativeNumberException =MyError._negativeNumberException
#
# @property
# def Ke(self):
# """ Compute cost of equity using CAPM model """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.Rf,self.Beta,self.Rm)
# return self.Rf + self.Beta*(self.Rm - self.Rf)
#
# #Raise ValueError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# def __str__(self):
# """ String representation of CAPM"""
#
# return "\n< Ke = %s" % self.Ke+"%"
#
#if __name__ == '__main__':
# a = CAPM('Rf','Beta','Rm')
# b = [7,0.7,17]
# a = a._make(b)
# print("\n"+"\4"*43)
# print(a)
# print("\n"+"\4"*43)
# c = DVMextrapolating('dividend_just_paid','dividend_n_years','n','share_price','Ke')
# d = [0.24,0.1525,4,None,a.Ke/100]
# c = c._make(d)
# print(c)
#
# print("\n"+"\4"*43)
# e = DVMgordonsModel('dividend_just_paid','return_on_equity','dividend_payout','share_price','Ke')
#
# f = [0.18,0.2,0.72,None,0.127]
# e = e._make(f)
# print(e)
# print("\n"+"\4"*43)
# g = [0.25,0.17,7,17.50,None]
# c = c._make(g)
# print(c)
#
# print("\n"+"\4"*43)
# h = [0.17,0.3,0.37,1.77,None]
# e = e._make(h)
# print(e)
#
# print("\n"+"\4"*43)
# print()
# print(c.g_extrapolatingModel)
# print(c.costOfEquity)
# print(e.g_gordonsModel)
# print(e.costOfEquity)
# print("\n"+"\5"*43)
# m = [None,0.5,0.57,None,None]
# e = e._make(m)
# print(e.g_gordonsModel)
| 35.798872 | 131 | 0.582463 | 7,533 | 0.383417 | 0 | 0 | 4,564 | 0.2323 | 0 | 0 | 14,265 | 0.726065 |
525e847c9953f41e6f37f2f968fe49ecb18a88d9 | 141,262 | py | Python | nexinfosys/restful_service/service_main.py | ENVIRO-Module/nis-backend | fd86cf30f79f53cdccddd2a5479507d32f914d4e | [
"BSD-3-Clause"
] | 4 | 2021-04-15T08:45:00.000Z | 2022-02-04T18:12:06.000Z | nexinfosys/restful_service/service_main.py | ENVIRO-Module/nis-backend | fd86cf30f79f53cdccddd2a5479507d32f914d4e | [
"BSD-3-Clause"
] | null | null | null | nexinfosys/restful_service/service_main.py | ENVIRO-Module/nis-backend | fd86cf30f79f53cdccddd2a5479507d32f914d4e | [
"BSD-3-Clause"
] | null | null | null | import binascii
import csv
import io
import json
import logging
import os
import traceback
import urllib
from pathlib import Path
import openpyxl
import pandas as pd
import pyximport
import redis
# from flask import (jsonify, abort, redirect, url_for,
#
# )
from NamedAtomicLock import NamedAtomicLock
from flask import (Response, request, session as flask_session, send_from_directory, redirect
)
from flask.helpers import get_root_path
from flask_cors import CORS
from flask_session import Session as FlaskSessionServerSide
from openpyxl.writer.excel import save_virtual_workbook
from werkzeug.exceptions import NotFound
from nexinfosys.ie_exports.jupyter_notebook import generate_jupyter_notebook_python, generate_jupyter_notebook_r
from nexinfosys.restful_service.rest_helper import gzipped
pyximport.install(reload_support=True, language_level=3)
# >>>>>>>>>> IMPORTANT <<<<<<<<<
# To debug in local mode, prepare an environment variable "MAGIC_NIS_SERVICE_CONFIG_FILE", with value "./nis_local.conf"
# >>>>>>>>>> IMPORTANT <<<<<<<<<
from nexinfosys.command_generators.parser_spreadsheet_utils import rewrite_xlsx_file
# from nexinfosys.command_generators.parser_spreadsheet_utils_accel import rewrite_xlsx_file
from nexinfosys.ie_exports.reference_of_commands import obtain_commands_help
from nexinfosys.command_definitions import commands
from nexinfosys.command_field_definitions import command_fields, _command_field_names
from nexinfosys.command_generators import Issue, IType
from nexinfosys.command_generators.parser_field_parsers import string_to_ast, simple_ident
from nexinfosys.common.helper import generate_json, str2bool, \
download_file, any_error_issue, wv_upload_file
from nexinfosys.models.musiasem_methodology_support import *
from nexinfosys.restful_service import app, get_results_in_session
from nexinfosys.initialization import initialize_database_data, initialize_databases, get_parameters_in_state, \
get_scenarios_in_state, register_external_datasources, get_graph_from_state, \
get_dataset_from_state, get_model, get_geolayer, get_ontology, validate_command, command_field_help, comm_help
import nexinfosys
from nexinfosys.command_executors import create_command
from nexinfosys.command_executors.specification.metadata_command import generate_dublin_core_xml
from nexinfosys.model_services import State, get_case_study_registry_objects
from nexinfosys.model_services.workspace import InteractiveSession, CreateNew, ReproducibleSession, \
execute_command_container, convert_generator_to_native, prepare_and_solve_model
from nexinfosys.restful_service import nis_api_base, nis_client_base, nis_external_client_base
from nexinfosys.models import log_level
from nexinfosys.serialization import serialize, deserialize, serialize_state, deserialize_state
# #####################################################################################################################
# >>>> BOOT TIME. FUNCTIONS AND CODE <<<<
# #####################################################################################################################
def printNProcessors(s, state):
    """Debug helper: log the number of Processor objects registered in *state*.

    :param s: Label identifying the call site, included in the log output
    :param state: State object holding the case study registry
    """
    from nexinfosys.models.musiasem_concepts import Processor
    # Only the global index is needed; the other registry objects are unused here
    glb_idx, _, _, _, _ = get_case_study_registry_objects(state)
    logging.debug("--------------------------------------------------------")
    # Lazy %-style arguments: formatting only happens if DEBUG is enabled
    logging.debug("--- %s -----------------------------------------", s)
    logging.debug("Number of processors: %s", len(glb_idx.get(Processor.partial_key())))
    for _ in range(3):
        logging.debug("--------------------------------------------------------")
def construct_session_persistence_backend():
    """Build the Flask-Session configuration dict from app.config['REDIS_HOST'].

    Supported REDIS_HOST values: "redis_lite" (embedded redislite),
    "filesystem:..." (file-based sessions) or a Redis host name.
    Exits the process (sys.exit(1)) when no backend can be configured or the
    Redis instance is unreachable.

    :return: dict of Flask-Session configuration keys (SESSION_TYPE, ...)
    """
    # A REDIS instance needs to be available. Check it
    # A local REDIS could be as simple as:
    #
    # docker run --rm -p 6379:6379 redis:alpine
    #
    d = {}
    if 'REDIS_HOST' in app.config:
        r_host = app.config['REDIS_HOST']
        d["SESSION_KEY_PREFIX"] = "nis:"
        d["SESSION_PERMANENT"] = False
        rs2 = None
        if r_host == "redis_lite":
            try:
                import redislite
                rs2 = redislite.Redis("tmp_nis_backend_redislite.db")  # serverconfig={'port': '6379'}
                d["SESSION_TYPE"] = "redis"
                d["SESSION_REDIS"] = rs2
                # d["PERMANENT_SESSION_LIFETIME"] = 3600
            except ImportError:
                logging.error("Package 'redislite' not found. Please, either change REDIS_HOST configuration variable "
                              "to 'filesystem' or 'redis', or execute 'pip install redislite' and retry")
                sys.exit(1)
        elif r_host.startswith("filesystem:"):
            d["SESSION_TYPE"] = "filesystem"
            if app.config.get("REDIS_HOST_FILESYSTEM_DIR"):
                d["SESSION_FILE_DIR"] = app.config.get("REDIS_HOST_FILESYSTEM_DIR")
            d["SESSION_FILE_THRESHOLD"] = 100
            # d["SESSION_FILE_MODE"] = 666
        else:
            rs2 = redis.Redis(r_host)
            d["SESSION_TYPE"] = "redis"
            d["SESSION_REDIS"] = rs2
            # d["PERMANENT_SESSION_LIFETIME"] = 3600
        if rs2:
            try:
                logging.debug("Trying connection to REDIS '"+r_host+"'")
                rs2.ping()
                logging.debug("Connected to REDIS instance '"+r_host+"'")
            # Narrowed from a bare "except:", which would also swallow
            # SystemExit/KeyboardInterrupt
            except Exception:
                logging.debug("REDIS instance '"+r_host+"' not reachable, exiting now!")
                sys.exit(1)
    elif "SESSION_TYPE" not in d:
        # NOTE(review): d is always empty when REDIS_HOST is absent, so this
        # branch always exits in that case
        logging.error("No session persistence backend configured, exiting now!")
        sys.exit(1)
    return d
# #####################################################################################################################
# >>>> THE INITIALIZATION CODE <<<<
#
# ---- Module-level initialization (runs once, at import time) ----
logger = logging.getLogger(__name__)
logging.getLogger('flask_cors').level = logging.DEBUG
app.logger.setLevel(log_level)
logger.setLevel(log_level)
logging.basicConfig(level=logging.DEBUG)
# Serialize database initialization across workers with a named (cross-process) lock
lock = NamedAtomicLock("nis-backend-lock")
lock.acquire()
try:
    initialize_databases()
finally:
    # Always release the lock, even if database initialization fails
    lock.release()
# Register external statistical data sources into the global manager
nexinfosys.data_source_manager = register_external_datasources()
# Configure server-side session persistence (redis / redislite / filesystem)
d = construct_session_persistence_backend()
if "SESSION_REDIS" in d:
    nexinfosys.redis = d["SESSION_REDIS"]
else:
    nexinfosys.redis = None
app.config.update(d)
FlaskSessionServerSide(app)  # Flask Session (server-side session storage)
# Allow cross-origin requests (with credentials) on the NIS API routes only
CORS(app,  # CORS
     resources={r"/nis_api/*": {"origins": "*"}},
     supports_credentials=True
     )
logging.debug(f"DB_CONNECTION_STRING: {app.config['DB_CONNECTION_STRING']}\n----------------------")
logging.debug(f'Assuming {os.environ[nexinfosys.cfg_file_env_var]} as configuration file')
logging.debug(f'command_field_names = {_command_field_names}')
# #####################################################################################################################
# >>>> UTILITY FUNCTIONS <<<<
# #####################################################################################################################
def reset_database():
    """
    Empty ALL data in the database !!!!
    Used in testing web services

    Creates the schema first if any table is missing, then deletes all rows.
    Does nothing unless the TESTING configuration flag is enabled.
    :return:
    """
    if is_testing_enabled():
        connection2 = nexinfosys.engine.connect()
        try:
            # Check whether every ORM-declared table exists in the database
            tables = ORMBase.metadata.tables
            table_existence = [nexinfosys.engine.dialect.has_table(connection2, tables[t].name) for t in tables]
        finally:
            # Close the connection even if has_table raises (the original leaked it)
            connection2.close()
        if False in table_existence:
            ORMBase.metadata.bind = nexinfosys.engine
            ORMBase.metadata.create_all()
        # Delete child tables before parents to respect foreign-key constraints
        for tbl in reversed(ORMBase.metadata.sorted_tables):
            nexinfosys.engine.execute(tbl.delete())
def build_json_response(obj, status=200):
    """Serialize *obj* to JSON and wrap it in an HTTP response with the given status."""
    body = generate_json(obj)
    return Response(body, mimetype="text/json", status=status)
def serialize_isession_and_close_db_session(sess: InteractiveSession):
    """Persist *sess* into the Flask server-side session, then close its DB session.

    The reproducible session (if open) is jsonpickled separately under the
    "rsession" key; the interactive session itself goes under "isession".
    NOTE(review): mutates *sess* in place (state serialized, session factory
    temporarily detached) before restoring the factory at the end.

    :param sess: the InteractiveSession to persist and close
    """
    logging.debug("serialize_isession IN")

    # Serialize state
    if isinstance(sess._state, str):
        logging.debug("Str")

    sess._state = serialize_state(sess._state)  # TODO New

    # Serialize WorkSession apart, if it exists
    if sess._reproducible_session:
        csvs = sess._reproducible_session._session
        # csvs.version.state = st  # TODO New
        # sess._reproducible_session.state = st  # TODO New
        if csvs and csvs.version:  # FIX: "csvs" may be None in some situations
            # Collect case study, version, version-session and all its commands
            o_list = [csvs.version.case_study, csvs.version, csvs]
            o_list.extend(csvs.commands)
            d_list = serialize(o_list)
            # JSON Pickle and save string
            s = jsonpickle.encode({"allow_saving": sess._reproducible_session._allow_saving, "pers": d_list})
            flask_session["rsession"] = s
            sess._reproducible_session = None
        else:
            # TODO New code. Test it
            logging.debug("Reproducible session corrupted. Closing Reproducible session")
            if "rsession" in flask_session:
                del flask_session["rsession"]
    else:
        # No reproducible session open: remove any stale "rsession" entry
        if "rsession" in flask_session:
            del flask_session["rsession"]
    # Detach the (unpicklable) session factory before encoding the session
    tmp = sess.get_sf()
    sess.set_sf(None)
    sess._reproducible_session = None
    # Serialize sess.state and sess._identity
    s = jsonpickle.encode(sess)
    flask_session["isession"] = s
    # # Save pickled state, for "in-vitro" analysis
    # with open("/home/rnebot/pickled_state", "w") as f:
    #     f.write(s)
    # Restore the session factory so the caller's object stays usable
    sess.set_sf(tmp)
    sess.close_db_session()
    logging.debug("serialize_isession OUT")
def deserialize_isession_and_prepare_db_session(return_error_response_if_none=True) -> InteractiveSession:
    """Rebuild the InteractiveSession stored in the Flask server-side session.

    Decodes the "isession" entry, restores its state and DB session factory,
    and reattaches the reproducible session (if an "rsession" entry exists).

    :param return_error_response_if_none: when True and no session can be
        rebuilt, return the canned 400 Response instead of None
    :return: an InteractiveSession, None, or the NO_ISESS_RESPONSE Response
    """
    logging.debug("deserialize_issesion IN")
    if "isession" in flask_session:
        s = flask_session["isession"]
        try:
            sess = jsonpickle.decode(s)
            if sess._state:
                sess._state = deserialize_state(sess._state)
            sess.set_sf(DBSession)
            if "rsession" in flask_session:
                # Rebuild the reproducible session from its separate entry
                rs = ReproducibleSession(sess)
                rs.set_sf(sess.get_sf())
                d = jsonpickle.decode(flask_session["rsession"])
                rs._allow_saving = d["allow_saving"]
                o_list = deserialize(d["pers"])
                # Element 2 is the version session (after case study and version)
                rs._session = o_list[2]  # type: CaseStudyVersionSession
                sess._reproducible_session = rs
        except Exception as e:
            # Corrupted session data: log and behave as if no session exists
            traceback.print_exc()
            sess = None
    else:
        sess = None
    logging.debug("deserialize_issesion OUT")

    if not sess and return_error_response_if_none:
        return NO_ISESS_RESPONSE
    else:
        return sess
def is_testing_enabled():
    """Return True when the Flask TESTING flag is set (as a bool or a truthy string)."""
    if "TESTING" not in app.config:
        return False
    flag = app.config["TESTING"]
    # Strings such as "true" or "1" (any case) also enable testing mode
    return flag if isinstance(flag, bool) else flag.lower() in ["true", "1"]
# Canned 400 response returned by endpoints that require an open interactive session
NO_ISESS_RESPONSE = build_json_response({"error": "No interactive session active. Please, open one first ('POST /isession')"}, 400)
# >>>> SPECIAL FUNCTIONS <<<<
# @app.before_request
# def print_headers():
# print("HEADER Authorization")
# found = False
# for h in request.headers:
# if h[0] in ["Authorization", "Autorizacion"]:
# print(h[0] + ": " + str(h[1]))
# found = True
# if not found:
# print("-- not sent --")
@app.after_request
def after_a_request(response):
    """Echo incoming cookies back to the client; drop the session cookie on invalidation."""
    for name, value in request.cookies.items():
        response.set_cookie(name, value)
    if "__invalidate__" in flask_session:
        response.delete_cookie(app.session_cookie_name)
    return response
# #####################################################################################################################
# >>>> SERVE ANGULAR2 CLIENT FILES <<<<
# #####################################################################################################################
@app.route("/")
def index():
return redirect(nis_client_base)
@app.route(nis_client_base + "/", methods=["GET"])
@app.route(nis_client_base + "/<path:path>", methods=["GET"])
@app.route(nis_external_client_base + "/<path:path>", methods=["GET"])
def send_web_client_file(path=None):
    """
    Serve files from the Angular2 client
    To generate these files (ON EACH UPDATE TO THE CLIENT:
    * CD to the Angular2 project directory
    * ng build --prod --aot --base-href /nis_client/
    * CP * <FRONTEND directory>

    :param path: relative path of the requested file; defaults to "index.html"
    :return: Flask response with the file contents (index.html as fallback)
    """
    # Extension -> MIME type table (replaces the previous long if/elif chain)
    mime_by_ext = {
        ".js": "application/javascript",
        ".html": "text/html",
        ".png": "image/png",
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".css": "text/css",
        ".json": "application/json",
        ".ico": "image/x-icon",
        ".svg": "image/svg+xml",
        ".eot": "application/vnd.ms-fontobject",
        ".woff": "application/font-woff",
        ".woff2": "application/font-woff2",
        ".ttf": "application/x-font-ttf",
    }

    def detect_mimetype(fn):
        """Return the MIME type for file name *fn*, or None if unknown."""
        lower = fn.lower()
        # Angular's generated "main.<hash>.js" bundle is deliberately served
        # as text/html (behavior kept from the original implementation)
        if lower.startswith("main.") and lower.endswith(".js"):
            return "text/html"
        return mime_by_ext.get(os.path.splitext(lower)[1])

    base = Path(get_root_path("nexinfosys.restful_service"))
    base = str(base.parent)+os.sep+"frontend"
    logger.debug("BASE DIRECTORY: "+base)
    incoming_url = request.url_rule.rule

    if not path or path == "":
        path = "index.html"

    logging.debug(f"NIS (as Web Server), serving static file with path: {path}")

    if "config.json" in path:
        # The client fetches its backend URL from this synthesized config
        return build_json_response(dict(url=f"{request.host_url[:-1]}"), 200)

    if nis_external_client_base in incoming_url:
        # From outside
        if path == "index.html":
            # TODO Possibility of changing both the base and the file name
            # TODO The intention is to NOT show the "Login" possibilities, so
            # TODO users are always anonymous. To be discussed.
            base = get_root_path("clients/web")
            new_name = "index.html"
        else:
            new_name = path
    else:
        # From inside
        new_name = path

    mimetype = detect_mimetype(new_name)

    logger.debug(f"File: {new_name}; MIMETYPE: {mimetype}")

    try:
        return send_from_directory(base, new_name, mimetype=mimetype)
    except NotFound:
        # Unknown paths fall back to index.html (client-side routing)
        return send_from_directory(base, "index.html", mimetype="text/html")
# #####################################################################################################################
# >>>> SERVE STATIC FILES <<<<
# #####################################################################################################################
@app.route(nis_api_base + "/static/<path:path>", methods=["GET"])
def send_static_file(path):
    """Serve a file from this package's "static" directory."""
    root = Path(get_root_path("nexinfosys.restful_service"))
    return send_from_directory(f"{root}/static", path)
# #####################################################################################################################
# >>>> RESTFUL INTERFACE <<<<
# #####################################################################################################################
# -- Special "give me state" for Case Study Management --
@app.route(nis_api_base + "/isession/rsession/state_summary", methods=["GET"])
def summary_status():  # Summary status
    """Return a JSON summary of the session status.

    Keys: isession_open; and when open: identified, rsession_open and
    (if a reproducible session exists) "commands" with per-command
    executed flags.

    NOTE(review): deserialize_isession_and_prepare_db_session() can return a
    (truthy) Response object when no session exists; the `if isess` check
    below would then treat it as a session -- confirm this path cannot occur.
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    d = {}
    if isess:
        d["isession_open"] = True
        # Identity: fixed boolean -- the original used "or", which made
        # "identified" True even when the identity id was None or ""
        ident = isess.get_identity_id()
        d["identified"] = ident is not None and ident != ""
        # Reproducible session
        if isess.reproducible_session_opened():
            d["rsession_open"] = True
            # Return a list with executed flags, a flag per command
            d["commands"] = [dict(executed=c.execution_end) for c in isess.reproducible_session.ws_commands]
        else:
            d["rsession_open"] = False
    else:
        d["isession_open"] = False

    return build_json_response(d, 200)
# -- Interactive session --
@app.route(nis_api_base + "/resetdb", methods=["POST"])
def reset_db():
    """Wipe and re-seed the database; only permitted when TESTING is enabled."""
    if not is_testing_enabled():
        return build_json_response({"error": "Illegal operation!!"}, 400)
    reset_database()
    initialize_database_data()
    interactive_session_close()  # Leave session if already in
    return build_json_response({}, 204)
@app.route(nis_api_base + "/isession", methods=["POST"])
def interactive_session_open():
    """Open a new interactive session; refuse if one is already open."""
    existing = deserialize_isession_and_prepare_db_session(False)
    if existing:
        return build_json_response({"error": "Close existing interactive session ('DELETE /isession'"}, 400)
    new_sess = InteractiveSession(DBSession)
    serialize_isession_and_close_db_session(new_sess)
    return build_json_response({}, 204)
@app.route(nis_api_base + "/isession", methods=["GET"])
def get_interactive_session():
    """Report whether an interactive session is currently open."""
    isess = deserialize_isession_and_prepare_db_session(False)
    st = "isession_open" if isess else "isession_closed"
    logging.debug("Get Isession: " + st)
    return build_json_response(st, 200)
@app.route(nis_api_base + "/isession/state", methods=["DELETE"])
def interactive_session_reset_state():
    """Clear the state of the current interactive session."""
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        # No open session: propagate the canned error response
        return isess
    if isess.state:
        isess.reset_state()
    serialize_isession_and_close_db_session(isess)
    return build_json_response({}, 204)
# Set identity at this moment for the interactive session
@app.route(nis_api_base + "/isession/identity", methods=["PUT"])
def interactive_session_set_identity():
    """
    Establish the identity of the open InteractiveSession.

    Two identification modes appear here: application identification
    (user+password passed as query parameters) and token identification
    (token + auth service read from headers). If an identity is already set,
    the call only succeeds in testing mode when re-identifying the same user.

    :return: 200 with {"identity": <id>} on success, 401 with {} otherwise
    """
    # Recover InteractiveSession
    # if request.method=="OPTIONS":
    #     r = build_json_response({}, 200)
    #     h = r.headers
    #     h['Access-Control-Allow-Origin'] = "http://localhost:4200"
    #     h['Access-Control-Allow-Methods'] = "PUT,POST,DELETE,GET,OPTIONS"
    #     h['Access-Control-Max-Age'] = str(21600)
    #     h['Access-Control-Allow-Credentials'] = "true"
    #     h['Access-Control-Allow-Headers'] = "Content-Type, Authorization, Content-Length, X-Requested-With"
    #     return r
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # If there is a current identity, issue an error. First "unidentify"
    if isess.get_identity_id():
        # Re-identification is only tolerated in testing mode, for the same user
        testing = is_testing_enabled()
        if testing and request.args.get("user") and isess.get_identity_id() == request.args.get("user"):
            result = True
        else:
            result = False
    else:
        # Two types of identification: external, using OAuth tokens, or application, using user+password
        application_identification = True  # NOTE(review): hard-coded True, the token branch below is currently unreachable
        if application_identification:
            if request.args.get("user"):
                testing = is_testing_enabled()
                result = isess.identify({"user": request.args.get("user"),
                                         "password": request.args.get("password", None)
                                         },
                                        testing=testing
                                        )
        else:
            # TODO Check the validity of the token using the right Authentication service
            result = isess.identify({"token": request.headers.get("token"),
                                     "service": request.headers.get("auth_service")
                                     }
                                    )
    serialize_isession_and_close_db_session(isess)
    r = build_json_response({"identity": isess.get_identity_id()} if result else {},
                            200 if result else 401)
    return r
@app.route(nis_api_base + "/isession/identity", methods=["GET"])
def interactive_session_get_identity():
    """
    Return the identity id of the open InteractiveSession.
    """
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    return build_json_response(dict(identity=isess.get_identity_id()))
# Set to anonymous user again (or "logout")
@app.route(nis_api_base + "/isession/identity", methods=["DELETE"])
def interactive_session_remove_identity():
    """
    Remove the identity from the open InteractiveSession ("logout").

    Refused (401) while a reproducible session is open. A no-op (no persist)
    when no identity is currently set.

    :return: JSON with the resulting identity (empty after a successful logout)
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Un-identify
    if isess.get_identity_id():
        if isess.reproducible_session_opened():
            # If reproducible session open, error!
            r = build_json_response({"error": "There is an open Reproducible Session. Close it first."}, 401)
        else:
            isess.unidentify()
            r = build_json_response({"identity": isess.get_identity_id()})
        # Persist the (possibly changed) session
        serialize_isession_and_close_db_session(isess)
    else:
        r = build_json_response({"identity": isess.get_identity_id()})
    return r
# Close interactive session (has to log out if some identity is active)
@app.route(nis_api_base + "/isession", methods=["DELETE"])
def interactive_session_close():
    """
    Close the InteractiveSession (if any) and invalidate the Flask session.
    """
    isess = deserialize_isession_and_prepare_db_session(False)
    if isess:
        isess.quit()
    # Wipe the Flask session and mark it for invalidation
    flask_session.clear()
    flask_session["__invalidate__"] = True
    return build_json_response({})
@app.route(nis_api_base + '/isession/generator.json', methods=['POST'])
def convert_generator_to_json_generator():
    """
    Send the file to the service
    Convert to native
    Return it in JSON format
    :return: 200 with the native (JSON) representation of the submitted generator
    """
    # Check Interactive Session is Open. If not, open it
    isess = deserialize_isession_and_prepare_db_session(False)
    if not isess:
        isess = InteractiveSession(DBSession)
        testing = is_testing_enabled()
        if testing:
            # In testing mode, auto-identify as the test user
            result = isess.identify({"user": "test_user", "password": None}, testing=True)
    # Receive file
    generator_type, content_type, buffer, _, _ = receive_file_submission(request)
    if len(buffer) == 0:
        raise Exception("No content was received. Please check the original file exists.")
    output = convert_generator_to_native(generator_type, content_type, buffer)
    # Return the conversion
    r = build_json_response(output, 200)
    serialize_isession_and_close_db_session(isess)
    return r
@app.route(nis_api_base + '/isession/generator.to_dc.xml', methods=['POST'])
def convert_generator_to_dublin_core():
    """
    Send the file to the service
    Convert to native
    Return the Dublin Core XML record
    :return: 200 with the XML record, 401 if no "metadata" command was found
    """
    # Check Interactive Session is Open. If not, open it
    isess = deserialize_isession_and_prepare_db_session(False)
    if not isess:
        isess = InteractiveSession(DBSession)
        testing = is_testing_enabled()
        if testing:
            # In testing mode, auto-identify as the test user
            result = isess.identify({"user": "test_user", "password": None}, testing=True)
    # Receive file
    generator_type, content_type, buffer, _, _ = receive_file_submission(request)
    if len(buffer) == 0:
        raise Exception("No content was received. Please check the original file exists.")
    output = convert_generator_to_native(generator_type, content_type, buffer)
    # Look for the first "metadata" command and render it as Dublin Core XML
    xml = None
    for c in output:
        if "command" in c and c["command"] == "metadata" and "content" in c:
            xml = generate_dublin_core_xml(c["content"])
            break
    # Return the conversion
    if xml:
        r = Response(xml,
                     mimetype="text/xml",
                     status=200)
    else:
        r = build_json_response({"message": "Could not elaborate Dublin Core XML record from the input generator"}, 401)
    serialize_isession_and_close_db_session(isess)
    return r
# -- Reproducible Sessions --
@app.route(nis_api_base + "/isession/rsession", methods=["POST"])
def reproducible_session_open():
    """
    Open a reproducible session inside the open InteractiveSession.

    Parameters ("uuid", "read_version_state", "create_new", "allow_saving") are
    looked up, in order, in: an uploaded JSON body, the form fields, and the
    query string; the first source yielding any value wins; unset parameters
    get defaults afterwards.

    :return: 204 on success, 401 on error (rsession already open, identity
             required for saving, or failure while opening)
    """
    def read_parameters(dd):
        # Fill the four outer variables from dict-like "dd" (only keys present in it)
        nonlocal uuid2, read_uuid_state, create_new, allow_saving
        # Read query parameters
        uuid2 = dd.get("uuid")
        if "read_version_state" in dd:
            read_uuid_state = dd["read_version_state"]
            read_uuid_state = str2bool(read_uuid_state)
        if "create_new" in dd:
            create_new = str(dd["create_new"])
            if create_new.lower() in ["1", "case_study", "casestudy"]:
                create_new = CreateNew.CASE_STUDY
            elif create_new.lower() in ["2", "version", "case_study_version"]:
                create_new = CreateNew.VERSION
            else:
                create_new = CreateNew.NO
        if "allow_saving" in dd:
            allow_saving = dd["allow_saving"]
            allow_saving = allow_saving.lower() == "true"
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Check identity
    identity = isess.get_identity_id()
    # Retrieve parameters
    uuid2 = None
    read_uuid_state = None
    create_new = None
    allow_saving = None
    # First, read uploaded JSON
    if len(request.files) > 0:
        for k in request.files:
            buffer = bytes(request.files[k].stream.getbuffer())
            content_type = request.files[k].content_type
            break
    else:
        buffer = bytes(io.BytesIO(request.get_data()).getbuffer())
        if "Content-Type" in request.headers:
            content_type = request.headers["Content-Type"]
    if buffer:
        read_parameters(json.loads(buffer))
    # Fall back to form fields, then to the query string, if nothing was read yet
    if not uuid2 and not read_uuid_state and not create_new and not allow_saving:
        read_parameters(request.form)
    if not uuid2 and not read_uuid_state and not create_new and not allow_saving:
        read_parameters(request.args)
    # Defaults for parameters still unset
    if read_uuid_state is None:
        read_uuid_state = True
    if create_new is None:
        create_new = CreateNew.NO
    if allow_saving is None:
        allow_saving = True
    # Persistent object to open: None (new case study), UUID (case study version)
    if isess.reproducible_session_opened():
        r = build_json_response({"error": "There is an open Reproducible Session. Close it first."}, 401)
    else:
        if allow_saving and not identity:
            r = build_json_response({"error": "When 'allow_saving==true' an identity is required."}, 401)
        else:
            try:
                # TODO New, not checked
                isess.reset_state()
                isess.open_reproducible_session(case_study_version_uuid=uuid2,
                                                recover_previous_state=read_uuid_state,
                                                cr_new=create_new,
                                                allow_saving=allow_saving
                                                )
                r = build_json_response({}, 204)
            except Exception as e:
                s = "Exception trying to open reproducible session: "+str(e)
                logger.error(s)
                r = build_json_response({"error": s}, 401)
    #
    serialize_isession_and_close_db_session(isess)
    return r
@app.route(nis_api_base + "/isession/rsession", methods=["DELETE"])
def reproducible_session_save_close():  # Close the ReproducibleSession, with the option of saving it
    """
    Close the open reproducible session, optionally saving it first.

    Query parameters:
    * save_before_close: "true" to persist the session before closing
    * cs_uuid: existing case study UUID to attach the saved session to
    * cs_name: name for the case study version

    :return: 200 with session/version/case-study UUIDs, 401 on error
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal it if not,
    save = request.args.get("save_before_close", "False")
    if save:
        save = str2bool(save)
    else:
        save = False
    # The reproducible session, if saved, could be assigned to an existing case study
    cs_uuid = request.args.get("cs_uuid", None)
    if cs_uuid:
        cs_uuid = str(cs_uuid)
    # If specified, it is the name for the case study Version
    cs_name = request.args.get("cs_name", None)
    if cs_name:
        cs_name = str(cs_name)
    # Close reproducible session
    if not isess.reproducible_session_opened():
        r = build_json_response({"error": "There is no open Reproducible Session. Cannot close"}, 401)
    else:
        try:
            uuid_, v_uuid, cs_uuid = isess.close_reproducible_session(issues=None,
                                                                      output=None,
                                                                      save=save,
                                                                      from_web_service=True,
                                                                      cs_uuid=cs_uuid,
                                                                      cs_name=cs_name)
            r = build_json_response({"session_uuid": str(uuid_),
                                     "version_uuid": str(v_uuid),
                                     "case_study_uuid": str(cs_uuid)
                                     },
                                    200)
        except Exception as e:
            s = "Exception trying to close reproducible session: " + str(e)
            logger.error(s)
            r = build_json_response({"error": s}, 401)
    serialize_isession_and_close_db_session(isess)
    return r
@app.route(nis_api_base + "/isession/rsession", methods=["GET"])
def reproducible_session_get_status():
    """
    Report whether a reproducible session is open inside the interactive
    session ("rsession_open" / "rsession_closed").
    """
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    status = "rsession_open" if isess.reproducible_session_opened() else "rsession_closed"
    return build_json_response(status, 200)
@app.route(nis_api_base + "/isession/rsession/command_generators/<order>", methods=["GET"])
def reproducible_session_get_command_generator(order):  # Return one of the command generators
    """
    Return the content of the <order>-th command generator registered in the
    open reproducible session, with its original content type.

    :param order: zero-based index of the command generator
    :return: the generator content, or a JSON error when out of range / no rsession
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    order = int(order)
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if order < len(isess.reproducible_session.ws_commands):
            c = isess.reproducible_session.ws_commands[order]
            r = Response(c.content, mimetype=c.content_type)
        else:
            # BUG FIX: the command list attribute is "ws_commands" — the original
            # referenced a non-existent "commands" attribute, raising
            # AttributeError whenever an out-of-range index was requested
            r = build_json_response({"error":
                                     "Command number " + str(order) +
                                     " requested, only " + str(len(isess.reproducible_session.ws_commands)) + " available."})
    else:
        r = build_json_response("No open reproducible Session", 200)
    return r
# ----------------------------------------------------------------------------------------------------------------------
# State management: save, list, get, delete ("update"" is "save", overwrite always)
# ----------------------------------------------------------------------------------------------------------------------
@app.route(nis_api_base + "/isession/rsession/state", methods=["PUT"])
def reproducible_session_save_state(): # Save state
    """
    Save or overwrite state in-memory to a file at the backend side
    Receives a "code" Query parameter with the name for the saved state file (which must be unique, unless an overwrite
    is wanted)
    :return: Empty if everything is ok, Error if there is an issue
    """
    def ensure_dir(file_path):
        # Create the directory containing "file_path" if it does not exist yet
        directory = os.path.dirname(file_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if isess.state:
            code = request.args.get("code", None)
            # Validate that "code" parses as a simple identifier; reject it otherwise
            # (presumably this also keeps the file name free of path characters — verify)
            try:
                string_to_ast(simple_ident, code)
            except:
                code = None
            if code is None:
                r = build_json_response({"error": "Query parameter 'code' is mandatory"}, 401)
            else:
                cs_path = nexinfosys.get_global_configuration_variable("CASE_STUDIES_DIR")
                ensure_dir(cs_path)
                # Save state
                s = serialize_state(isess.state)
                with open(cs_path+os.sep+code+".state_serialized", "wt") as f:
                    f.write(s)
                r = build_json_response({}, 204)
        else:
            # No state: nothing to save
            r = build_json_response({}, 204)
    else:
        r = build_json_response({"error": "Cannot save state, no open reproducible session"}, 401)
    return r
@app.route(nis_api_base + "/isession/rsession/state", methods=["DELETE"])
def reproducible_session_delete_state(): # Delete state
    """
    Delete a saved state
    Receives a "code" Query parameter with the name for the saved state file to delete
    :return: Empty if everything is ok, Error if there is an issue
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if isess.state:
            code = request.args.get("code", None)
            # Validate that "code" parses as a simple identifier; reject it otherwise
            try:
                string_to_ast(simple_ident, code)
            except:
                code = None
            if code is None:
                r = build_json_response({"error": "Query parameter 'code' is mandatory"}, 401)
            else:
                cs_path = nexinfosys.get_global_configuration_variable("CASE_STUDIES_DIR")
                fname = cs_path+os.sep+code+".state_serialized"
                if os.path.exists(fname):
                    os.remove(fname)
                    r = build_json_response({}, 204)
                else:
                    r = build_json_response({"error": f"A state with code {code} did not exist"}, 401)
        else:
            # No state in session: treated as nothing to delete
            r = build_json_response({}, 204)
    else:
        r = build_json_response({"error": "Cannot delete state, no open reproducible session"}, 401)
    return r
@app.route(nis_api_base + "/isession/rsession/state/", methods=["GET"])
def reproducible_session_list_states():  # List available states
    """
    List codes of all previously saved states
    :return: A JSON with a single entry "codes", with a list of the codes to address the saved states. Error if there is an issue
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        cs_path = nexinfosys.get_global_configuration_variable("CASE_STUDIES_DIR")
        suffix = ".state_serialized"
        # BUG FIX: only saved-state files count, and the returned values are the
        # codes (file name without the suffix), matching what save/load/delete expect
        lst = [f[:-len(suffix)] for f in os.listdir(cs_path)
               if f.endswith(suffix) and os.path.isfile(f"{cs_path}{os.sep}{f}")]
        # BUG FIX: status must be 200 — the original 204 ("No Content") discards the JSON body
        r = build_json_response({"codes": lst}, 200)
    else:
        r = build_json_response({"error": "Cannot return the list of states, no open reproducible session"}, 401)
    return r
@app.route(nis_api_base + "/isession/rsession/state", methods=["GET"])
def reproducible_session_load_state():
    """
    Loads a previously saved state in the reproducible session. After this call, output datasets can be retrieved or
    new parameters for the dynamic scenario submitted.
    A "code" Query parameter must be passed with a code for the saved state.
    :return: Empty if everything is ok (the state is on the backend side). Error if there is an issue
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        cs_path = nexinfosys.get_global_configuration_variable("CASE_STUDIES_DIR")
        code = request.args.get("code", None)
        # Validate that "code" parses as a simple identifier; reject it otherwise
        try:
            string_to_ast(simple_ident, code)
        except:
            code = None
        if code is None:
            r = build_json_response({"error": "Query parameter 'code' is mandatory"}, 401)
        else:
            fname = cs_path + os.sep + code + ".state_serialized"
            # ROBUSTNESS FIX: report a missing saved state as a 401 JSON error
            # instead of letting open() raise an unhandled FileNotFoundError
            if os.path.exists(fname):
                with open(fname, "rt") as f:
                    s = f.read()
                isess.state = deserialize_state(s)
                r = build_json_response({}, 204)
            else:
                r = build_json_response({"error": f"A state with code {code} does not exist"}, 401)
    else:
        r = build_json_response({"error": "Cannot load state, no open reproducible session"}, 401)
    return r
# ----------------------------------------------------------------------------------------------------------------------
@app.route(nis_api_base + "/isession/rsession/state.pickled", methods=["GET"])
def reproducible_session_get_state():
    """
    Return the current state serialized with jsonpickle (200); {} with 204
    when there is no state; 401 when no reproducible session is open.
    """
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    if not isess.reproducible_session_opened():
        return build_json_response({"error": "Cannot return state, no open reproducible session"}, 401)
    if not isess.state:
        return build_json_response({}, 204)
    return build_json_response(jsonpickle.encode(isess.state), 200)
@app.route(nis_api_base + "/isession/rsession/state_query", methods=["GET"])
def reproducible_session_query_state():  # Query aspects of State
    """
    Query aspects of the current state. The query language is not implemented
    yet, so a call with a state present currently answers 204 with no content.
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if isess.state:
            # TODO Parse query, execute it, return results
            # TODO By concept: Datasets, processors, factors, factor types, hierarchies, mappings, ISSUES (extra MuSIASEM, errors in some level: syntax, semantics, solving)
            # TODO Information: name, quantitites (attached to factors), relations, hierarchy (for hierarchies)
            # TODO By observer
            # BUG FIX: the original left "r" unassigned on this branch,
            # raising UnboundLocalError at "return r"
            r = build_json_response({}, 204)
        else:
            r = build_json_response({}, 204)
    else:
        r = build_json_response({"error": "Cannot return state, no open reproducible session"}, 401)
    return r
@app.route(nis_api_base + "/isession/rsession/state_query/issues", methods=["GET"])
def reproducible_session_query_state_list_issues():  # Query list of issues IN the current state
    """
    List the issues stored in the current state (its "_issues" entry).
    204 when there is no state, 401 when no reproducible session is open.
    """
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    if not isess.reproducible_session_opened():
        return build_json_response({"error": "Cannot return state, no open reproducible session"}, 401)
    if not isess.state:
        return build_json_response([], 204)
    # Missing or falsy "_issues" entry → empty list
    issues = isess.state.get("_issues") or []
    return build_json_response({"issues": issues}, 200)
@app.route(nis_api_base + "/isession/rsession/state_query/everything_executed", methods=["GET"])
def reproducible_session_query_state_everything_executed():  # Query if all commands have been executed
    """
    Report whether every registered command of the open reproducible session
    has an execution start mark set.
    """
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    if isess.reproducible_session_opened():
        # Everything executed == no command lacks "execution_start"
        done = all(c.execution_start for c in isess.reproducible_session.ws_commands)
        r = build_json_response({"everything_executed": done}, 200)
    else:
        r = build_json_response({"error": "Cannot return state, no open reproducible session"}, 401)
    return r
@app.route(nis_api_base + "/isession/rsession/state_query/outputs", methods=["GET"])
@app.route(nis_api_base + "/isession/rsession/state_query/datasets", methods=["GET"])
def reproducible_session_query_state_list_results(): # Query list of outputs (not only datasets) IN the current state
    """
    List the results (datasets and other outputs) available in the current state.

    :return: 200 with the list of results, 204 if there is no state,
             401 if no reproducible session is open
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if isess.state:
            r = build_json_response(get_results_in_session(isess), 200)
        else:
            r = build_json_response([], 204)
    else:
        r = build_json_response({"error": "Cannot return list of results, no reproducible session open"}, 401)
    # NOTE(review): diagnostic dump runs on every path, including the error one
    printNProcessors("LIST OF OUTPUTS", isess.state)
    return r
@app.route(nis_api_base + "/isession/rsession/state_query/webdav", methods=["PUT"])
def copy_resource_to_webdav():
    """
    Read a resource and put the result into WebDAV server
    PROBABLY REQUIRES MULTIPLE WORKERS because datasets are obtained via a recursive "RESTful call"

    JSON body: {"sourceURL": <service-relative URL>, "targetURL": <WebDAV URL>}
    :return: 204 on success, 401 when the target host differs from the configured one
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    tmp = request.get_json()
    source_url = tmp["sourceURL"]
    target_url = tmp["targetURL"]
    from urllib.parse import urlparse
    pr = urlparse(target_url)
    # Check host: only the configured file server is accepted as target
    wv_host_name = nexinfosys.get_global_configuration_variable("FS_SERVER") \
        if nexinfosys.get_global_configuration_variable("FS_SERVER") else "nextcloud.data.magic-nexus.eu"
    if wv_host_name.lower() != pr.netloc:
        return build_json_response({"error": f"Cannot save the file in the requested server location, {pr.netloc}, which is different from the configured one, {wv_host_name}"}, 401)
    # Modify URL: keep the target's directory, take the file name from the source
    target_url = f"{pr.scheme}://{pr.netloc}{os.path.split(pr.path)[0]}"
    pr = urlparse(source_url)
    target_url += f"/{os.path.split(pr.path)[1]}"
    # READ (reentrant)
    self_schema = nexinfosys.get_global_configuration_variable("SELF_SCHEMA") \
        if nexinfosys.get_global_configuration_variable("SELF_SCHEMA") else request.host_url
    import requests
    requested_resource = f"{self_schema}{source_url[1:]}"
    logging.debug(f"REENTRANT REQUEST: {requested_resource}")
    # NOTE(review): verify=False disables TLS verification on the reentrant request — confirm intended
    r = requests.get(requested_resource, cookies=request.cookies, verify=False)
    # WRITE
    wv_upload_file(io.BytesIO(r.content), target_url)
    logging.debug(f"REQUESTED RESOURCE UPLOADED TO NEXTCLOUD at {target_url}")
    return build_json_response([], 204)
# -- DYNAMIC PARAMETERS --
@app.route(nis_api_base + "/isession/rsession/state_query/parameters", methods=["GET"])
def get_parameter_definitions():
    """
    Obtain a JSON enumerating the definition of all the parameters for the case study
    :return: 200 with the parameter definitions
    """
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    return build_json_response(get_parameters_in_state(isess.state), 200)
@app.route(nis_api_base + "/isession/rsession/state_query/parameters", methods=["PUT"])
def set_parameters_and_solve():
    """
    Create an "interactive" scenario, composed by a dictionary of parameter values,
    passed through a JSON in the request, and SOLVE this single scenario.
    As results, create a supermatrix containing only this scenario, and the MatrixIndicators
    :return: 200 with the issues found while solving ("outputs" is always None;
             datasets are retrieved afterwards through the dataset endpoints)
    """
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Parameter values come as the JSON request body
    parameters = request.get_json()
    issues2 = prepare_and_solve_model(isess.state, parameters)
    # Return "issues2", issues found during the solving
    isess.state.set("_issues", issues2)
    # Return outputs (could be a list of binary files)
    r = build_json_response({"issues": convert_issues(issues2), "outputs": None}, 200)
    # Must serialize in order to later recover the datasets
    serialize_isession_and_close_db_session(isess)
    return r
@app.route(nis_api_base + "/isession/rsession/state_query/scenarios", methods=["GET"])
def get_scenarios():
    """
    Return a list of scenarios, with the parameter values of each of them.
    """
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    return build_json_response(get_scenarios_in_state(isess.state), 200)
@app.route(nis_api_base + "/isession/rsession/state_query/geolayer.<format>", methods=["GET"])
def get_geolayer_service(format):
    """Serve the geolayer derived from the current state, in the requested format."""
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    body, mime, success = get_geolayer(isess.state, format)
    status = 200 if success else 401
    return Response(body, mimetype=mime, status=status)
@app.route(nis_api_base + "/isession/rsession/state_query/ontology.<format>", methods=["GET"])
def get_ontology_service(format):
    """Serve the ontology derived from the current state, in the requested format."""
    # TODO OWLREADY2 installation on the Docker image issues a problem
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    body, mime, success = get_ontology(isess.state, format)
    status = 200 if success else 401
    return Response(body, mimetype=mime, status=status)
@app.route(nis_api_base + "/isession/rsession/state_query/python_script.<format>", methods=["GET"])
def get_python_script(format):
    """
    Script able to reproduce what was executed:
    * login
    * open
    * load_workbook
    * load_workbook from Nextcloud, without credentials
    * show how to obtain each of the datasets, commented out (call "query_state_list_results(isess)")
    * show how to use each of the datasets, also commented out
    * Jupyter only: script able to re-launch, select parameters, re-execute, collect datasets (same)
    :param format: "python" (plain script; TODO, currently empty) or "jupyternotebook"
    :return: the script (200), or 401 when the format is not recognized or there is no state
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    output = None
    # Generate script from State
    if isess.state:
        if format == "python":
            # TODO Prepare Python file
            output = io.StringIO()
            mimetype = "application/x-python-code" # or text/x-python
        elif format == "jupyternotebook":
            output = generate_jupyter_notebook_python(isess.state)
            mimetype = "application/x-ipynb+json" # TODO
    if output:
        return Response(output, mimetype=mimetype, status=200)
    else:
        return build_json_response({"error": F"Cannot return Python script, format '{format}' not recognized"}, 401)
@app.route(nis_api_base + "/isession/rsession/state_query/r_script.<format>", methods=["GET"])
def get_r_script(format):
    """
    Return an R script / Jupyter notebook able to reproduce the current session.

    :param format: "r" (plain script; TODO, currently empty) or "jupyternotebook"
    :return: the script (200), or 401 when the format is not recognized or there is no state
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    output = None
    # Generate script from State
    if isess.state:
        if format == "r":
            # TODO Prepare R file
            output = io.StringIO()
            mimetype = "application/r-system" # TODO
        elif format == "jupyternotebook":
            output = generate_jupyter_notebook_r(isess.state)
            mimetype = "application/x-ipynb+json" # TODO
    if output:
        return Response(output, mimetype=mimetype, status=200)
    else:
        return build_json_response({"error": F"Cannot return R script, format '{format}' not recognized"}, 401)
@app.route(nis_api_base + "/isession/rsession/state_query/commands_reference_document.<format>", methods=["GET"])
def get_commands_reference_document(format):
    """
    Return the commands reference document. Only "html" is supported.

    :param format: output format; only "html" is recognized
    :return: the HTML document (200), or a JSON error (401) for other formats
    """
    if format == "html":
        # BUG FIX: mimetype must be a keyword argument — Response's second
        # positional parameter is the HTTP status, so the original passed
        # "text/html" as the status
        return Response(obtain_commands_help(format.lower()), mimetype="text/html")
    # BUG FIX: unsupported formats previously fell through and raised
    # UnboundLocalError; answer with an explicit error instead
    return build_json_response({"error": f"Format '{format}' not supported, only 'html'"}, 401)
@app.route(nis_api_base + "/isession/rsession/state_query/model.<format>", methods=["GET"])
def get_model_service(format):
    """Serve the model derived from the current state, in the requested format."""
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    body, mime, success = get_model(isess.state, format)
    status = 200 if success else 401
    return Response(body, mimetype=mime, status=status)
@app.route(nis_api_base + '/isession/rsession/state_query/flow_graph.<format>', methods=["GET"])
def obtain_flow_graph(format):
    """
    Return the interfaces (flow) graph of the current state, rendered in the
    requested format; {} when there is no state or no graph could be produced.
    """
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    output = None
    # Generate graph from State
    if isess.state:
        output, mimetype, ok = get_graph_from_state(isess.state, f"interfaces_graph.{format}")
    if not output:
        return build_json_response({}, 200)
    return Response(output, mimetype=mimetype, status=200)
@app.route(nis_api_base + '/isession/rsession/state_query/processors_graph.<format>', methods=["GET"])
@app.route(nis_api_base + '/isession/rsession/query/processors_graph.<format>', methods=["GET"])
def obtain_processors_graph_visjs_format(format):
    """
    Return the processors graph of the current state, rendered in the requested
    format; {} when there is no state or no graph could be produced.
    """
    # Recover InteractiveSession (an error Response is forwarded as-is)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    output = None
    # Generate graph from State
    if isess.state:
        output, mimetype, ok = get_graph_from_state(isess.state, f"processors_graph.{format}")
    if not output:
        return build_json_response({}, 200)
    return Response(output, mimetype=mimetype, status=200)
@app.route(nis_api_base + '/isession/rsession/state_query/sankey_graph.json', methods=["GET"])
@app.route(nis_api_base + '/isession/rsession/query/sankey_graph.json', methods=["GET"])
def obtain_sankey_graph():
    """
    Build, from the "flow_graph_matrix" dataset in the current state, a
    dictionary of Plotly-ready Sankey diagram specifications, nested by
    Period and then by Scenario.

    :return: 200 with the Sankey dictionary ({} when there is no state or dataset)
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Generate sankey dictionary ready for plotly from State
    if isess.state:
        _, _, _, datasets, _ = get_case_study_registry_objects(isess.state)
        # ROBUSTNESS FIX: use .get() so a missing "flow_graph_matrix" entry
        # answers {} instead of raising KeyError
        ds = datasets.get("flow_graph_matrix")
        if ds:
            df = ds.data
            sankey = {}
            for p in list(set(df['Period'])):
                df_period = df[df['Period'] == p]
                tmp = {}
                for s in list(set(df_period['Scenario'])):
                    ds_scenario = df_period[df_period['Scenario'] == s]
                    # Node list: union of source and target processors
                    # (plain list concatenation instead of the deprecated Series.append)
                    processors = list(set(list(ds_scenario['source_processor']) +
                                          list(ds_scenario['target_processor'])))
                    source = [processors.index(i) for i in list(ds_scenario['source_processor'])]
                    target = [processors.index(i) for i in list(ds_scenario['target_processor'])]
                    label = list(ds_scenario['source'] + ' to ' + ds_scenario['target'])
                    data = dict(
                        type='sankey',
                        node=dict(
                            pad=50,
                            thickness=100,
                            line=dict(
                                color="black",
                                width=0.5
                            ),
                            label=processors,
                        ),
                        link=dict(
                            source=source,
                            target=target,
                            value=list(ds_scenario['Value']),
                            label=label
                        ))
                    tmp[s] = data
                sankey[p] = tmp
            r = build_json_response(sankey, 200)
        else:
            r = build_json_response({}, 200)
    else:
        r = build_json_response({}, 200)
    return r
@app.route(nis_api_base + "/isession/rsession/state_query/datasets/<name>.<format>", methods=["GET"])
@gzipped
def reproducible_session_query_state_get_dataset(name, format):  # Query a dataset IN the current state
    """
    Return dataset "name" from the current state, rendered in "format".
    Query parameter "labels" (default "True") controls label substitution.

    :param name: dataset name
    :param format: output format for the dataset
    :return: the dataset (200), [] (204) if no state, error (401) otherwise
    """
    # BUG FIX (decorator order): @gzipped must sit BELOW @app.route. Decorators
    # apply bottom-up, so with @gzipped on top Flask registered the unwrapped
    # view function and responses were never gzip-compressed.
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if isess.state:
            labels_enabled = request.args.get("labels", "True") == "True"
            content, content_type, ok = get_dataset_from_state(isess.state, name, format, labels_enabled)
            r = Response(content, mimetype=content_type, status=200 if ok else 401)
        else:
            r = build_json_response([], 204)
    else:
        r = build_json_response({"error": "Cannot return state, no open reproducible session"}, 401)
    return r
# ----------------------------------------------------------------------------------------------------------------------
@app.route(nis_api_base + "/isession/rsession/command", methods=["POST"])
def reproducible_session_append_single_command(): # Receive a JSON or CSV command from some externally executed generator
    """
    Append a single command (JSON body with "command", "content" and optional
    "label") to the open reproducible session, optionally registering and/or
    executing it (query parameters "register" and "execute", default True).

    :return: 204 on success, 400 when no reproducible session is open
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open
    if isess.reproducible_session_opened():
        # Read content type header AND infer "generator_type"
        content_type = request.headers["Content-Type"]
        if content_type.lower() in ["application/json", "text/json", "text/csv"]:
            generator_type = "native"  # NOTE(review): assigned but not used below
        # Read binary content
        if len(request.files) > 0:
            for k in request.files:
                buffer = bytes(request.files[k].stream.getbuffer())
                break
        else:
            buffer = bytes(io.BytesIO(request.get_data()).getbuffer())
        # Read Execute and Register parameters
        execute = request.args.get("execute", "True")
        if execute:
            execute = str2bool(execute)
        register = request.args.get("register", "True")
        if register:
            register = str2bool(register)
        # Decode the body into a dict and build the command from it
        if isinstance(buffer, bytes):
            d = buffer.decode("utf-8")
        else:
            d = buffer
        d = json.loads(d)
        if isinstance(d, dict) and "command" in d and "content" in d:
            if "label" in d:
                n = d["label"]
            else:
                n = None
            cmd, syntax_issues = create_command(d["command"], n, d["content"])
            if register:
                isess.register_executable_command(cmd)
            if execute:
                issues, output = isess.execute_executable_command(cmd)
                # TODO Process "ret". Add issues to an issues list. Add output to an outputs list.
        r = build_json_response({}, 204)
        serialize_isession_and_close_db_session(isess)
    else:
        r = build_json_response({"error": "A reproducible session must be open in order to submit a command"}, 400)
    return r
def receive_file_submission(req):
    """
    Receive file submitted using multipart/form-data
    Return variables for the processing of the file as a command_executors generator

    :param req: The "request" object
    :return: A tuple (generator_type -str or None-, content_type -str-, buffer -bytes-, execute -bool-, register -bool-)
    """
    def parse_data_url(url):
        # Split a "data:" URL into its decoded binary payload and its media type
        scheme, data = url.split(":", 1)
        assert scheme == "data", "unsupported scheme: " + scheme
        mediatype, data = data.split(",", 1)
        # base64 urls might have a padding which might (should) be quoted:
        data = urllib.parse.unquote_to_bytes(data)
        if mediatype.endswith(";base64"):
            return binascii.a2b_base64(data), mediatype[:-7] or None
        else:
            return data, mediatype or None

    # Read binary content: first uploaded file if any, otherwise the raw request body
    if len(req.files) > 0:
        for k in req.files:
            buffer = bytes(req.files[k].stream.getbuffer())
            content_type = req.files[k].content_type
            it_is_url = False
            break
    else:
        buffer = bytes(io.BytesIO(req.get_data()).getbuffer())
        content_type = req.content_type
        it_is_url = buffer.startswith(b"data") or buffer.startswith(b"http")

    if it_is_url:
        url = buffer.decode("utf-8")
        if not url.startswith("data"):
            # Try a download from the URL
            # Check if it is a Google Drive file, a Nextcloud file or a freely downloadable file
            data = download_file(url)
            buffer = data.getvalue()
            content_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        else:
            # It may be a DATA URL
            buffer, content_type = parse_data_url(url)

    # Infer "generator_type" from content type.
    # FIX: default to None. The original left "generator_type" unassigned when the
    # content type matched none of the branches, raising UnboundLocalError at the
    # return statement instead of letting the caller report a meaningful error.
    generator_type = None
    if content_type.lower() in ["application/json", "text/csv"]:
        generator_type = "primitive"
    elif content_type.lower() in ["application/excel",
                                  "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"]:
        generator_type = "spreadsheet"
    elif content_type.lower() in ["text/x-r-source"]:
        generator_type = "R-script"
    elif content_type.lower() in ["text/x-python", "text/x-python3", "application/x-python3"]:
        generator_type = "python-script"

    # Read Register and Execute parameters: form field first, then query string, else False
    register = req.form.get("register")
    if not register:
        register = req.args.get("register")
    if not register:
        register = False
    execute = req.form.get("execute")
    if not execute:
        execute = req.args.get("execute")
    if not execute:
        execute = False
    execute = str2bool(execute)
    register = str2bool(register)

    return generator_type, content_type, buffer, execute, register
def reset_state_and_reproducible_session(isess: InteractiveSession):
    """
    Reset on every submission: clear the in-memory state, discard the current
    reproducible session (without saving it), then open a brand-new one that
    allows saving and does not recover previous state.

    :param isess: The InteractiveSession to reset
    :return: None
    """
    isess.reset_state()
    isess.close_reproducible_session(
        issues=None, output=None, save=False,
        from_web_service=False, cs_uuid=None, cs_name=None)
    isess.open_reproducible_session(
        case_study_version_uuid=None,
        recover_previous_state=False,
        cr_new=True,
        allow_saving=True)
# #################################################################################################################### #
# MAIN POINT OF EXECUTION BY THE GENERIC CLIENT ("ANGULAR FRONTEND") #
# #################################################################################################################### #
def convert_issues(iss_lst):
    """
    Convert issues generated by the backend into a list of dictionaries as expected by the frontend

    :param iss_lst: Issues list (normally Issue objects; other objects are reported as unknown)
    :return: Issue list in frontend compatible format
    """
    out = []
    for i in iss_lst:
        # Default (empty) location, overridden below when the Issue carries one
        location = dict(sheet_name="", row=None, col=None)
        if isinstance(i, Issue):
            # FIX: severity is derived only for real Issue objects. The original read
            # "i.itype.value" before the isinstance check, so any non-Issue element
            # raised AttributeError and the "Issue type unknown" branch was unreachable.
            i_type = "Error" if i.itype.value == 3 else ("Warning" if i.itype.value == 2 else "Info")
            if i.location is not None:
                location = dict(sheet_name=i.location.sheet_name, row=str(i.location.row), col=str(i.location.column))
            out.append(dict(**location, message=i_type + ": " + i.description, type=i.itype.value))
        else:
            out.append(dict(**location, message="Issue type unknown", type=3))
    return out
@app.route(nis_api_base + "/isession/rsession/generator", methods=["POST"])
def reproducible_session_append_command_generator():  # Receive a command_executors generator, like a Spreadsheet file, an R script, or a full JSON command_executors list (or other)
    """
    Main submission entry point used by the generic client ("Angular frontend"):
    receive a whole command generator (spreadsheet, script, JSON...), reset the
    state, register default system-level entities, parse/execute the generator,
    solve the model, and return the issues produced along the way.

    :return: 200 with {"issues": [...], "outputs": None}; 400 if no reproducible session is open
    """
    import time
    logging.debug("### SUBMISSION STARTS ###")
    start = time.time()
    # Recover InteractiveSession (a Response value means an error to relay to the client)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess

    # A reproducible session must be open
    if isess.reproducible_session_opened():
        # Reset!!
        # TODO Maybe do this only when some parameter is True
        reset_state_and_reproducible_session(isess)
        # Add system-level entities from JSON definition in "default_cmds"
        ret = isess.register_andor_execute_command_generator("json", "application/json", nexinfosys.default_cmds, False, True)
        # Check that objects have been properly registered
        # glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(isess._state)
        # ps = glb_idx.get(Parameter.partial_key())
        # hs = glb_idx.get(Hierarchy.partial_key())

        # PARSE AND BUILD!!!
        generator_type, content_type, buffer, execute, register = receive_file_submission(request)
        try:
            ret = isess.register_andor_execute_command_generator(generator_type, content_type, buffer, register, execute)
            if isinstance(ret, tuple):
                issues = ret[0]
            else:
                issues = []
            # TODO CHECK SEMANTIC INCONSISTENCIES. Referred to values in Interfaces use either Parameters
            # SOLVE !!!! (only when parsing did not produce any error issue)
            if not any_error_issue(issues):
                issues2 = prepare_and_solve_model(isess.state)
                issues.extend(issues2)
        except Exception as e:
            traceback.print_exc()  # Print the Exception to std output
            # Obtain trace as string; split lines in string; take the last three lines
            # Walk the trace backwards looking for the deepest frame inside "nexinfosys"
            tmp = traceback.format_exc().splitlines()
            for i in range(len(tmp)-3, 0, -2):
                if tmp[i].find("nexinfosys") != -1:
                    tmp = [tmp[-1], tmp[i], tmp[i+1]]
                    break
            else:
                # for/else: no "nexinfosys" frame was found in the traceback
                tmp = [tmp[-1], "Nexinfosys module not found", "Line not found"]
            exc_info = ' :: '.join([s.strip() for s in tmp])
            # Error Issue with the extracted Exception text
            issues = [Issue(itype=IType.ERROR,
                            description=f"UNCONTROLLED CONDITION: {exc_info}. Please, contact the development team.",
                            location=None)]

        # STORE the issues in the state
        # TODO If issues are produced by different generators, this will overwrite results from the previous generator
        isess.state.set("_issues", issues)

        # Return the issues if there were any.
        # TODO Return outputs (could be a list of binary files)
        r = build_json_response({"issues": convert_issues(issues), "outputs": None}, 200)
        # TODO Important!!! The R script generator can be executed remotely and locally. In the first case, it
        # TODO could be desired to store commands. But the library, when executed at the server, will be passed a flag
        # TODO to perform every call with the registering disabled.
        printNProcessors("SUBMISSION", isess.state)
        serialize_isession_and_close_db_session(isess)
    else:
        r = build_json_response({"error": "A reproducible session must be open in order to submit a generator"}, 400)

    endt = time.time()
    logging.debug(F"### SUBMISSION FINISHED: {endt-start} ###")
    return r
@app.route(nis_api_base + "/isession/rsession/ensure_executed", methods=["PUT"])
def reproducible_session_execute_not_executed_command_generators():  # Executes commands pending execution
    """
    Execute the command generators of the open reproducible session that are
    still pending execution, from the earliest pending one to the last.

    :return: 200 with the issues of the last executed generator, or
             {"everything_executed": True} when nothing was pending;
             400 if no reproducible session is open
    """
    # Recover InteractiveSession (a Response value means an error to relay to the client)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess

    # A reproducible session must be open
    if isess.reproducible_session_opened():
        # Find the EARLIEST command generator which was NOT executed (scan from the end)
        first_i = len(isess.reproducible_session.ws_commands)
        for i in range(len(isess.reproducible_session.ws_commands)-1, -1, -1):
            c = isess.reproducible_session.ws_commands[i]
            if not c.execution_start:
                first_i = i
        if first_i < len(isess.reproducible_session.ws_commands):
            # Execute, one after the other, starting at "first_i"
            persist_version_state = None
            executed_cmds = []
            issues = []
            for i in range(first_i, len(isess.reproducible_session.ws_commands)):
                c = isess.reproducible_session.ws_commands[i]
                if persist_version_state is None:
                    persist_version_state = c.id is not None  # Persist if the command is already persisted
                # The state is modified
                try:
                    ret = isess.register_andor_execute_command_generator1(c, register=False, execute=True)
                    executed_cmds.append(c)
                except Exception:
                    # FIX: narrowed from a bare "except:". Also, the original assigned a plain
                    # list here, so the isinstance(ret, tuple) test below was always False and
                    # the error was silently discarded; a tuple propagates the error issue.
                    ret = ([("error", "Command execution did not end due to an Exception")], None)
                if isinstance(ret, tuple):
                    issues = ret[0]
                else:
                    issues = []
            # STORE the issues in the state
            # TODO If issues are produced by different generators, this will overwrite results from the previous generator
            isess.state.set("_issues", issues)
            if persist_version_state:  # TODO Does this work as expected?
                isess.reproducible_session.update_current_version_state(executed_cmds)
            # Return the issues if there were any.
            # TODO Return outputs (could be a list of binary files)
            r = build_json_response({"issues": issues, "outputs": None, "everything_executed": False}, 200)
        else:
            r = build_json_response({"everything_executed": True}, 200)
        # TODO Important!!! The R script generator can be executed remotely and locally. In the first case, it
        # TODO could be desired to store commands. But the library, when executed at the server, will be passed a flag
        # TODO to perform every call with the registering disabled.
        serialize_isession_and_close_db_session(isess)
    else:
        r = build_json_response({"error": "A reproducible session must be open in order to execute generators"}, 400)

    return r
# -- Reproducible Session Query --
# - INSTEAD OF COMMANDS, DIRECT EXECUTION (NOT REGISTERED)
# -----
@app.route(nis_api_base + '/nis_files.json', methods=['GET'])
def list_of_registered_nis_files():
    """
    Return a list of either importable or example NIS files.

    Each configured URL in "NIS_FILES_LIST" points to a CSV describing files
    (columns "name", "url", "example"); if nothing could be gathered, a
    hard-coded default list is returned instead.

    :return: JSON response (200) with a list of file descriptors
    """
    files = []
    if "NIS_FILES_LIST" in app.config:
        for list_url in app.config["NIS_FILES_LIST"].split(","):
            try:
                csv_text = download_file(list_url).getvalue().decode("UTF-8")
                frame = pd.read_csv(io.StringIO(csv_text), skipinitialspace=True, quoting=csv.QUOTE_ALL)
                frame.columns = [col.strip().lower() for col in frame.columns]
                for _, row in frame.iterrows():
                    files.append(dict(name=row["name"], url=row["url"], example=row["example"], description=""))
            except Exception:
                traceback.print_exc()  # Best effort: skip lists that fail to download/parse
    if not files:
        # Fallback: hard-coded entries (name, url, is_example)
        defaults = [
            ("MuSIASEM hierarchies", "https://nextcloud.data.magic-nexus.eu/remote.php/webdav/NIS_internal/WP4/D4.3%20global%20food%20supply%20and%20diets/README.md", False),
            ("Water grammar", "https://nextcloud.data.magic-nexus.eu/remote.php/webdav/NIS_internal/WP4/D4.3%20global%20food%20supply%20and%20diets/README.md", False),
            ("Energy grammar", "https://nextcloud.data.magic-nexus.eu/remote.php/webdav/NIS_internal/WP4/D4.3%20global%20food%20supply%20and%20diets/README.md", False),
            ("Food grammar", "https://nextcloud.data.magic-nexus.eu/remote.php/webdav/NIS_internal/WP4/D4.3%20global%20food%20supply%20and%20diets/README.md", False),
            ("Biofuel", "https://nextcloud.data.magic-nexus.eu/remote.php/webdav/NIS_internal/WP6/CS6_2_Biofuels/README.md", True),
            ("Gran Canaria", "https://nextcloud.data.magic-nexus.eu/remote.php/webdav/NIS_internal/WP6/CS6_6_Alternative_water_sources/README.md", True),
            ("Tenerife", "https://nextcloud.data.magic-nexus.eu/remote.php/webdav/NIS_internal/WP6/CS6_6_Alternative_water_sources/README.md", True),
        ]
        files = [dict(name=nm, url=u, example=ex, description="") for nm, u, ex in defaults]
    return build_json_response(files, 200)
# -- Case studies --
@app.route(nis_api_base + '/case_studies/', methods=['POST'])
def new_case_study_from_file():
    """
    Create a NEW case study from a submitted file.

    Check that the user is authorized to submit a new case study
    Open a reproducible session
    Send the file to the service
    Close the reproducible session

    :return: 204 empty response on success; 401 if the reproducible session could not be opened
    """
    # Check Interactive Session is Open. If not, open it
    isess = deserialize_isession_and_prepare_db_session(False)
    if not isess:
        isess = InteractiveSession(DBSession)
    # TODO Check User Credentials (from Token)
    testing = is_testing_enabled()
    if testing:
        result = isess.identify({"user": "test_user", "password": None}, testing=True)
    # TODO Check User has Create New Case Study permissions
    # Receive file (also parses "register"/"execute" flags from the request)
    generator_type, content_type, buffer, execute, register = receive_file_submission(request)
    # Open Reproducible Session, NEW case study; saving only allowed when registering
    try:
        isess.open_reproducible_session(case_study_version_uuid=None,
                                        recover_previous_state=False,
                                        cr_new=CreateNew.CASE_STUDY,
                                        allow_saving=register
                                        )
    except Exception as e:
        s = "Exception trying to open reproducible session: "+str(e)
        logger.error(s)
        return build_json_response({"error": s}, 401)
    # Submit file to the Interactive Session (which has the open reproducible session)
    issues, output = isess.register_andor_execute_command_generator(generator_type, content_type, buffer, register, execute)
    # Close Reproducible Session (persisted only when "register" is True)
    isess.close_reproducible_session(issues=issues, output=output, save=register, from_web_service=False)
    # TODO Return the issues if there were any. Return outputs (could be a list of binary files)
    r = build_json_response({}, 204)
    serialize_isession_and_close_db_session(isess)
    return r
@app.route(nis_api_base + "/case_studies/", methods=["GET"])
def case_studies():  # List case studies available for current user
    """
    List the case studies readable by the current user, with their versions.

    Example:
    [
      {"resource": "/case_studies/<case study uuid>",
       "uuid": "<uuid>",
       "name": "Food in the EU",
       "oid": "zenodo.org/2098235",
       "internal_code": "CS1_F_E",
       "description": "...",
       "stats":
         {
          "n_versions": "<# of versions>",
          "n_commands": "<# of command_executors latest version>",
          "n_hierarchies": <# of hierarchies latest version>",
         }
       "versions": "/case_studies/<uuid>/short.json"
       "thumbnail": "/case_studies/<uuid>/thumbnail.svg|html|png"
      },
      ...
    ]
    :return: JSON response with the list of case study dictionaries
    """
    def get_avatar_path(cstudy):
        """
        From the areas of a case study, obtain the file name representing these areas

        :param cstudy: CaseStudy object
        :return: String with the URL subpath to the file name
        """
        areas = cstudy.areas
        if areas:
            name = ""
            if "W" in areas:
                name += "Water"
            if "E" in areas:
                name += "Energy"
            if "F" in areas:
                name += "Food"
            return "/static/images/" + name + "Nexus.png"
        else:
            return "/static/images/NoNexusAreas.png"  # TODO create this image

    def get_version_dict(vs):
        # Build the frontend dictionary for a CaseStudyVersion.
        # NOTE(review): closes over "uuid2", which is assigned in the loop below
        # before this helper is called — do not call it outside that loop.
        uuid3 = str(vs.uuid)
        authors = ", ".join([ss.who.name for ss in vs.sessions if ss.who])
        version = {"uuid": uuid3,
                   "cs_uuid": str(vs.case_study.uuid),  # Redundant but helps in the user interface, to obtain the CS UUID
                   "authors": authors,
                   "creation_date": vs.creation_instant.isoformat(timespec="seconds") if vs.creation_instant else "<no date available>",
                   "cs_name": vs.name,
                   "name": vs.creation_instant.isoformat(sep=" ", timespec="seconds") + " [" +authors + "]",
                   "resource": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3,
                   "detail": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3,
                   "issues": None,  # [{"type": "error", "description": "syntax error in command ..."}],
                   }
        return version

    # Recover InteractiveSession (a Response value means an error to relay to the client)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    if not user:
        user = "_anonymous"
    # Recover case studies READABLE by current user (or "anonymous")
    if not isess:
        session = DBSession()
    else:
        session = isess.open_db_session()
    # TODO Obtain case studies FILTERED by current user permissions. Show case studies with READ access enabled
    # TODO Access Control
    # TODO   CS in acl and user in acl.detail and acl.detail is READ, WRITE,
    # TODO   CS in acl and group acl.detail and user in group
    lst = session.query(CaseStudy).all()
    lst2 = []
    for cs in lst:
        uuid2 = str(cs.uuid)
        # Versions sorted newest first; the case study display name is taken from the newest version
        vs_lst = sorted([get_version_dict(v) for v in cs.versions], key=lambda v: v["creation_date"], reverse=True)
        name = vs_lst[0]["cs_name"]
        d = {"resource": nis_api_base + "/case_studies/"+uuid2,
             "uuid": uuid2,
             "name": name if name else "<empty>",
             "oid": cs.oid if cs.oid else "<empty>",  # TODO
             "internal_code": cs.internal_code if cs.internal_code else "",  # TODO
             "description": cs.description if cs.description else "",  # TODO
             "stats": {
                 "n_versions": str(len(cs.versions)),
                 "n_commands": str(len([])),  # TODO
                 "n_hierarchies": str(len([])),  # TODO
             },
             "versions": vs_lst,
             "thumbnail": nis_api_base + "/case_studies/" + uuid2 + "/default_view.png",
             "thumbnail_png": nis_api_base + "/case_studies/" + uuid2 + "/default_view.png",
             "thumbnail_svg": nis_api_base + "/case_studies/" + uuid2 + "/default_view.svg",
             "avatar": nis_api_base + get_avatar_path(cs),  # Icon representing the type of Nexus study
             "case_study_permissions":
                 {
                     "read": True,
                     "annotate": True,
                     "contribute": True,
                     "share": False,
                     "delete": False
                 }
             }
        lst2.append(d)
    # print(json.dumps(lst2, default=json_serial, sort_keys=True, indent=JSON_INDENT, ensure_ascii=ENSURE_ASCII, separators=(',', ': '))
    # )
    r = build_json_response(lst2)  # TODO Improve it, it must return the number of versions. See document !!!
    if isess:
        isess.close_db_session()
    else:
        DBSession.remove()
    return r
@app.route(nis_api_base + "/case_studies2/", methods=["GET"])
def case_studies2():  # List case studies
    """
    List case studies (variant of "case_studies": versions are returned as a URL,
    not expanded inline, and the name comes from the CaseStudy itself).

    Example:
    [
      {"resource": "/case_studies/<case study uuid>",
       "uuid": "<uuid>",
       "name": "Food in the EU",
       "oid": "zenodo.org/2098235",
       "internal_code": "CS1_F_E",
       "description": "...",
       "stats":
         {
          "n_versions": "<# of versions>",
          "n_commands": "<# of command_executors latest version>",
          "n_hierarchies": <# of hierarchies latest version>",
         }
       "versions": "/case_studies/<uuid>/short.json"
       "thumbnail": "/case_studies/<uuid>/thumbnail.svg|html|png"
      },
      ...
    ]
    :return: JSON response with the list of case study dictionaries
    """
    def get_avatar_path(cstudy):
        """
        From the areas of a case study, obtain the file name representing these areas

        :param cstudy: CaseStudy object
        :return: String with the URL subpath to the file name
        """
        areas = cstudy.areas
        if areas:
            name = ""
            if "W" in areas:
                name += "Water"
            if "E" in areas:
                name += "Energy"
            if "F" in areas:
                name += "Food"
            return "/static/images/" + name + "Nexus.png"
        else:
            return "/static/images/NoNexusAreas.png"  # TODO create this image

    # Recover InteractiveSession (a Response value means an error to relay to the client)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    if not user:
        user = "_anonymous"
    # Recover case studies READABLE by current user (or "anonymous")
    if not isess:
        session = DBSession()
    else:
        session = isess.open_db_session()
    # TODO Obtain case studies FILTERED by current user permissions. Show case studies with READ access enabled
    # Access Control
    #   CS in acl and user in acl.detail and acl.detail is READ, WRITE,
    #   CS in acl and group acl.detail and user in group
    # FIX: removed unused local 'base = app.config["APPLICATION_ROOT"]' (never referenced)
    lst = session.query(CaseStudy).all()
    lst2 = []
    for cs in lst:
        uuid2 = str(cs.uuid)
        d = {"resource": nis_api_base + "/case_studies/"+uuid2,
             "uuid": uuid2,
             "name": cs.name if cs.name else "<empty>",
             "oid": cs.oid if cs.oid else "<empty>",  # TODO
             "internal_code": cs.internal_code if cs.internal_code else "",  # TODO
             "description": cs.description if cs.description else "",  # TODO
             "stats": {
                 "n_versions": str(len(cs.versions)),
                 "n_commands": str(len([])),  # TODO
                 "n_hierarchies": str(len([])),  # TODO
             },
             "versions": nis_api_base + "/case_studies/" + uuid2 + "/versions/",
             "thumbnail": nis_api_base + "/case_studies/" + uuid2 + "/default_view.png",
             "thumbnail_png": nis_api_base + "/case_studies/" + uuid2 + "/default_view.png",
             "thumbnail_svg": nis_api_base + "/case_studies/" + uuid2 + "/default_view.svg",
             "avatar": nis_api_base + get_avatar_path(cs),  # Icon representing the type of Nexus study
             "case_study_permissions":
                 {
                     "read": True,
                     "annotate": True,
                     "contribute": True,
                     "share": False,
                     "delete": False
                 }
             }
        lst2.append(d)
    # print(json.dumps(lst2, default=json_serial, sort_keys=True, indent=JSON_INDENT, ensure_ascii=ENSURE_ASCII, separators=(',', ': '))
    # )
    r = build_json_response(lst2)  # TODO Improve it, it must return the number of versions. See document !!!
    if isess:
        isess.close_db_session()
    else:
        DBSession.remove()
    return r
@app.route(nis_api_base + "/case_studies/<cs_uuid>", methods=["GET"])
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/", methods=["GET"])
def case_study(cs_uuid):  # Information about case study
    """
    Return information about one case study, including its versions and their sessions.

    {"case_study": "<uuid>",
     "name": "Food in the EU",
     "oid": "zenodo.org/2098235",
     "internal_code": "CS1_F_E",
     "resource": "/case_studies/<case study uuid>",
     "description": "...",
     "versions":
       [
         {"uuid": "<uuid>",
          "resource": "/case_studies/<case study uuid>/<version uuid>",
          "tag": "v0.1",
          "sessions":
            [
              {"uuid": "<uuid>",
               "open_date": "2017-09-20T10:00:00Z",
               "close_date": "2017-09-20T10:00:10Z",
               "client": "spreadsheet",
               "restart": True,
               "author": "<uuid>",
              },
              ...
            ]
          "detail": "/case_studies/<case study uuid>/<version uuid>/long.json"
          "generator": "/case_studies/<case study uuid>/<version uuid>/generator.xlsx",
          "state": "/case_studies/<case study uuid>/<version uuid>/state.xlsx",
          "issues": [{"type": "error", "description": "syntax error in command ..."}, ...],
         },
         ...
       ]
    }
    :param cs_uuid: UUID of the case study
    :return: JSON response (404 if the case study does not exist)
    """
    def get_version_dict(vs):
        # Build the dictionary for a CaseStudyVersion.
        # NOTE(review): closes over "mode" and "uuid2" from the enclosing scope;
        # "uuid2" is assigned inside the "if cs:" branch before this is called.
        def get_session_dict(ss):
            # Build the dictionary for a CaseStudyVersionSession
            uuid4 = str(ss.uuid)
            v_session = {"uuid": uuid4,
                         "open_date": str(ss.open_instant),
                         "close_date": str(ss.close_instant),
                         "client": "spreadsheet",  # TODO Spreadsheet, R script, Python script, <Direct?>
                         "restart": ss.restarts,
                         "author": ss.who.name
                         }
            if mode == "tree":
                # "tree" mode wraps dictionaries as expected by the frontend tree widget
                v_session = {"data": v_session}
            else:
                pass
            return v_session

        uuid3 = str(vs.uuid)
        version = {"uuid": uuid3,
                   "resource": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3,
                   "tag": "v0.1",
                   "detail": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3,
                   "state": nis_api_base + "/case_studies/" + uuid2 + "/versions/"+uuid3+"/state.xlsx",
                   "issues": None,  # [{"type": "error", "description": "syntax error in command ..."}],
                   "generator": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3+"/generator.xlsx",
                   }
        if mode == "tree":
            version = {"data": version, "children": [get_session_dict(s) for s in vs.sessions]}
        else:
            version["sessions"] = [get_session_dict(s) for s in vs.sessions]
        return version

    # Recover InteractiveSession (a Response value means an error to relay to the client)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    if not user:
        user = "_anonymous"
    mode = "tree"  # Output format used by get_version_dict/get_session_dict above
    # Recover case studies READABLE by current user (or "anonymous")
    session = isess.open_db_session()
    # TODO Obtain case study, filtered by current user permissions
    # Access Control
    #   CS in acl and user in acl.detail and acl.detail is READ, WRITE,
    #   CS in acl and group acl.detail and user in group
    cs = session.query(CaseStudy).filter(CaseStudy.uuid == cs_uuid).first()
    if cs:
        uuid2 = str(cs.uuid)
        d = {"uuid": uuid2,
             "name": cs.name if cs.name else "<empty>",
             "oid": cs.oid if cs.oid else "<empty>",
             "internal_code": cs.internal_code if cs.internal_code else "",  # TODO
             "description": cs.description if cs.description else "",  # TODO
             "resource": nis_api_base + "/case_studies/"+uuid2,
             "versions": [get_version_dict(v) for v in cs.versions],
             "case_study_permissions":
                 {
                     "read": True,
                     "annotate": True,
                     "contribute": True,
                     "share": False,
                     "delete": False
                 },
             }
        # print(json.dumps(d, default=json_serial, sort_keys=True, indent=JSON_INDENT, ensure_ascii=ENSURE_ASCII, separators=(',', ': ')))
        r = build_json_response(d)
    else:
        r = build_json_response({"error": "The case study '"+cs_uuid+"' does not exist."}, 404)
    isess.close_db_session()
    return r
@app.route(nis_api_base + "/case_studies/<cs_uuid>", methods=["POST"])
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/", methods=["POST"])
def new_case_study_version_from_file(cs_uuid):
    """
    Create a NEW VERSION of an existing case study from a submitted file.

    Check that the user is authorized to submit a new case study version
    Open a reproducible session
    Send the file to the service
    Close the reproducible session

    :param cs_uuid: UUID of case study
    :return: 204 empty response on success; 401 if the reproducible session could not be opened
    """
    # Check Interactive Session is Open. If not, open it
    isess = deserialize_isession_and_prepare_db_session(False)
    if not isess:
        isess = InteractiveSession(DBSession)
    # TODO Check User Credentials (from Token)
    testing = is_testing_enabled()
    if testing:
        result = isess.identify({"user": "test_user", "password": None}, testing=True)
    # TODO Check User has Write Case Study permissions
    # Receive file (also parses "register"/"execute" flags from the request)
    generator_type, content_type, buffer, execute, register = receive_file_submission(request)
    # Open Reproducible Session, NEW VERSION of the given case study
    try:
        isess.open_reproducible_session(case_study_version_uuid=cs_uuid,
                                        recover_previous_state=False,
                                        cr_new=CreateNew.VERSION,
                                        allow_saving=register
                                        )
    except Exception as e:
        s = "Exception trying to open reproducible session: "+str(e)
        logger.error(s)
        return build_json_response({"error": s}, 401)
    # Submit file to the Interactive Session (which has the open reproducible session)
    issues, output = isess.register_andor_execute_command_generator(generator_type, content_type, buffer, register, execute)
    # Close Reproducible Session (persisted only when "register" is True)
    isess.close_reproducible_session(issues=issues, output=output, save=register, from_web_service=False)
    # TODO Return the issues if there were any. Return outputs (could be a list of binary files)
    r = build_json_response({}, 204)
    serialize_isession_and_close_db_session(isess)
    return r
# @app.route(nis_api_base + "/case_studies/<cs_uuid>", methods=["DELETE"])
# def case_study_delete(cs_uuid): # DELETE a case study
# # Recover InteractiveSession
# isess = deserialize_isession_and_prepare_db_session()
# if isess and isinstance(isess, Response):
# return isess
#
# # TODO Check permissions
# # TODO If possible, deleet ALL the case study
@app.route(nis_api_base + "/case_studies/<cs_uuid>/default_view.png", methods=["GET"])
def case_study_default_view_png(cs_uuid):  # Return a view of the case study in PNG format, for preview purposes
    """
    Return a PNG preview image for the requested case study.
    Currently a static placeholder image is served for every case study.

    :param cs_uuid: UUID of the case study (not used yet)
    """
    isess = deserialize_isession_and_prepare_db_session()
    if isinstance(isess, Response):
        return isess
    user = isess.get_identity_id() or "_anonymous"
    # TODO Query the case study (with access control), scan its variables for the
    # TODO  most interesting ones (grammar, data) plus thumbnail hints, and
    # TODO  elaborate a real PNG view from them instead of the placeholder
    return send_static_file("images/case_study_preview_placeholder.png")
@app.route(nis_api_base + "/case_studies/<cs_uuid>/default_view.svg", methods=["GET"])
def case_study_default_view_svg(cs_uuid):  # Return a view of the case study in SVG format, for preview purposes
    """
    Return an SVG preview of the case study. The elaboration of the SVG view is
    not implemented yet, so an explicit 501 JSON response is returned.

    :param cs_uuid: UUID of the case study
    """
    # Recover InteractiveSession (a Response value means an error to relay to the client)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    if not user:
        user = "_anonymous"
    # Recover case studies READABLE by current user (or "anonymous")
    session = isess.open_db_session()
    # TODO Obtain case study, filtered by current user permissions
    # Access Control
    #   CS in acl and user in acl.detail and acl.detail is READ, WRITE,
    #   CS in acl and group acl.detail and user in group
    cs = session.query(CaseStudy).filter(CaseStudy.uuid == cs_uuid).first()
    # TODO Scan variables. Look for the ones most interesting: grammar, data. Maybe cut processors.
    # TODO Scan also for hints to the elaboration of this thumbnail
    # TODO Elaborate View in SVG format
    isess.close_db_session()
    # FIX: the original fell off the end returning None, which makes Flask raise
    # "View function did not return a valid response"; return an explicit 501 instead.
    return build_json_response({"error": "SVG default view not implemented yet"}, 501)
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>", methods=["GET"])
def case_study_version(cs_uuid, v_uuid):  # Information about a case study version
    """
    Return detailed information about one case study version: its sessions, the
    command generators of the active sessions, and the variables of its state
    (loading the persisted state or, if absent, re-executing the commands).

    {"case_study": "<uuid>",
     "version": "<uuid>",
     "resource": "/case_studies/<case study uuid>/<version uuid>",
     "tag": "v0.1",
     "sessions":
       [
         {"uuid": "<uuid>",
          "open_date": "2017-09-20T10:00:00Z",
          "close_date": "2017-09-20T10:00:10Z",
          "client": "spreadsheet",
          "restart": True,
          "author": "<uuid>",
          "generator": "/case_studies/<case study uuid>/<version uuid>/<session uuid>/generator.xlsx",
          "state": "/case_studies/<case study uuid>/<version uuid>/<session uuid>/state.xlsx",
          "issues": [{"type": "error", "description": "syntax error in command ..."}, ...],
         },
         ...
       ]
     "command_executors":
       [
         {"type": "...",
          "label": "...",
          "definition": "/case_studies/<case study uuid>/<version uuid>/1.json"
         },
         ...
       ],
     "generator": "/case_studies/<case study uuid>/<version uuid>/generator.xlsx",
     "state": "/case_studies/<case study uuid>/<version uuid>/state.xlsx",
     "issues": [{"type": "error", "description": "syntax error in command ..."}, ...],
    }
    :param cs_uuid: UUID of the case study
    :param v_uuid: UUID of the version
    :return: JSON response (404 if version or case study do not match/exist)
    """
    def get_version_dict(vs):
        # Build the full dictionary for a CaseStudyVersion (closes over "mode")
        def get_session_dict(ss):
            # Build the dictionary for a CaseStudyVersionSession
            uuid4 = str(ss.uuid)
            v_session = {"uuid": uuid4,
                         "open_date": str(ss.open_instant),
                         "close_date": str(ss.close_instant),
                         "client": "spreadsheet",  # TODO Spreadsheet, R script, Python script, <Direct?>
                         "restart": ss.restarts,
                         "author": ss.who.name
                         }
            if mode == "tree":
                # "tree" mode wraps dictionaries as expected by the frontend tree widget
                v_session = {"data": v_session}
            else:
                pass
            return v_session

        # Case Study UUID, Case Study Version UUID
        uuid2 = str(vs.case_study.uuid)
        uuid3 = str(vs.uuid)
        # Get active sessions: a session with "restarts" discards all previous ones
        act_sess = []
        for s in vs.sessions:
            if s.restarts:
                act_sess = []
            act_sess.append(s)
        # Load state (or EXECUTE IT!!! -CAN BE VERY SLOW!!-)
        if vs.state:
            # Deserialize
            st = deserialize_to_object(vs.state)
        else:
            st = State()  # Zero State, execute all commands in sequence
            for ws in act_sess:
                for c in ws.commands:
                    execute_command_container(st, c)
        # List of command_executors: list -> dictionary ("data": {}, "children": [ ... ])
        lst_cmds = []
        for ws in act_sess:
            for c in ws.commands:
                d = {"type": c.generator_type,
                     "label": c.name if c.name else "<empty>",
                     "definition": nis_api_base + "/case_studies/" + uuid2 + "/versions/"+uuid3+"/sessions/"+str(ws.uuid)+"/command/"+str(c.order)
                     }
                if mode == "tree":
                    d = {"data": d}
                lst_cmds.append(d)
        # List of variables of the state, per namespace
        lst_vars = []
        for n in st.list_namespaces():
            for t in st.list_namespace_variables(n):
                d = {"name": t[0],
                     "type": str(type(t[1])),
                     "view": nis_api_base + "/case_studies/" + uuid2 + "/versions/"+uuid3+"/variables/"+str(t[0]),
                     "namespace": n
                     }
                if mode == "tree":
                    d = {"data": d}
                lst_vars.append(d)
        version = {"case_study": uuid2,
                   "version": uuid3,
                   "resource": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3,
                   "tag": "v0.1",
                   "generator": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3+"/generator.xlsx",
                   "state": nis_api_base + "/case_studies/" + uuid2 + "/versions/"+uuid3+"/state.xlsx",
                   "issues": [{"type": "error", "description": "syntax error in command ..."}
                              ],
                   "sessions": [get_session_dict(s) for s in vs.sessions],
                   "command_executors": lst_cmds,
                   "variables": lst_vars
                   }
        # if mode == "tree":
        #     version = {"data": version, "children": [get_session_dict(s) for s in vs.sessions]}
        # else:
        #     version["sessions"] = [get_session_dict(s) for s in vs.sessions]
        return version

    mode = "tree"  # Output format used by the nested helpers above
    # Recover InteractiveSession (a Response value means an error to relay to the client)
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    if not user:
        user = "_anonymous"
    # Recover case studies READABLE by current user (or "anonymous")
    session = isess.open_db_session()
    # TODO Obtain case study version, filtered by current user permissions
    # Access Control
    #   CS in acl and user in acl.detail and acl.detail is READ, WRITE,
    #   CS in acl and group acl.detail and user in group
    vs = session.query(CaseStudyVersion).filter(CaseStudyVersion.uuid == v_uuid).first()
    if not vs:
        r = build_json_response({"error": "The case study version '"+v_uuid+"' does not exist."}, 404)
    else:
        if str(vs.case_study.uuid) != cs_uuid:
            # The version exists but belongs to a different case study
            r = build_json_response({"error": "The case study '" + cs_uuid + "' does not exist."}, 404)
        else:
            r = build_json_response(get_version_dict(vs))
    isess.close_db_session()
    return r
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>", methods=["DELETE"])
def case_study_version_delete(cs_uuid, v_uuid):  # DELETE a case study version
    """
    Delete a case study version (not implemented yet).

    :param cs_uuid: Case Study UUID
    :param v_uuid: Version UUID
    :return: JSON response; currently always 501 Not Implemented
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # TODO Check user permissions
    # TODO If authorized, delete a case study version and all its sessions and commands
    # Returning None from a Flask view triggers a 500 error; report "not implemented" explicitly
    return build_json_response({"error": "Case study version deletion is not implemented yet"}, 501)
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>/sessions/<s_uuid>", methods=["GET"])
def case_study_version_session(cs_uuid, v_uuid, s_uuid):  # Information about a session in a case study version
    """
    {"case_study": "<uuid>",
     "version": "<uuid>",
     "session": "<uuid>",
     "resource": "/case_studies/<case study uuid>/<version uuid>/<session uuid>",
     "open_date": "2017-09-20T10:00:00Z",
     "close_date": "2017-09-20T10:00:10Z",
     "client": "spreadsheet",
     "restart": True,
     "author": "<uuid>",
     "generator": "/case_studies/<case study uuid>/<version uuid>/<session uuid>/generator.xlsx",
     "state": "/case_studies/<case study uuid>/<version uuid>/<session uuid>/state.xlsx",
     "issues": [{"type": "error", "description": "syntax error in command ..."}, ...],
     "command_executors":
      [
       {"type": "...",
        "label": "...",
        "definition": "/case_studies/<case study uuid>/<version uuid>/1.json"
       },
       ...
      ]
    }
    :param cs_uuid: Case Study UUID
    :param v_uuid: Version UUID
    :param s_uuid: Session UUID
    :return: JSON response with the session, or a 404 error
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    if not user:
        user = "_anonymous"
    # Recover case studies READABLE by current user (or "anonymous")
    session = isess.open_db_session()
    # TODO Obtain case study version session, filtered by current user permissions
    # Access Control
    #  CS in acl and user in acl.detail and acl.detail is READ, WRITE,
    #  CS in acl and group acl.detail and user in group
    ss = session.query(CaseStudyVersionSession).filter(CaseStudyVersionSession.uuid == s_uuid).first()
    if not ss:
        r = build_json_response({"error": "The case study version session '"+s_uuid+"' does not exist."}, 404)
    else:
        # Compare as strings: the ORM may hand back UUID objects while the URL parameters
        # are plain strings (same normalization the sibling "case_study_version" endpoint
        # applies with str(vs.case_study.uuid))
        if str(ss.version.uuid) != v_uuid:
            r = build_json_response({"error": "The case study version '" + v_uuid + "' does not exist."}, 404)
        elif str(ss.version.case_study.uuid) != cs_uuid:
            r = build_json_response({"error": "The case study '" + cs_uuid + "' does not exist."}, 404)
        else:
            # TODO Return the command OR generator
            # TODO The generator can be text or BINARY
            r = build_json_response(ss)  # TODO Improve it, it must return the number of versions. See document !!!
    isess.close_db_session()
    return r
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>/sessions/<s_uuid>", methods=["DELETE"])
def case_study_version_session_delete(cs_uuid, v_uuid, s_uuid):  # DELETE a session in a case study version
    """
    Delete a session of a case study version (not implemented yet).

    :param cs_uuid: Case Study UUID
    :param v_uuid: Version UUID
    :param s_uuid: Session UUID
    :return: JSON response; currently always 501 Not Implemented
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # TODO Check user permissions
    # TODO If authorized, delete a case study version SESSION and all its commands
    # Returning None from a Flask view triggers a 500 error; report "not implemented" explicitly
    return build_json_response({"error": "Case study version session deletion is not implemented yet"}, 501)
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>/sessions/<s_uuid>/<command_order>", methods=["GET"])
def case_study_version_session_command(cs_uuid, v_uuid, s_uuid, command_order):
    """
    DOWNLOAD a command or generator, using the order, from 0 to number of command_executors - 1
    Commands are enumerated using "case_study_version_session()"
    (URL: "/case_studies/<cs_uuid>/versions/<v_uuid>/sessions/<s_uuid>")
    :param cs_uuid: Case Study UUID
    :param v_uuid: Version UUID
    :param s_uuid: Session UUID, or "-" to address the first session of version v_uuid
    :param command_order: 0-based index of the command inside the session
    :return: the command content with its original MIME type, or a JSON error
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    if not user:
        user = "_anonymous"
    # Recover case studies READABLE by current user (or "anonymous")
    session = isess.open_db_session()
    # TODO Obtain case study version session, filtered by current user permissions
    # Access Control
    #  CS in acl and user in acl.detail and acl.detail is READ, WRITE,
    #  CS in acl and group acl.detail and user in group
    # Fix: "ss" was left unbound (NameError at "if not ss") when s_uuid == "-"
    # and the requested version did not exist
    ss = None
    if s_uuid == "-":
        vs = session.query(CaseStudyVersion).filter(CaseStudyVersion.uuid == v_uuid).first()
        if vs:
            ss = vs.sessions[0]
            s_uuid = ss.uuid
    else:
        ss = session.query(CaseStudyVersionSession).filter(CaseStudyVersionSession.uuid == s_uuid).first()
    if not ss:
        r = build_json_response({"error": "The case study version session '"+s_uuid+"' does not exist."}, 404)
    else:
        # if ss.version.uuid != v_uuid:
        #     r = build_json_response({"error": "The case study version '" + v_uuid + "' does not exist."}, 404)
        # elif ss.version.case_study.uuid != cs_uuid:
        #     r = build_json_response({"error": "The case study '" + cs_uuid + "' does not exist."}, 404)
        order = int(command_order)
        if order < len(ss.commands):
            c = ss.commands[order]
            r = Response(c.content, mimetype=c.content_type)
        else:
            # An out-of-range index is an error: return 404 (the original fell back to the default 200)
            r = build_json_response({"error":
                                     "Command number " + str(order) +
                                     " requested. The session '"+s_uuid+"' only has "+str(len(ss.commands))+"."},
                                    404)
    # r.headers['Access-Control-Allow-Origin'] = "*"
    isess.close_db_session()
    return r
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>/variables/", methods=["GET"])
def case_study_version_variables(cs_uuid, v_uuid):  # List of variables defined in a case study version
    """
    Return the list of ALL variables defined in the case study version
    :param cs_uuid:
    :param v_uuid:
    :return:
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # TODO Check READ permission of the user to the case study
    # Open temporary reproducible session
    try:
        isess.open_reproducible_session(case_study_version_uuid=v_uuid,
                                        recover_previous_state=True,
                                        cr_new=CreateNew.NO,
                                        allow_saving=False)
        # A reproducible session must be open, signal about it if not
        if not isess.state:
            r = build_json_response({"error": "No state available for Case Study Version '"+v_uuid+"'"}, 404)
        else:
            # Collect every variable from every namespace, skipping internal ones
            # (names starting with "_"); each entry is {"name", "type", "namespace"}
            variables = []
            for ns in isess.state.list_namespaces():
                for entry in isess.state.list_namespace_variables(ns):
                    if entry[0].startswith("_"):
                        continue
                    variables.append({"name": entry[0],
                                      "type": str(type(entry[1])),
                                      "namespace": ns})
            r = build_json_response(variables, 200)
        # Close temporary reproducible session
        isess.close_reproducible_session(issues=None, output=None, save=False, from_web_service=True)
    except Exception as e:
        r = build_json_response({"error": str(e)}, 404)
    return r
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>/variables/<name>", methods=["GET"])
def case_study_version_variable(cs_uuid, v_uuid, name):  # Information about a case study version variable
    """
    Return the value of the requested variable
    :param cs_uuid: Case Study UUID
    :param v_uuid: Version UUID
    :param name: Variable name
    :return: JSON response {name: value}, or a 404 error
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # TODO Check READ permission of the user to the case study
    # Open temporary reproducible session
    try:
        isess.open_reproducible_session(case_study_version_uuid=v_uuid,
                                        recover_previous_state=True,
                                        cr_new=CreateNew.NO,
                                        allow_saving=False
                                        )
        # A reproducible session must be open, signal about it if not
        if isess.state:
            # TODO Parse Variable name can be "namespace'::'name"
            # TODO For now, just the variable name
            v = isess.state.get(name)
            # Fix: "if v:" reported legitimate falsy values (0, "", empty containers)
            # as missing. Assumes state.get returns None for an absent name -
            # TODO confirm against the State implementation.
            if v is not None:
                r = build_json_response({name: v}, 200)
            else:
                r = build_json_response(
                    {"error": "The requested variable name ('"+name+"') has not "
                              "been found in the Case Study Version '" + v_uuid + "'"}, 404)
        else:
            r = build_json_response({"error": "No state available for Case Study Version '" + v_uuid + "'"}, 404)
        # Close temporary reproducible session
        isess.close_reproducible_session(issues=None, output=None, save=False, from_web_service=True)
    except Exception as e:
        r = build_json_response({"error": str(e)}, 404)
    return r
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>/variables/<name>/views/", methods=["GET"])
def case_study_version_variable_views(cs_uuid, v_uuid, name):  # Information about a case study version variable views
    """
    Enumerate the available views of a variable (not implemented yet).

    :param cs_uuid: Case Study UUID
    :param v_uuid: Version UUID
    :param name: Variable name
    :return: JSON response; currently always 501 Not Implemented
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # TODO Check READ permission of the user to the case study
    # TODO Return the different views on a variable
    # Returning None from a Flask view triggers a 500 error; report "not implemented" explicitly
    return build_json_response({"error": "Variable views enumeration is not implemented yet"}, 501)
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>/variables/<name>/views/<view_type>", methods=["GET"])
def case_study_version_variable_view(cs_uuid, v_uuid, name, view_type):  # A view of case study version variable
    """
    Return one view of a variable (not implemented yet).

    :param cs_uuid: Case Study UUID
    :param v_uuid: Version UUID
    :param name: Variable name
    :param view_type: Requested view type
    :return: JSON response; currently always 501 Not Implemented
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # TODO Check READ permission of the user to the case study
    # TODO Return a view of the requested variable
    # Returning None from a Flask view triggers a 500 error; report "not implemented" explicitly
    return build_json_response({"error": "Variable views are not implemented yet"}, 501)
# -- Users --
@app.route(nis_api_base + "/users/", methods=["GET"])
def list_users():
    """Return the full list of users; only the 'admin' identity is allowed."""
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    identity = isess.get_identity_id()
    if identity != "admin":
        # Anonymous identities and every non-admin user are rejected alike
        return build_json_response({"error": "Users list can be obtained only by 'admin' user"}, 401)
    db = isess.open_db_session()
    all_users = db.query(User).all()
    resp = build_json_response(all_users)
    isess.close_db_session()
    return resp
@app.route(nis_api_base + "/users/<id>", methods=["GET"])
def get_user(id):
    """
    Obtain a user record; allowed to the 'admin' identity or to the user itself.

    :param id: User name
    :return: JSON response with the user, or a 401 error
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    # Fix: "user and user == 'admin' or user == id" parsed as
    # "(user and user == 'admin') or (user == id)"; parenthesized to match the
    # intent stated in the error message ("only by 'admin' or '<id>' user")
    if user and (user == "admin" or user == id):
        session = isess.open_db_session()
        u = session.query(User).filter(User.name == id).first()
        r = build_json_response(u)  # TODO Improve it !!!
        isess.close_db_session()
    else:
        r = build_json_response({"error": "User '"+id+"' can be obtained only by 'admin' or '"+id+"' user"}, 401)
    return r
@app.route(nis_api_base + "/users/<id>", methods=["PUT"])
def put_user(id):  # Used also to deactivate user
    """
    Update (or deactivate) a user; allowed to the 'admin' identity or to the user itself.

    :param id: User name
    :return: JSON response with the updated user, or a 401/404 error
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    # Fix: "user and user == 'admin' or user == id" parsed as
    # "(user and user == 'admin') or (user == id)"; parenthesized to match the
    # intent stated in the error message ("only by 'admin' or '<id>' user")
    if user and (user == "admin" or user == id):
        session = isess.open_db_session()
        u = session.query(User).filter(User.name == id).first()
        if not u:
            r = build_json_response({"error": "User '"+id+"' does not exist"}, 404)
        else:
            # TODO Update "u" fields
            session.commit()
            r = build_json_response(u)  # TODO Improve it !!!
        isess.close_db_session()
    else:
        r = build_json_response({"error": "User '"+id+"' can be modified only by 'admin' or '"+id+"' user"}, 401)
    return r
@app.route(nis_api_base + "/users/", methods=["POST"])
def post_user():
    """
    Create a new user; only the 'admin' identity is allowed. Expects JSON {"name": ...}.

    :return: JSON response with the created user, or an error (401/422)
    :raise Exception: if the request is not application/json or carries no body
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    if user and user == "admin":
        # Validate the request BEFORE opening the DB session, so a bad request
        # does not leave a dangling open session when the exception propagates
        if request.content_type != "application/json":
            raise Exception("Only application/json data is allowed")
        if not request.data:
            raise Exception("No data received")
        j = json.loads(request.data.decode())
        session = isess.open_db_session()
        u = session.query(User).filter(User.name == j["name"]).first()
        if not u:
            # Create User
            u = User()
            u.name = j["name"]
            session.add(u)
            session.commit()
            r = build_json_response(u)
        else:
            r = build_json_response({"error": "User '"+j["name"]+"' already exists"}, 422)
        isess.close_db_session()
    else:
        r = build_json_response({"error": "A user can be created only by 'admin'"}, 401)
    return r
# -- Groups --
@app.route(nis_api_base + "/groups/", methods=["GET"])
def list_groups():
    """
    List groups (not implemented yet).

    :return: JSON response; currently always 501 Not Implemented
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # TODO List the groups, with permission checks (see "list_users")
    # Returning None from a Flask view triggers a 500 error; report "not implemented" explicitly
    return build_json_response({"error": "Groups listing is not implemented yet"}, 501)
@app.route(nis_api_base + "/groups/<id>", methods=["GET"])
def get_group(id):
    """
    Obtain a group record; allowed to the 'admin' identity or to the matching identity.

    :param id: Group name
    :return: JSON response with the group, or a 401 error
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    # Fix: "user and user == 'admin' or user == id" parsed as
    # "(user and user == 'admin') or (user == id)"; parenthesized to match the
    # intent stated in the error message below
    if user and (user == "admin" or user == id):
        session = isess.open_db_session()
        u = session.query(Group).filter(Group.name == id).first()
        r = build_json_response(u)  # TODO Improve it !!!
        isess.close_db_session()
    else:
        r = build_json_response({"error": "Group '" + id + "' can be obtained only by 'admin' or '" + id + "' user"},
                                401)
    return r
@app.route(nis_api_base + "/groups/<id>", methods=["PUT"])
def put_group(id):
    """
    Update a group; allowed to the 'admin' identity or to the matching identity.

    :param id: Group name
    :return: JSON response with the updated group, or a 401/404 error
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    # Fix: "user and user == 'admin' or user == id" parsed as
    # "(user and user == 'admin') or (user == id)"; parenthesized to match the
    # intent stated in the error message below
    if user and (user == "admin" or user == id):
        session = isess.open_db_session()
        u = session.query(Group).filter(Group.name == id).first()
        if not u:
            r = build_json_response({"error": "Group '" + id + "' does not exist"}, 404)
        else:
            # TODO Update "u" fields
            session.commit()
            r = build_json_response(u)  # TODO Improve it !!!
        isess.close_db_session()
    else:
        r = build_json_response({"error": "Group '" + id + "' can be modified only by 'admin' or '" + id + "' user"},
                                401)
    return r
# Fix: the original registered this view under "/users/" (copy-paste from "post_user"),
# duplicating that rule so group creation was unreachable; groups are created under "/groups/"
@app.route(nis_api_base + "/groups/", methods=["POST"])
def post_group():
    """
    Create a new group; only the 'admin' identity is allowed. Expects JSON {"name": ...}.

    :return: JSON response with the created group, or an error (401/422)
    :raise Exception: if the request is not application/json or carries no body
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()
    if user and user == "admin":
        # Validate the request BEFORE opening the DB session, so a bad request
        # does not leave a dangling open session when the exception propagates
        if request.content_type != "application/json":
            raise Exception("Only application/json data is allowed")
        if not request.data:
            raise Exception("No data received")
        j = json.loads(request.data.decode())
        session = isess.open_db_session()
        u = session.query(Group).filter(Group.name == j["name"]).first()
        if not u:
            # TODO Create Group
            u = Group()
            u.name = j["name"]
            session.add(u)
            session.commit()
            r = build_json_response(u)
        else:
            r = build_json_response({"error": "Group '" + j["name"] + "' already exists"}, 422)
        isess.close_db_session()
    else:
        r = build_json_response({"error": "A group can be created only by 'admin'"}, 401)
    return r
# -- Permissions --
def acl():
    """Placeholder for Access Control List (permissions) management; not implemented."""
    pass
# -- Reusable objects --
@app.route(nis_api_base + "/sources/", methods=["GET"])
def data_sources():
    """Enumerate the external data sources supported by the data source manager."""
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # TODO Authentication, authorization
    # Enumerate sources
    supported = nexinfosys.data_source_manager.get_supported_sources()
    return build_json_response(dict(sources=supported))
@app.route(nis_api_base + "/sources/<source_id>", methods=["GET"])
@app.route(nis_api_base + "/sources/<source_id>/databases/", methods=["GET"])
def data_source_databases(source_id):
    """
    Enumerate the databases of a data source.

    :param source_id: Data source identifier; "-" means "all sources"
    :return: JSON list of {"source", "databases": [{"code", "description"}, ...]}
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Enumerate source databases
    dsm = nexinfosys.data_source_manager
    if source_id == "-":
        source_id = None  # None acts as a wildcard for get_databases
    # Each element of the result pairs a source name with its list of databases
    ret_lst = [dict(source=i[0],
                    databases=[dict(code=c.code, description=c.description) for c in i[1]])
               for i in dsm.get_databases(source_id)]
    return build_json_response(ret_lst)
@app.route(nis_api_base + "/sources/<source_id>/databases/<database_id>", methods=["GET"])
@app.route(nis_api_base + "/sources/<source_id>/databases/<database_id>/datasets/", methods=["GET"])
def data_source_database_datasets(source_id, database_id):
    """Enumerate the datasets of a database of a data source ("-" acts as a wildcard)."""
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Enumerate source+database datasets
    dsm = nexinfosys.data_source_manager
    if source_id == "-":
        # A wildcard source implies a wildcard database
        source_id = None
        database_id = None
    if database_id in ("-", "None"):
        database_id = None
    base = request.base_url+"/datasets/"
    result = [dict(source=i[0],
                   datasets=[dict(code=j[0], description=j[1], info_url=base+j[0]) for j in i[1]])
              for i in dsm.get_datasets(source_id, database_id)]
    return build_json_response(result)
@app.route(nis_api_base + "/sources/<source_id>/databases/<database_id>/datasets/<dataset_id>", methods=["GET"])
def data_source_database_dataset_detail(source_id, database_id, dataset_id):
    """
    Return a JSON with the method "GET" and the possible values for the dimensions
    Also parameters to return a table of tuples or a precomputed pivot table
    Also return the address of the endpoint to query the dataset using SDMX. This be
    :param source_id: Data source identifier ("-" to auto-detect from the dataset)
    :param database_id: Database identifier ("-" as wildcard)
    :param dataset_id: Dataset code (mandatory)
    :return: JSON response with the dataset structure (dimensions and code lists)
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    dsm = nexinfosys.data_source_manager
    if source_id == "-":
        source_id = None
        database_id = None
    if database_id == "-":
        database_id = None
    if not dataset_id:
        raise Exception("It is mandatory to define the dataset name when requesting the dataset parameters")
    if not source_id:
        # Derive the source from the dataset registry of the current state
        _, _, _, datasets, _ = get_case_study_registry_objects(isess.state)
        from nexinfosys.ie_imports.data_source_manager import DataSourceManager
        source_id = DataSourceManager.obtain_dataset_source(dataset_id, datasets)
    ds = dsm.get_dataset_structure(source_id, dataset_id)
    dims = []
    for d in ds.dimensions:
        cl = []
        if d.get_hierarchy():
            # CodeList has one or more levels ".levels" property
            # CodeListLevel has zero or more Codes ".codes" property
            # ".codes" may be a list OR a dict. Fix: the original tested
            # "isinstance(d.get_hierarchy, list)" on the *bound method* (never a
            # list), so the list branch was unreachable and list-typed codes
            # would have crashed on ".values()".
            codes = d.get_hierarchy().codes
            values = codes.values() if isinstance(codes, dict) else codes
            for v in values:
                cl.append(dict(code=v.name, description=v.description, level=v.level.name if v.level else None))
        dims.append(dict(code=d.code, description=d.description, is_time=d.is_time, is_measure=d.is_measure,
                         attributes=d.attributes, code_list=cl))
    d = dict(id=ds.id, code=ds.code, description=ds.description, data_dictionary=ds.data_dictionary,
             attributes=ds.attributes, dimensions=dims)
    return build_json_response(d)
@app.route(nis_api_base + "/isession/external_xslx", methods=["PUT"])
def download_external_xlsx():  # From the URL of an external XLSX, obtain it and return it
    """
    Download the external XLSX workbook whose URL comes in the request body,
    rewrite it, and return the rewritten workbook.

    :return: Flask Response with the regenerated XLSX content
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # request.get_data() already returns the raw body bytes; the original
    # round-trip through io.BytesIO/getbuffer was redundant
    url = request.get_data().decode("utf-8")
    data = download_file(url)
    try:
        xl = openpyxl.load_workbook(data, data_only=True)
        rewrite_xlsx_file(xl, copy_style=False)
    except Exception as e:
        logging.error("Exception rewriting XLSX. Is openpyxl==2.4.8 installed?. Check with 'pip freeze | grep "
                      "openpyxl'. If that is the case, fix with 'pip install openpyxl==2.4.8'")
        raise e
    data = save_virtual_workbook(xl)
    r = Response(data,
                 mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
                 status=200)
    return r
@app.route(nis_api_base + "/isession/regenerate_xlsx", methods=["POST"])
def regenerate_xlsx_file():  # Receive an XLSX workbook, regenerate it
    """
    Receive an XLSX workbook in the request body, rewrite (regenerate) it and
    return the rewritten workbook.

    :return: Flask Response with the regenerated XLSX content
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    generator_type, content_type, buffer, execute, register = receive_file_submission(request)
    try:
        xl = openpyxl.load_workbook(io.BytesIO(buffer), data_only=True)
        rewrite_xlsx_file(xl)
        # rewrite_xlsx_file(xl, copy_style=False)
    except Exception as e:
        # Consistency fix: use logging (like "download_external_xlsx") instead of
        # print, so the diagnostic reaches the configured log handlers
        logging.error("Exception rewriting XLSX. Is openpyxl==2.4.8 installed?. Check with 'pip freeze | grep openpyxl'. "
                      "If that is the case, fix with 'pip install openpyxl==2.4.8'")
        raise e
    tmp = save_virtual_workbook(xl)
    r = Response(tmp,
                 mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
                 status=200)
    return r
@app.route(nis_api_base + "/commands_and_fields", methods=["GET"])
def obtain_commands_and_their_fields():
    """
    Enumerate every command together with the allowed names of its fields.

    :return: dict mapping command name -> list of "allowed_names" of each field
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Fix: the original reused the keys "name"/"fields" of a single dict on every
    # iteration, so only the LAST command survived in the response. Return one
    # entry per command instead.
    j = {}
    for cmd_name, flds in command_fields.items():
        j[cmd_name] = [f.allowed_names for f in flds]
    return j
@app.route(nis_api_base + "/validate_command_record", methods=["POST"])
def validate_command_record():
    """
    On-line, field by field or row by row validation of syntax
    (the client can send what the user just entered; the server responds
    None when the field is ok, and an error message otherwise)
    The input comes in a JSON field "content":
    {"command": "<command name",
     "fields": {"<field name>": "<value", ...}
    }
    :return: A dictionary with the same fields of the input dictionary, whose values are the diagnosis, None being
             everything-ok, and a string being a message describing the problem.
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Read request and delegate validation
    payload = request.get_json()
    result, ok = validate_command(payload)
    return build_json_response(result, 200 if ok else 400)
def get_misc_cmd_help(cmd_name):
    """
    Return the hard-coded on-line help entry (type, name, template and examples)
    for the "miscellaneous" commands, or None if "cmd_name" is not one of them.

    The template/example strings use backslash line-continuations, so the embedded
    "\\n" separators and TAB characters belong to a single string literal.

    :param cmd_name: Command name; one of "metadata", "pedigree_matrix", "datasetdata"
    :return: dict with keys "type", "name", "template", "examples" (plus "by_rows"
             for "metadata"), or None for any other command name
    """
    if cmd_name == "metadata":
        return {"type": "Metadata", "by_rows": False, "name": "Metadata", "template":
            "Case study code\n\
Case study name\n\
Title\n\
Subject, topic and/or keywords\n\
Description\n\
Geographical level\n\
Dimensions\n\
Reference documentation\n\
Authors\n\
Date of elaboration\n\
Temporal situation\n\
Geographical location\n\
DOI\n\
Language\n\
Restriction level\n\
Version", "examples": [
            "Case study code\tCS3_R_WEF_P-0.1\n\
Case study name\n\
Title\tSoslaires\n\
Subject, topic and/or keywords\n\
Description\tA small scale system combining Energy, Water and Food\n\
Geographical level\tLocal\n\
Dimensions\tEnergy\tWater\tFood\n\
Reference documentation\n\
Authors\tAna Musicki\tBaltasar Peñate\tTarik Serrrano\n\
Date of elaboration\t2016\n\
Temporal situation\t2016\n\
Geographical location\tGran Canaria\n\
DOI\n\
Language\tEnglish\n\
Restriction level\tPublic\n\
Version\tV0.1"]}
    elif cmd_name == "pedigree_matrix":
        return {"type": "Metadata", "name": "Pedigree", "template":
            "Code\t<Phase name #1>\t<Phase name #2>\t<Phase name #3>\t...",
                "examples": [
                    "Code\tTheoreticalStructures\tDataInput\tPeerAcceptance\tColleagueConsensus\n\
4\tEstablishedTheory\tExperimentalData\tTotal\tAllButCranks\n\
3\tTheoreticallyBasedModel\tHistoricFieldData\tHigh\tAllButRebels\n\
2\tComputationalModel\tCalculatedData\tMedium\tCompetingSchools\n\
1\tStatisticalProcessing\tEducatedGuess\tLow\tEmbryonicField\n\
0\tDefinitions\tUneducatedGuess\tNone\tNoOpinion",
                    "Code\tModelStructure\tDataInput\tTesting\n\
4\tComprehensive\tReview\tCorroboration\n\
3\tFiniteElementApproximation\tHistoricField\tComparison\n\
2\tTransferFunction\tExperimental\tUncertaintyAnalysis\n\
1\tStatisticalProcessing\tCalculated\tSensitivityAnalysis\n\
0\tDefinitions\tExpertGuess\tNone",
                    "Code\tDefinitionsAndStandards\tDataCollectionAndAnalysis\tInstitutionalCulture\tReview\n\
5\tNegotiation\tTaskForce\tDialogue\tExternal\n\
4\tScience\tDirectSurvey\tAccomodation\tIndependent\n\
3\tConvenience\tIndirectEstimate\tObedience\tRegular\n\
2\tSymbolism\tEducatedGuess\tEvasion\tOccasional\n\
1\tInertia\tFiat\tNoContact\tNone\n\
0\tUnknown\tUnknown\tUnknown\tUnknown"
                ]
                }
    elif cmd_name == "datasetdata":
        return {"type": "Input", "name": "DatasetData", "template":
            "<Dataset concept #1>\t<Dataset concept #2>\t<Dataset concept #3>\t...",
                "examples": [
                    "Country\tYear\tWaterConsumption\n\
ES\t2015\t102\n\
ES\t2016\t110\n\
IT\t2015\t130\n\
IT\t2016\t140\n",
                    "Tech\tScale\tUnitEnergyConsumption\n\
Coal\tMiddle\t1.4\n\
Coal\tLarge\t1.3\n\
Coal\tVeryLarge\t1.2\n\
Nuclear\tLarge\t1.3\n\
Nuclear\tVeryLarge\t1.15\n"
                ]
                }
    else:
        return None
def get_regular_cmd_help(cmd: nexinfosys.Command):
    """
    Build the reference-help entry for a regular (non-misc) command.

    :param cmd: Command descriptor from nexinfosys
    :return: dict with "type", "name", "template" (TAB-joined allowed names of the
             non-deprecated, non-"@" fields) and an empty "examples" list
    """
    cmdflds = command_fields.get(cmd.name, None)
    # NOTE(review): the original also read cmd.direct_examples and cmd.files but
    # never used them, always returning examples=[] - TODO confirm that is intended
    return dict(type=str(cmd.cmd_type),
                name=cmd.allowed_names[0],
                template="\t".join([f.allowed_names[0] for f in cmdflds
                                    if "@" not in f.allowed_names[0] and not f.deprecated]),
                examples=[]
                )
@app.route(nis_api_base + "/commands_reference.json", methods=["GET"])
def obtain_commands_reference():
    """Return the reference help of every V2 command, grouped by command type."""
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Process the command types in a fixed, meaningful order
    type_order = [nexinfosys.CommandType.core,
                  nexinfosys.CommandType.input,
                  nexinfosys.CommandType.analysis,
                  nexinfosys.CommandType.metadata,
                  nexinfosys.CommandType.convenience,
                  nexinfosys.CommandType.misc]
    entries = []
    for ctype in type_order:
        for cmd in commands:
            if not (cmd.is_v2 and cmd.cmd_type == ctype):
                continue
            # Hard-coded help for "misc" commands takes precedence
            help_entry = get_misc_cmd_help(cmd.name)
            if help_entry:
                entries.append(help_entry)
            elif command_fields.get(cmd.name, None):
                entries.append(get_regular_cmd_help(cmd))
    return build_json_response([e for e in entries if e])
"""
d = [
{"type": "External dataset", "name": "Mapping", "template":
"<Source dimension from external dataset>\t<Target internal taxonomy>\t<Weight (optional, default 1 (many-to-one), <1 for many-to-many mappings)>",
"examples": [
"nrg_110a.PRODUCT\tMuSIASEM_EC\n\
2100\tHeat\n\
2200\tHeat\n\
2410\tHeat\n\
3214\tHeat\n\
3215\tHeat\n\
3215\tFeedstock\n\
3220\tHeat\n\
3234\tFuel\n\
3235\tFuel\n\
3244\tFuel\n\
3246\tFuel\n\
3247\tFuel\n\
3250\tFeedstock\n\
3260\tFuel\n\
3270A\tHeat\n\
3280\tFeedstock\n\
3285\tHeat\n\
4000\tHeat\n\
5532\tHeat\n\
5541\tHeat\n\
5542\tHeat\n\
55431\tHeat\n\
55432\tHeat\n\
5544\tHeat\n\
5545\tFuel\n\
5550\tHeat\n\
6000\tElectricity\n\
7100\tHeat\n\
7200\tHeat\n\
",
"nrg_110a.INDIC_NRG\tMuSIASEM_Sector\n\
B_101300\tES\n\
B_101825\tMQ\n\
B_102030\tAFO\n\
B_102020\tFI\n\
B_101805\tIS\n\
B_101810\tNF\n\
B_101815\tCP\n\
B_101820\tNM\n\
B_101830\tFT\n\
B_101835\tTL\n\
B_101840\tPPP\n\
B_101846\tTE\n\
B_101847\tMA\n\
B_101851\tWWP\n\
B_101852\tCO\n\
B_101853\tNS\n\
B_102035\tSG"
]
},
{"type": "External dataset", "name": "Parameters", "template":
"Name\tValue\tType\tGroup\tDescription",
"examples": [
"Name\tValue\tType\tGroup\tDescription\n\
p1\t3\tnumber\t\tParameter # 1\n\
p2\t3.5\tnumber\t\tParameter two"
]
},
{"type": "Specification", "by_rows": False, "name": "Metadata", "template":
"Case study code\n\
Case study name\n\
Title\n\
Subject, topic and/or keywords\n\
Description\n\
Geographical level\n\
Dimensions\n\
Reference documentation\n\
Authors\n\
Date of elaboration\n\
Temporal situation\n\
Geographical location\n\
DOI\n\
Language\n\
Restriction level\n\
Version", "examples": [
"Case study code\tCS3_R_WEF_P-0.1\n\
Case study name\n\
Title\tSoslaires\n\
Subject, topic and/or keywords\n\
Description\tA small scale system combining Energy, Water and Food\n\
Geographical level\tLocal\n\
Dimensions\tEnergy\tWater\tFood\n\
Reference documentation\n\
Authors\tAna Musicki\tBaltasar Peñate\tTarik Serrrano\n\
Date of elaboration\t2016\n\
Temporal situation\t2016\n\
Geographical location\tGran Canaria\n\
DOI\n\
Language\tEnglish\n\
Restriction level\tPublic\n\
Version\tV0.1"]},
{"type": "Specification", "name": "Processors", "template":
"Name\tLevel\tFF_TYPE\tVAR\tVALUE\tUNIT\tRELATIVE TO\tUNCERTAINTY\tASSESSMENT\tPEDIGREE\\nMATRIX\tPEDIGREE\tTIME\tGEO\tSCALE\tSOURCE\tCOMMENTS",
"examples": [
"Name\tLevel\tFF_TYPE\tVAR\tVALUE\tUNIT\tRELATIVE TO\tUNCERTAINTY\tASSESSMENT\tPEDIGREE\\nMATRIX\tPEDIGREE\tTIME\tGEO\tSCALE\tSOURCE\tCOMMENTS\n\
WindFarm\tN-1\tInt_In_Fund\tHA\t660\thours\t\t\t\t\t\tYear\t\t\t\t\n\
WindFarm\tN-1\tInt_In_Fund\tHA_cost\t1800\t€\t\t\t\t\t\t2016\t\t\t\t\n\
WindFarm\tN-1\tInt_Out_Flow\tWindElectricity\t9.28\tGWh\t\t\t\t\t\tYear\t\t\t\t11,8% Energy transformation efficiency from wind to electricity\n\
ElectricGrid\tN\tExt_In_Flow\tGridElectricity\t6.6\tGWh\t\t\t\t\t\tYear\t\t\t\t0.429 M€ income from energy sale"]},
{"type": "Specification", "name": "Upscale", "template":
"<factor name>\t\n\
<child processor type> / <parent processor type>\t<one or more codes from predefined categories. One or more rows allowed, from this row upwards>\n\
<one or more codes from predefined categories. One or more columns allowed, from this column to the left>\
",
"examples": [
"LU\tGH\tGH\tOF\n\
Farm / AgrarianRegion\tMCR1\tMCR2\tMCR1\n\
AR1\t0.00\t0.06\t0.94\n\
AR2\t0.15\t0.85\t0.00\n\
AR3\t0.19\t0.77\t0.04\n\
AR4\t0.03\t0.05\t0.92\n\
AR5\t0.00\t0.00\t1.00\n\
AR6\t0.00\t0.87\t0.13"
]
},
{"type": "Specification", "name": "Structure", "template":
"Origin\tRelation\tDestination\tDestination\tDestination",
"examples": [
"Origin\tRelation\tDestination\tDestination\tDestination\tDestination\tDestination\tDestination\tDestination\tDestination\n\
WindFarm:WindElectricity\t>\t1/(0.5*p1)>DesalinationPlant:WindElectricity\tElectricGrid\t\t\t\t\t\t\n\
ElectricGrid\t>\tDesalinationPlant:GridElectricity\t\t\t\t\t\t\t\n\
DesalinationPlant:DesalinatedWater\t>\tFarm:BlueWater\t\t\t\t\t\t\t\n\
Farm\t|\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:LU\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:HA\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:IrrigationCapacity\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:BlueWater\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:Agrochemicals\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:Fuel\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:GreenWater\t<\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:MaterialWaste\t<\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:DiffusivePollution\t<\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:CO2\t<\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm\t<\tCantaloupe:Cantaloupe\tWatermelon:Watermelon\tTomato:Tomato\tZucchini:Zucchini\tBeans:Beans\tPumpkin:Pumpkin\tBanana:Banana\tMoringa:Moringa"
]
},
{"type": "Specification", "name": "Taxonomy_F", "template":
"Code\tDescription\tCode\tDescription\tExpression",
"examples": [
"Code\tDescription\tCode\tDescription\tExpression\n\
Vegetables\tAll kinds of vegetables\n\
\t\tCantaloupe\n\
\t\tWatermelon\t\n\
\t\tTomato\n\
\t\tZucchini\n\
\t\tBeans\n\
\t\tPumpkin\n\
\t\tBanana\n\
\t\tMoringa"
]
},
{"type": "Specification", "name": "Pedigree", "template":
"Code\t<Phase name #1>\t<Phase name #2>\t<Phase name #3>\t...",
"examples": [
"Code\tTheoreticalStructures\tDataInput\tPeerAcceptance\tColleagueConsensus\n\
4\tEstablishedTheory\tExperimentalData\tTotal\tAllButCranks\n\
3\tTheoreticallyBasedModel\tHistoricFieldData\tHigh\tAllButRebels\n\
2\tComputationalModel\tCalculatedData\tMedium\tCompetingSchools\n\
1\tStatisticalProcessing\tEducatedGuess\tLow\tEmbryonicField\n\
0\tDefinitions\tUneducatedGuess\tNone\tNoOpinion",
"Code\tModelStructure\tDataInput\tTesting\n\
4\tComprehensive\tReview\tCorroboration\n\
3\tFiniteElementApproximation\tHistoricField\tComparison\n\
2\tTransferFunction\tExperimental\tUncertaintyAnalysis\n\
1\tStatisticalProcessing\tCalculated\tSensitivityAnalysis\n\
0\tDefinitions\tExpertGuess\tNone",
"Code\tDefinitionsAndStandards\tDataCollectionAndAnalysis\tInstitutionalCulture\tReview\n\
5\tNegotiation\tTaskForce\tDialogue\tExternal\n\
4\tScience\tDirectSurvey\tAccomodation\tIndependent\n\
3\tConvenience\tIndirectEstimate\tObedience\tRegular\n\
2\tSymbolism\tEducatedGuess\tEvasion\tOccasional\n\
1\tInertia\tFiat\tNoContact\tNone\n\
0\tUnknown\tUnknown\tUnknown\tUnknown"
]
},
{"type": "Specification", "name": "Composition_P", "template":
"Code\tDescription\tCode\tDescription",
"examples": [
"Code\tDescription\tCode\tDescription\tCode\tDescription\tCode\tDescription\tCode\tDescription\n\
Society\tEncompassess the human realm\n\
\t\tHH\tHousehold Sector\n\
\t\tPW\tPaid Work Sector\n\
\t\t\t\tSG\tService & Government\n\
\t\t\t\tPS\tPrimary & Secondary\n\
\t\t\t\t\t\tBM\tBuilding & Manufacturing\n\
\t\t\t\t\t\tPF\tPrimary flows\n\
\t\t\t\t\t\t\t\tAG\tAgriculture\n\
\t\t\t\t\t\t\t\tEM\tEnergy & Mining"
]
},
{"type": "Specification", "name": "Taxonomy_C", "template":
"Code\tDescription\tCode\tDescription\tExpression",
"examples": [
]
},
{"type": "Specification", "name": "References", "template":
"ref_id\t<list of columns depending on the type reference (bibliographic, geographic, provenance, see examples)>",
"examples": [
"ref_id\tTitle\tDate\tBoundingBox\tTopicCategory\tDescription\tMetadataPointOfContact\tAnnote\tDataLocation",
"ref_id\tEntry_Type\tAddress\tAnnote\tBookTitle\tChapter\tCrossRef\tEdition\tEditor\tHowPublished\tInstitution\tJournal\tKey\tMonth\tNote\tNumber\tOrganization\tPages\tPublisher\tSchool\tSeries\tTitle\tType\tURL\tVolume\tYear",
"ref_id\tAgentType\tAgent\tActivities\tEntities"
]
},
{"type": "Specification", "name": "Scale", "template":
"<A matrix having as row starts the origin factor type names, as column headers the target factor type names",
"examples": []
},
{"type": "Analysis", "name": "Indicators", "template":
"Name\tFormula\tDescription\tBenchmark\tBenchmark\tBenchmark\tBenchmark",
"examples": []
}
]
"""
@app.route(nis_api_base + "/command_reference.json", methods=["POST"])
def command_help():
    """
    On-line help for a single command.

    The request body is JSON of the form: {"command": "<command name>"}

    :return: a JSON response echoing the input fields, whose values hold the
             help split into sections: explanation, allowed_values,
             formal syntax and examples
    """
    payload = request.get_json()  # {"command": ...}
    body, status = comm_help(payload)
    return build_json_response(body, status)
@app.route(nis_api_base + "/command_fields_reference.json", methods=["POST"])
def command_fields_help():
    """
    On-line, field-by-field help for a command.

    The request body is JSON of the form:
    {"command": "<command name>", "fields": ["<field name>", "<field name>"]}

    :return: a JSON response echoing the input fields, whose values hold the
             help split into sections: explanation, allowed_values,
             formal syntax and examples
    """
    payload = request.get_json()
    body, ok = command_field_help(payload)
    # command_field_help returns a success flag, not an HTTP status code
    return build_json_response(body, 200 if ok else 400)
# @app.route(nis_api_base + "/sources/<id>/databases/<database_id>/datasets/<dataset_id>", methods=["GET"])
# def data_source_database_dataset_query(id, database_id, dataset_id):
# """
# This is the most powerful data method, allowing to
#
# :param id:
# :param database_id:
# :param dataset_id:
# :return:
# """
# # Recover InteractiveSession
# isess = deserialize_isession_and_prepare_db_session()
# if isess and isinstance(isess, Response):
# return isess
def data_processes():
    """Placeholder for a data-processes endpoint helper; not implemented yet."""
    pass
def nusap_data_pedigree():
    """Placeholder for a NUSAP data-pedigree endpoint helper; not implemented yet."""
    pass
def grammars():
    """Placeholder for a grammars endpoint helper; not implemented yet."""
    pass
def mappings():
    """
    From an external dataset to internal categories

    Placeholder; not implemented yet.
    :return: None
    """
    pass
def hierarchies():
    """
    Placeholder for a hierarchies endpoint helper; not implemented yet.

    :return: None
    """
    # FIX: removed a leftover unused local ("a = 6") that had no effect.
    pass
# -- Test --
@app.route('/test', methods=['GET'])
@app.route(nis_api_base + '/test', methods=['GET'])
def hello():
    """Liveness-test endpoint: logs a debug line and returns a fixed JSON body."""
    logger.debug("LOG!!!")
    return build_json_response({"hello": "world"})
if __name__ == '__main__':
    # Local/debug entry point only; in production the app is served via
    # gunicorn or Docker (deployment recipes kept as comments below).

    # xl = openpyxl.load_workbook("/home/rnebot/Dropbox/nis-internal-tests/issue_report.xlsx", data_only=True)
    # rewrite_xlsx_file(xl)
    # xl.save("/home/rnebot/Downloads/borrame.xlsx")
    # sys.exit(0)

    # from tasks import add
    # from celery.task.control import inspect
    # import time
    # def f():
    #     t = []
    #     for i in range(10):
    #         t.append(add.delay(i, i + 1))
    #     i = inspect()
    #     st = [ti.ready() for ti in t]
    #     while not all(st):
    #         print(f"Completos: {sum(st)}; quedan {len(st)-sum(st)}")
    #         print(i.active())
    #         time.sleep(1)
    #         st = [ti.ready() for ti in t]
    # f()

    # 1) GUNICORN
    # (start REDIS first at localhost:6379. E.g.: docker run --rm --name redis-local -p 6379:6379 redis:alpine)
    #
    # cd ~/AA_MAGIC/nis-nexinfosys
    # export MAGIC_NIS_SERVICE_CONFIG_FILE=/home/rnebot/Dropbox/nis-nexinfosys-config/nis_local.conf
    # gunicorn --bind 0.0.0.0:8080 --workers 3 nexinfosys.restful_service.service_main:app

    # 2) DOCKER. BASIC DEPLOYMENT
    #
    # PREVIOUSLY, COMPILE FRONTEND
    # cd ~/GoogleDrive/AA_MAGIC/nis-frontend
    # npm install
    # rm dist -fr
    # node --max_old_space_size=8192 node_modules/@angular/cli/bin/ng build --prod -c production_local --aot --base-href /nis_client/
    # rm ~/GoogleDrive/AA_MAGIC/nis-nexinfosys/frontend/* -fr
    # cp -r ~/GoogleDrive/AA_MAGIC/nis-frontend/dist/* ~/GoogleDrive/AA_MAGIC/nis-nexinfosys/frontend
    #
    # 2) (continuation) DOCKER COMMANDS (example)
    # docker network create nis-net
    # docker run --rm --name redis-local --net nis-net -p 6379:6379 redis:alpine
    # docker create --name nis-local --net nis-net -p 5000:80 -v /home/rnebot/DATOS/docker_magic_nis:/srv -e MAGIC_NIS_SERVICE_CONFIG_FILE="nis_local_redis_docker.conf" magic-nis

    # cs = CaseStudy()
    # vs1 = CaseStudyVersion()
    # vs1.case_study = cs
    # vs2 = CaseStudyVersion()
    # vs2.case_study = cs
    #
    # lst = [cs, vs1, vs2]
    # d_list = serialize(lst)
    # lst2 = deserialize(d_list)
    # sys.exit(1)

    # >>>>>>>>>> IMPORTANT <<<<<<<<<
    # For debugging in local mode, prepare an environment variable "MAGIC_NIS_SERVICE_CONFIG_FILE", with value "./nis_local.conf"
    # >>>>>>>>>> IMPORTANT <<<<<<<<<

    # >>>>>>>>>> IMPORTANT <<<<<<<<<
    # "cannot connect to X server" error when remote debugging?
    # Execute "Xvfb :99 -ac -noreset" in the remote server and uncomment the following line
    # os.environ["DISPLAY"] = ":99"

    app.run(host='0.0.0.0',
            debug=True,
            use_reloader=False,  # Avoid loading twice the application
            processes=1,
            threaded=False)  # Default port, 5000
| 38.585632 | 227 | 0.625427 | 0 | 0 | 0 | 0 | 106,110 | 0.751046 | 0 | 0 | 63,905 | 0.452319 |
525f8b5dc9574fa7b8d0c0648ce46ac800350ccf | 3,198 | py | Python | readthedocs/doc_builder/backends/mkdocs.py | storagebot/readthedocs.org | caaec6c341cfb0e99838386ae65e3042d690e38e | [
"MIT"
] | null | null | null | readthedocs/doc_builder/backends/mkdocs.py | storagebot/readthedocs.org | caaec6c341cfb0e99838386ae65e3042d690e38e | [
"MIT"
] | null | null | null | readthedocs/doc_builder/backends/mkdocs.py | storagebot/readthedocs.org | caaec6c341cfb0e99838386ae65e3042d690e38e | [
"MIT"
] | null | null | null | import os
import shutil
import codecs
import logging
import zipfile
from django.template import Template, Context
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from builds import utils as version_utils
from core.utils import copy_to_app_servers, copy_file_to_app_servers
from doc_builder.base import BaseBuilder, restoring_chdir
from projects.utils import run
from tastyapi import apiv2
log = logging.getLogger(__name__)
class Builder(BaseBuilder):
    """
    Mkdocs builder

    Builds documentation with ``mkdocs build`` (readthedocs theme) and moves
    the generated ``site`` directory into the project's served build path.
    """

    def clean(self):
        # Nothing to clean up for mkdocs builds.
        pass

    def find_conf_file(self, project, version='latest'):
        """
        Locate the project's mkdocs.yml configuration file.

        :param project: the project whose checkout is searched
        :param version: version slug to search in
        :return: path to the mkdocs.yml file
        :raises ProjectImportError: when no mkdocs.yml can be found
        """
        if project.conf_py_file:
            log.debug('Inserting conf.py file path from model')
            # FIX: the original used self.checkout_path/self.conf_py_file, but
            # both attributes belong to `project` (see the guard above and the
            # project.checkout_path(...) calls in build()/move()).
            return os.path.join(project.checkout_path(version),
                                project.conf_py_file)
        files = project.find('mkdocs.yml', version)
        if not files:
            files = project.full_find('mkdocs.yml', version)
        if len(files) == 1:
            return files[0]
        elif len(files) > 1:
            # Prefer a candidate whose path mentions 'doc' beyond the checkout
            # root (the magic offset 70 is inherited behavior -- TODO confirm).
            for file in files:
                if file.find('doc', 70) != -1:
                    return file
        else:
            # Having this be translatable causes this odd error:
            # ProjectImportError(<django.utils.functional.__proxy__ object at 0x1090cded0>,)
            raise ProjectImportError(u"Conf File Missing. Please make sure you have a mkdocs.yml in your project.")

    @restoring_chdir
    def build(self, **kwargs):
        """Run `mkdocs build --theme=readthedocs` inside the project checkout."""
        project = self.version.project
        os.chdir(project.checkout_path(self.version.slug))
        if project.use_virtualenv:
            # Use the mkdocs binary from the project's virtualenv.
            build_command = "%s build --theme=readthedocs" % (
                project.venv_bin(version=self.version.slug,
                                 bin='mkdocs')
            )
        else:
            build_command = "mkdocs build --theme=readthedocs"
        build_results = run(build_command, shell=True)
        return build_results

    def move(self, **kwargs):
        """Copy the generated `site` directory to the served build path(s)."""
        project = self.version.project
        build_path = os.path.join(project.checkout_path(self.version.slug), 'site')
        if os.path.exists(build_path):
            # Copy the html files.
            target = project.rtd_build_path(self.version.slug)
            if "_" in project.slug:
                new_slug = project.slug.replace('_', '-')
                new_target = target.replace(project.slug, new_slug)
                # Only replace 1, so user_builds doesn't get replaced >:x
                targets = [target, new_target]
            else:
                targets = [target]
            for target in targets:
                if getattr(settings, "MULTIPLE_APP_SERVERS", None):
                    log.info("Copying docs to remote server.")
                    copy_to_app_servers(build_path, target)
                else:
                    if os.path.exists(target):
                        shutil.rmtree(target)
                    log.info("Copying docs on the local filesystem")
                    shutil.copytree(build_path, target)
        else:
            log.warning("Not moving docs, because the build dir is unknown.")
| 38.071429 | 115 | 0.613196 | 2,668 | 0.834271 | 0 | 0 | 529 | 0.165416 | 0 | 0 | 624 | 0.195122 |
52600d13d8ef93a468c1d0ff6267509d796630cf | 1,108 | py | Python | resources/trials/maya/rotateOrderSym.py | adrienparis/Gapalion | 35d66c2d0de05ffb493a4d8753f675999ff9eaab | [
"MIT"
] | null | null | null | resources/trials/maya/rotateOrderSym.py | adrienparis/Gapalion | 35d66c2d0de05ffb493a4d8753f675999ff9eaab | [
"MIT"
] | null | null | null | resources/trials/maya/rotateOrderSym.py | adrienparis/Gapalion | 35d66c2d0de05ffb493a4d8753f675999ff9eaab | [
"MIT"
] | null | null | null | #!/bin/env mayapy
# -- coding: utf-8 --
u"""Ce test vérifie que le transform [LEFT] a bien le même rotateOrder que le transform [RIGHT]
Les [does not exist] indique un problème dans la nomenclature des noms et donc qu'il ne peut tester la symétrie des rotateOrders"""
__author__ = "Adrien PARIS"
__email__ = "a.paris.cs@gmail.com"
import maya.cmds as cmds
title = u"Vérification de la symétrie des rotates orders"
image = ""
def test():
    """Check that every *_L transform has a *_R twin (and vice versa) and
    that paired transforms share the same rotateOrder; see the module
    docstring for how missing twins are reported."""
    ok = True
    problems = []
    for node in cmds.ls(type="transform"):
        suffix = node[-2:]
        if suffix == "_L":
            twin = node[:-2] + "_R"
            if not cmds.objExists(twin):
                # Naming problem: symmetry cannot be tested for this node.
                problems.append("does not exists : " + twin)
                continue
            if cmds.getAttr(node + ".rotateOrder") != cmds.getAttr(twin + ".rotateOrder"):
                ok = False
                problems.append("not symetric : {0: <20} -> \t \t {1: <24}".format(node, twin))
        if suffix == "_R":
            # Only the existence of the _L twin is checked here; the
            # rotateOrder comparison is done once, from the _L side.
            twin = node[:-2] + "_L"
            if not cmds.objExists(twin):
                problems.append("does not exists : " + twin)
                continue
    return ok, problems
5260b4128c7ce3de3c890338aef16057091e2b2c | 898 | py | Python | prompt2.py | lreicher/astr-19 | 1fed38bdc76c2033d97084e35fce3585148f0917 | [
"MIT"
] | null | null | null | prompt2.py | lreicher/astr-19 | 1fed38bdc76c2033d97084e35fce3585148f0917 | [
"MIT"
] | 1 | 2022-03-31T17:57:09.000Z | 2022-03-31T18:04:56.000Z | prompt2.py | lreicher/astr-19 | 1fed38bdc76c2033d97084e35fce3585148f0917 | [
"MIT"
] | null | null | null | # Lucas Reicher Prompt #3 - Coding Journal #1
def main():
    """Demonstrate arithmetic on mixed numeric types, printing each result
    together with its Python type."""
    operations = (
        ("Addition", "+", 1.342, 3.433),        # float + float -> float
        ("Subtraction", "-", 5, 2),             # int - int -> int
        ("Multiplication", "*", 1.342, 3),      # float * int -> float
    )
    template = "{0}: {1} {2} {3} = {4} with type = {5}"
    for label, symbol, a, b in operations:
        if symbol == "+":
            result = a + b
        elif symbol == "-":
            result = a - b
        else:
            result = a * b
        print(template.format(label, a, symbol, b, result, type(result)))


if __name__ == "__main__":
    main()
| 29.933333 | 119 | 0.567929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 389 | 0.433185 |
5261c07136e65ccd2846c650bf8fdec8aaae6fb9 | 7,575 | py | Python | kinbot/hir.py | rubenvdvijver/KinBot | 5bbb29c087b53bc63ffbb1d393ab4e217390eb82 | [
"BSD-3-Clause"
] | null | null | null | kinbot/hir.py | rubenvdvijver/KinBot | 5bbb29c087b53bc63ffbb1d393ab4e217390eb82 | [
"BSD-3-Clause"
] | null | null | null | kinbot/hir.py | rubenvdvijver/KinBot | 5bbb29c087b53bc63ffbb1d393ab4e217390eb82 | [
"BSD-3-Clause"
] | null | null | null | ###################################################
## ##
## This file is part of the KinBot code v2.0 ##
## ##
## The contents are covered by the terms of the ##
## BSD 3-clause license included in the LICENSE ##
## file, found at the root. ##
## ##
## Copyright 2018 National Technology & ##
## Engineering Solutions of Sandia, LLC (NTESS). ##
## Under the terms of Contract DE-NA0003525 with ##
## NTESS, the U.S. Government retains certain ##
## rights to this software. ##
## ##
## Authors: ##
## Judit Zador ##
## Ruben Van de Vijver ##
## ##
###################################################
import os,sys
import time
import logging
import numpy as np
import matplotlib.pyplot as plt
from constants import *
from stationary_pt import *
from zmat import *
from qc import *
import par
def generate_hir_geoms(species, natom, atom, mult, charge, cart, wellorts):
    """
    Generate the starting geometries for the 1D hindered internal rotation
    (HIR) scans of every rotatable dihedral of `species`, and submit a
    constrained job (qc_hir) for each scan point.

    :param species: stationary point object holding dihedrals and HIR results
    :param natom: number of atoms
    :param atom: list of atomic symbols
    :param mult: spin multiplicity
    :param charge: molecular charge
    :param cart: cartesian coordinates of the optimized geometry
    :param wellorts: truthy for TS-style job naming, falsy for wells
                     (see test_hir) -- TODO confirm exact semantics
    :return: 0
    """
    species.hir_status = []
    species.hir_energies = []
    species.hir_geoms = []
    # One status/energy/geometry slot per scan point, for every rotor.
    while len(species.hir_status) < len(species.dihed):
        species.hir_status.append([-1 for i in range(par.nrotation)])
        species.hir_energies.append([-1 for i in range(par.nrotation)])
        species.hir_geoms.append([[] for i in range(par.nrotation)])
    for rotor in range(len(species.dihed)):
        cart = np.asarray(cart)
        # Build a z-matrix with the scanned dihedral as the 4th-atom torsion.
        zmat_atom, zmat_ref, zmat, zmatorder = make_zmat_from_cart(species, rotor, natom, atom, cart, 0)
        #first element has same geometry ( TODO: this shouldn't be recalculated)
        cart_new = make_cart_from_zmat(zmat, zmat_atom, zmat_ref, natom, atom, zmatorder)
        # 1-based atom indices defining the scanned dihedral (for the QC input).
        fi = [(zi+1) for zi in zmatorder[:4]]
        qc_hir(species,cart_new,wellorts,natom,atom,mult,charge,rotor,0,[fi])
        for ai in range(1,par.nrotation):
            # Advance the scanned dihedral by one grid step and propagate the
            # rotation to atoms whose torsion is defined relative to it.
            # NOTE(review): reference codes 4 and 1 both trigger the shift --
            # confirm they are mutually exclusive in zmat_ref.
            ang = 360. / float(par.nrotation)
            zmat[3][2] += ang
            for i in range(4, natom):
                if zmat_ref[i][2] == 4:
                    zmat[i][2] += ang
                if zmat_ref[i][2] == 1:
                    zmat[i][2] += ang
            cart_new = make_cart_from_zmat(zmat, zmat_atom, zmat_ref, natom, atom, zmatorder)
            qc_hir(species,cart_new,wellorts,natom,atom,mult,charge,rotor,ai,[fi])
    return 0
def test_hir(species,natom,atom,mult,charge,wellorts):
    """
    Poll the HIR jobs of all rotors of `species` and harvest finished ones.

    Status codes per scan point: -1 = still pending, 0 = success,
    1 = failed (either the QC job failed or the optimized geometry drifted
    too far from the original).

    :return: 0
    """
    for rotor in range(len(species.dihed)):
        for ai in range(par.nrotation):
            if species.hir_status[rotor][ai] == -1:
                # Job names use species.name when wellorts is truthy,
                # chemid otherwise (same scheme as in check_hir).
                if wellorts:
                    job = 'hir/' + species.name + '_hir_' + str(rotor) + '_' + str(ai).zfill(2)
                else:
                    job = 'hir/' + str(species.chemid) + '_hir_' + str(rotor) + '_' + str(ai).zfill(2)
                err, geom = get_qc_geom(job, natom)
                if err == 1: #still running
                    continue
                elif err == -1: #failed
                    species.hir_status[rotor][ai] = 1
                    species.hir_energies[rotor][ai] = -1
                    species.hir_geoms[rotor][ai] = geom
                else:
                    #check if all the bond lenghts are within 15% or the original bond lengths
                    if equal_geom(species.bond,species.geom,geom,0.15):
                        err, energy = get_qc_energy(job)
                        species.hir_status[rotor][ai] = 0
                        species.hir_energies[rotor][ai] = energy
                        species.hir_geoms[rotor][ai] = geom
                    else:
                        # Geometry changed too much: treat as a failed point.
                        species.hir_status[rotor][ai] = 1
                        species.hir_energies[rotor][ai] = -1
                        species.hir_geoms[rotor][ai] = geom
    return 0
def check_hir(species, natom, atom, mult, charge, wellorts, wait = 0):
    """
    Check for hir calculations and optionally wait for them to finish

    When all scan points of all rotors are done, write each rotor's profile
    to file and append its Fourier fit to species.hir_fourier.

    :param wait: when truthy, block (polling every second) until all HIR
                 points are finished
    :return: 1 when all rotors are finished and fitted, 0 otherwise
    """
    while 1:
        #check if all the calculations are finished
        test_hir(species,natom,atom,mult,charge,wellorts)
        if all([all([test >= 0 for test in status]) for status in species.hir_status]):
            for rotor in range(len(species.dihed)):
                # Same job-naming scheme as test_hir (name for TS, chemid for wells).
                if wellorts:
                    job = species.name + '_hir_' + str(rotor)
                else:
                    job = str(species.chemid) + '_hir_' + str(rotor)
                # Scan grid in radians, one full turn in par.nrotation steps.
                angles = [i * 2 * np.pi / float(par.nrotation) for i in range(par.nrotation)]
                #write profile to file
                write_profile(species,rotor,job,atom,natom)
                species.hir_fourier.append(fourier_fit(job,angles,species.hir_energies[rotor],species.hir_status[rotor],plot_fit = 0))
            return 1
        else:
            if wait:
                time.sleep(1)
            else:
                return 0
def write_profile(species, rotor, job, atom, natom):
    """
    Write a molden-readable xyz file with the HIR scan: one geometry block
    per scan point, with the energy on the comment line.

    :param species: stationary point object with hir_energies/hir_geoms filled
    :param rotor: index of the scanned dihedral
    :param job: base name of the output file (without extension)
    :param atom: list of atomic symbols
    :param natom: number of atoms
    """
    # FIX: use a context manager so the handle is closed even if an entry is
    # malformed, and avoid shadowing the builtin `file`.
    with open('hir/' + job + '.xyz', 'w') as f:
        for i in range(par.nrotation):
            s = str(natom) + '\n'
            s += 'energy = ' + str(species.hir_energies[rotor][i]) + '\n'
            for j, at in enumerate(atom):
                x, y, z = species.hir_geoms[rotor][i][j]
                s += '{} {:.8f} {:.8f} {:.8f}\n'.format(at, x, y, z)
            f.write(s)
def fourier_fit(job, angles, energies, status, plot_fit=0):
    """
    Create a alternative fourier formulation of a hindered rotor
    (Vanspeybroeck et al.) profile; the angles are in radians and the
    energies in hartree (converted to kcal/mol internally).

    Failed scan points (status == 1) get their energy replaced in-place by
    the fitted value.

    :param plot_fit: plot the profile and the fit to a png
    :return: the fitted coefficient vector (first half cosine terms,
             second half sine terms)
    """
    n_terms = 6  # the number of sine and cosine terms
    # Keep only successfully converged points for the fit.
    ang = [angles[i] for i in range(len(status)) if status[i] == 0]
    ens = [(energies[i] - energies[0]) * AUtoKCAL for i in range(len(status)) if status[i] == 0]
    # FIX: the original read `par.par.nrotation`, which raises AttributeError;
    # every other use in this module is `par.nrotation`.
    if len(ens) < par.nrotation - 2:
        # more than two points are off
        logging.warning("Hindered rotor potential has more than 2 failures for " + job)
    X = np.zeros((len(ang), 2 * n_terms))
    for i, ai in enumerate(ang):
        for j in range(n_terms):
            X[i][j] = (1 - np.cos((j + 1) * ai))
            X[i][j + n_terms] = np.sin((j + 1) * ai)
    # rcond=None: use NumPy's current default cutoff (silences FutureWarning).
    A = np.linalg.lstsq(X, np.array(ens), rcond=None)[0]
    for i, si in enumerate(status):
        if si == 1:
            # Replace failed points with their fitted value.
            energies[i] = energies[0] + get_fit_value(A, angles[i]) / AUtoKCAL
    if plot_fit:
        # fit the plot to a png file
        plt.plot(ang, ens, 'ro')
        fit_angles = [i * 2. * np.pi / 360 for i in range(360)]
        fit_energies = [get_fit_value(A, ai) for ai in fit_angles]
        plt.plot(fit_angles, fit_energies)
        plt.xlabel('Dihedral angle [radians]')
        plt.ylabel('Energy [kcal/mol]')
        plt.savefig('hir_profiles/{}.png'.format(job))
        plt.clf()
    return A
def get_fit_value(A, ai):
    """
    Evaluate the fitted hindered-rotor potential at angle `ai` (radians).

    :param A: Fourier coefficients as returned by fourier_fit (first half
              cosine terms, second half sine terms)
    :param ai: angle in radians
    :return: fitted energy (kcal/mol)
    """
    # FIX: use integer division; on Python 3 `len(A) / 2` is a float and
    # range() would raise TypeError.
    n_terms = len(A) // 2
    e = 0.
    for j in range(n_terms):
        e += A[j] * (1 - np.cos((j + 1) * ai))
        e += A[j + n_terms] * np.sin((j + 1) * ai)
    return e
def main():
    """
    Calculate the 1D hindered rotor profiles
    Create a fourier fit representation of the profile
    """
    # NOTE(review): the body was never implemented; calling main() is a no-op.


if __name__ == "__main__":
    main()
| 38.451777 | 134 | 0.525413 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,089 | 0.275776 |
52634bcaed64b83728892e97207f26eb73bea3e6 | 21,504 | py | Python | data/ffnet.py | starrybouquet/sj-fanfic-recs | c2120c1461f53947cba4b5c5bdec634ae2b0b144 | [
"MIT"
] | null | null | null | data/ffnet.py | starrybouquet/sj-fanfic-recs | c2120c1461f53947cba4b5c5bdec634ae2b0b144 | [
"MIT"
] | null | null | null | data/ffnet.py | starrybouquet/sj-fanfic-recs | c2120c1461f53947cba4b5c5bdec634ae2b0b144 | [
"MIT"
] | null | null | null | import re, requests, bs4, unicodedata
from datetime import timedelta, date, datetime
from time import time
# Constants
root = 'https://www.fanfiction.net'
# REGEX MATCHES
# STORY REGEX
_STORYID_REGEX = r"var\s+storyid\s*=\s*(\d+);"
_CHAPTER_REGEX = r"var\s+chapter\s*=\s*(\d+);"
_CHAPTERS_REGEX = r"Chapters:\s*(\d+)\s*"
_WORDS_REGEX = r"Words:\s*([\d,]+)\s*"
_TITLE_REGEX = r"var\s+title\s*=\s*'(.+)';"
_DATEP_REGEX = r"Published:\s*<span.+?='(\d+)'>"
_DATEU_REGEX = r"Updated:\s*<span.+?='(\d+)'>"
# USER REGEX
_USERID_REGEX = r"var\s+userid\s*=\s*(\d+);"
_USERID_URL_EXTRACT = r".*/u/(\d+)"
_USERNAME_REGEX = r"<link rel=\"canonical\" href=\"//www.fanfiction.net/u/\d+/(.+)\">"
_USER_STORY_COUNT_REGEX = r"My Stories\s*<span class=badge>(\d+)<"
_USER_FAVOURITE_COUNT_REGEX = r"Favorite Stories\s*<span class=badge>(\d+)<"
_USER_FAVOURITE_AUTHOR_COUNT_REGEX = r"Favorite Authors\s*<span class=badge>(\d+)<"
# Useful for generating a review URL later on
_STORYTEXTID_REGEX = r"var\s+storytextid\s*=\s*storytextid=(\d+);"
# REGEX that used to parse reviews page
_REVIEW_COMPLETE_INFO_REGEX = r"img class=.*?</div"
_REVIEW_USER_NAME_REGEX = r"> *([^< ][^<]*)<"
_REVIEW_CHAPTER_REGEX = r"<small style=[^>]*>([^<]*)<"
_REVIEW_TIME_REGEX = r"<span data[^>]*>([^<]*)<"
_REVIEW_TEXT_REGEX = r"<div[^>]*>([^<]*)<"
# Used to parse the attributes which aren't directly contained in the
# JavaScript and hence need to be parsed manually
_NON_JAVASCRIPT_REGEX = r'Rated:(.+?)</div>'
_HTML_TAG_REGEX = r'<.*?>'
# Needed to properly decide if a token contains a genre or a character name
_GENRES = [
'General', 'Romance', 'Humor', 'Drama', 'Poetry', 'Adventure', 'Mystery',
'Horror', 'Parody', 'Angst', 'Supernatural', 'Suspense', 'Sci-Fi',
'Fantasy', 'Spiritual', 'Tragedy', 'Western', 'Crime', 'Family', 'Hurt',
'Comfort', 'Friendship'
]
# TEMPLATES
_STORY_URL_TEMPLATE = 'https://www.fanfiction.net/s/%d'
_CHAPTER_URL_TEMPLATE = 'https://www.fanfiction.net/s/%d/%d'
_USERID_URL_TEMPLATE = 'https://www.fanfiction.net/u/%d'
_DATE_COMPARISON = date(1970, 1, 1)
_DATE_FORMAT = '%Y%m%d'
def _parse_string(regex, source):
"""Returns first group of matched regular expression as string."""
return re.search(regex, source).group(1)
def _parse_integer(regex, source):
"""Returns first group of matched regular expression as integer."""
match = re.search(regex, source).group(1)
match = match.replace(',', '')
return int(match)
def _parse_date(regex, source):
    """Return the Unix timestamp captured by `regex` in `source` as a date."""
    seconds = _parse_integer(regex, source)
    return _DATE_COMPARISON + timedelta(seconds=seconds)
def _unescape_javascript_string(string_):
"""Removes JavaScript-specific string escaping characters."""
return string_.replace("\\'", "'").replace('\\"', '"').replace('\\\\', '\\')
def _visible_filter(element):
if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
return False
element = unicodedata.normalize('NFKD', element).encode('ascii', 'ignore')
if re.match(r'<!--.*-->', str(element)):
return False
return True
def _get_int_value_from_token(token, prefix):
if not token.startswith(prefix):
raise ValueError("int token doesn't starts with given prefix")
else:
return int(token[len(prefix):].replace(',', ''))
def _get_date_value_from_token(token, prefix):
if not token.startswith(prefix):
raise ValueError("date token doesn't starts with given prefix")
else:
try:
return datetime.strptime(token[len(prefix):], '%m/%d/%Y')
except ValueError:
return datetime.today()
def _get_key_of_first_positive(f, d):
"""
returns key k of first item in l for which f(k) == True
or None
"""
for key, value in d.items():
if f(key) == True:
return key
return None
class Story(object):
    """A story on fanfiction.net (see __init__ for the attribute list)."""

    # Attributes included in get_json_dump() by default.
    SERIALIZED_ATTRS = [
        'title',
        'id',
        'timestamp',
        'description',
        'fandoms',
        'author_id',
        'chapter_count',
        'word_count',
        'date_published',
        'date_updated',
        'rated',
        'language',
        'genre',
        'characters',
        'reviews',
        'favs',
        'followers',
        'complete'
    ]
    # Attributes serialized through strftime(_DATE_FORMAT).
    DATE_ATTRS = [
        'timestamp',
        'date_published',
        'date_updated'
    ]

    def __init__(self, url=None, id=None):
        """ A story on fanfiction.net
        If both url, and id are provided, url is used.

        :type id: int
        :param url: The url of the story.
        :param id: The story id of the story.

        Attributes:
            id (int): The story id.
            description (str): The text description of the story
            timestamp: The timestamp of moment when data was consistent with site
            fandoms [str]: The fandoms to which the story belongs
            chapter_count (int); The number of chapters.
            word_count (int): The number of words.
            author_id (int): The user id of the author.
            title (str): The title of the story.
            date_published (date): The date the story was published.
            date_updated (date): The date of the most recent update.
            rated (str): The story rating.
            language (str): The story language.
            genre [str]: The genre(s) of the story.
            characters [str]: The character(s) of the story.
            reviews (int): The number of reviews of the story.
            favs (int): The number of user which has this story in favorite list
            followers (int): The number of users who follow the story
            complete (bool): True if the story is complete, else False.
        """
        self.inited = False
        self.id = id
        self.url = url
        if id is None:
            if url is None:
                raise ValueError("There must be a url or an id.")
            else:
                # FIX: the original referenced an undefined name `source`
                # here (NameError before any page was fetched). Extract the
                # id from the url itself, mirroring _USERID_URL_EXTRACT.
                self.id = _parse_integer(r".*/s/(\d+)", url)
        else:
            self.url = _STORY_URL_TEMPLATE % int(self.id)
            self.id = int(self.id)

    def download_data(self, timeout=5):
        """Fetch the story page and populate all metadata attributes."""
        self.timestamp = datetime.now()
        source = requests.get(self.url, timeout=timeout)
        source = source.text
        soup = bs4.BeautifulSoup(source, 'html.parser')
        self.author_id = _parse_integer(_USERID_REGEX, source)
        self.title = _unescape_javascript_string(_parse_string(_TITLE_REGEX, source).replace('+', ' '))
        fandom_chunk = soup.find('div', id='pre_story_links').find_all('a')[-1].get_text().replace('Crossover', '')
        self.fandoms = [fandom.strip() for fandom in fandom_chunk.split('+')]
        self.description = soup.find('div', {'style': 'margin-top:2px'}).get_text()
        # Tokens of information that aren't directly contained in the
        # JavaScript, need to manually parse and filter those
        tags = re.search(_NON_JAVASCRIPT_REGEX, source.replace('\n', ' ')).group(0)
        tokens = [token.strip() for token in
                  re.sub(_HTML_TAG_REGEX, '', tags).split('-')]
        self._parse_tags(tokens)
        self.inited = True

    def _parse_tags(self, tokens):
        """
        parse desription of story such as 'Rated: T - English - Humor/Adventure - Chapters: 2 - Words: 131,097 - Reviews: 537 - Favs: 2,515 - Follows: 2,207 - Updated: Jul 27, 2016 - Published: Dec 17, 2009 - Harry P.'
        splitted into tokens list by '-' character
        This functions fill all field of the self object except: id, author_id, title, fandoms, timestamp
        """
        # skipping tokens 'Crossover' and token which contains fandoms
        while not tokens[0].startswith('Rated:'):
            tokens = tokens[1:]
        # Both tokens are constant and always available
        self.rated = tokens[0].replace('Rated:', '').replace('Fiction', '').strip()
        self.language = tokens[1]
        tokens = tokens[2:]
        # there can be token with the list of genres
        if tokens[0] in _GENRES or '/' in tokens[0] and all(token in _GENRES for token in tokens[0].split('/')):
            self.genre = tokens[0].split('/')
            tokens = tokens[1:]
        else:
            self.genre = []
        # deleting useless 'id: ...' token
        if tokens[-1].startswith('id:'):
            tokens = tokens[:-1]
        # and if story is complete the last token contain 'Complete'
        if 'Complete' in tokens[-1]:
            self.complete = True
            tokens = tokens[:-1]
        else:
            self.complete = False
        # except those there are 4 possible kind of tokens: tokens with int data, tokens with date data, story id token,
        # and token with characters/pairings
        int_tokens = {'Chapters: ': 'chapter_count', 'Words: ': 'word_count', 'Reviews: ': 'reviews',
                      'Favs: ': 'favs', 'Follows: ': 'followers'}
        date_tokens = {'Updated: ': 'date_updated', 'Published: ': 'date_published'}
        for token in tokens:
            int_k = _get_key_of_first_positive(lambda s: token.startswith(s), int_tokens)
            date_k = _get_key_of_first_positive(lambda s: token.startswith(s), date_tokens)
            if int_k is not None:
                setattr(self, int_tokens[int_k], _get_int_value_from_token(token, int_k))
            elif date_k is not None:
                setattr(self, date_tokens[date_k], _get_date_value_from_token(token, date_k))
            else:
                # Anything left over is the characters/pairings token.
                self.characters = [c.translate(str.maketrans('', '', '[]')).strip() for c in token.split(',')]
        # now we have to fill field which could be left empty
        if not hasattr(self, 'chapter_count'):
            self.chapter_count = 1
        for field in int_tokens.values():
            if not hasattr(self, field):
                setattr(self, field, 0)
        if not hasattr(self, 'date_updated'):
            self.date_updated = self.date_published
        if not hasattr(self, 'characters'):
            self.characters = []

    def _parse_from_storylist_format(self, story_chunk, author_id):
        """
        Parse story from html chunk

        :param story_chunk: a bs4 element from a story-list page
        :param author_id: the known author id, or a falsy value to extract
                          it from the chunk
        """
        if author_id:
            self.author_id = author_id
        else:
            self.author_id = _parse_integer(_USERID_URL_EXTRACT, str(story_chunk))
        self.timestamp = datetime.now()
        self.fandoms = [s.strip() for s in story_chunk.get('data-category').split('&')]
        self.title = story_chunk.get('data-title')
        self.description = str(story_chunk.find('div', {'class': 'z-indent z-padtop'}))
        # save only parts between div tags
        self.description = self.description[self.description.find('>') + 1:]
        self.description = self.description[:self.description.find('<div', 4)]
        tags = story_chunk.find('div', {'class': 'z-padtop2 xgray'}).get_text()
        self._parse_tags([token.strip() for token in tags.split('-')])
        self.inited = True

    def get_chapters(self):
        """
        A generator for all chapters in the story.
        :return: A generator to fetch chapter objects.
        """
        for number in range(1, self.chapter_count + 1):
            yield Chapter(story_id=self.id, chapter=number)

    def get_user(self):
        """
        :return: The user object of the author of the story.
        """
        return User(id=self.author_id)

    def get_json_dump(self, attrs=None):
        """
        :param attrs: attribute names to serialize (defaults to SERIALIZED_ATTRS)
        :return: a JSON-serializable dict of the selected attributes
        """
        result = {}
        for attr in attrs or self.SERIALIZED_ATTRS:
            if attr in self.DATE_ATTRS:
                result[attr] = getattr(self, attr).strftime(_DATE_FORMAT)
            else:
                result[attr] = getattr(self, attr)
        return result

    def print_info(self, attrs=None):
        """
        Print information held about the story.

        :param attrs: A list of attribute names to print information for.
        :return: void
        """
        assert self.inited
        if not attrs:
            attrs = self.SERIALIZED_ATTRS
        for attr in attrs:
            print("%12s\t%s" % (attr, getattr(self, attr)))

    def get_reviews(self):
        """
        A generator for all reviews in the story.
        :return: A generator to fetch reviews.
        """
        return ReviewsGenerator(self.id)

    # Method alias which allows the user to treat the get_chapters method like
    # a normal property if no manual opener is to be specified.
    chapters = property(get_chapters)
class ReviewsGenerator(object):
    """
    Class that generates review in chronological order

    Attributes:
        base_url (str): story's review url without the page number appended
        page_number (int): number of the review page fetched last
        reviews_cache List(str): raw review chunks of the current page
        skip_reviews_number (int): how many cached reviews were already yielded
    """
    def __init__(self, story_id, chapter=0):
        """
        If chapter unspecified then generator generates review for all chapters
        """
        self.story_id = story_id
        self.base_url = root + '/r/' + str(story_id) + '/' + str(chapter) + '/'
    def __iter__(self):
        # Reset the paging/caching state so the generator can be re-iterated.
        self.page_number = 0
        self.reviews_cache = []
        self.skip_reviews_number = 0
        return self
    def __next__(self):
        self.skip_reviews_number += 1
        # Serve from the cached page while it still has unread reviews.
        if len(self.reviews_cache) >= self.skip_reviews_number:
            return Review(self.story_id, self.reviews_cache[self.skip_reviews_number - 1])
        # Cache exhausted: fetch and parse the next review page.
        self.page_number += 1
        page = self._downloadReviewPage(self.page_number)
        self.reviews_cache = re.findall(_REVIEW_COMPLETE_INFO_REGEX, page, re.DOTALL)
        if len(self.reviews_cache) == 0:
            # Empty page means we ran past the last review.
            raise StopIteration
        self.skip_reviews_number = 1
        return Review(self.story_id, self.reviews_cache[0])
    def _downloadReviewPage(self, page_number):
        # Fetch the raw HTML of one review page.
        url = self.base_url + str(page_number) + '/'
        return requests.get(url).text
class Review(object):
    """
    A single review of a fanfiction story on fanfiction.net.

    Attributes:
        story_id (int): story ID
        user_id (int): ID of the reviewer, or None for anonymous reviews
        user_name (str): reviewer name (or pseudonym for anonymous reviews)
        chapter (str): chapter name
        time_ago (str): how long ago the review was submitted (the format may
            differ from what a browser shows, because fanfiction.net serves
            different markup to non-browser clients)
        text (str): review text
    """

    def __init__(self, story_id, unparsed_info):
        """
        Not intended to be invoked outside of the Story and Chapter classes.

        :param story_id: story ID
        :param unparsed_info: raw html chunk holding the remaining fields
        """
        self.story_id = story_id
        self.chapter = _parse_string(_REVIEW_CHAPTER_REGEX, unparsed_info)
        self.user_name = _parse_string(_REVIEW_USER_NAME_REGEX, unparsed_info)
        self.text = _parse_string(_REVIEW_TEXT_REGEX, unparsed_info)
        submitted = _parse_string(_REVIEW_TIME_REGEX, unparsed_info)
        # The site abbreviates recent timestamps ('8h', '15m'); append
        # ' ago' so they read like the older-style ones.
        if submitted[-1] == 'h' or submitted[-1] == 'm':
            submitted += ' ago'
        self.time_ago = submitted
        # Anonymous reviews carry no /u/<id> link in the chunk.
        if re.search(_USERID_URL_EXTRACT, unparsed_info) is None:
            self.user_id = None
        else:
            self.user_id = _parse_integer(_USERID_URL_EXTRACT, unparsed_info)
class Chapter(object):
    def __init__(self, url=None, story_id=None, chapter=None):
        """ A single chapter in a fanfiction story, on fanfiction.net

        :param url: The url of the chapter.
        :param story_id: The story id of the story of the chapter.
        :param chapter: The chapter number of the story.

        Attributes:
            story_id (int): Story ID
            number (int): Chapter number
            story_text_id (int): ?
            title (str): Title of the chapter, or title of the story.
            raw_text (str): The raw HTML of the story.
            text_list List(str): List of unicode strings for each paragraph.
            text (str): Visible text of the story.
        """
        if url is None:
            if story_id is None:
                raise Exception('A URL or story id must be entered.')
            elif chapter is None:
                raise Exception('Both a stroy id and chapter number must be provided')
            elif story_id and chapter:
                url = _CHAPTER_URL_TEMPLATE % (story_id, chapter)
        source = requests.get(url)
        source = source.text
        self.story_id = _parse_integer(_STORYID_REGEX, source)
        self.number = _parse_integer(_CHAPTER_REGEX, source)
        self.story_text_id = _parse_integer(_STORYTEXTID_REGEX, source)
        soup = bs4.BeautifulSoup(source, 'html.parser')
        select = soup.find('select', {'name': 'chapter'})
        if select:
            # There are multiple chapters available, use chapter's title
            self.title = select.find('option', selected=True).string.split(None, 1)[1]
        else:
            # No multiple chapters, one-shot or only a single chapter released
            # until now; for the lack of a proper chapter title use the story's.
            # FIX: dropped a Python-2 leftover `.decode()`: the helper already
            # returns `str`, so `.decode()` raised AttributeError on Python 3
            # (compare the identical call in Story.download_data).
            self.title = _unescape_javascript_string(_parse_string(_TITLE_REGEX, source))
        soup = soup.find('div', id='storytext')
        # Try to remove AddToAny share buttons
        try:
            soup.find('div', {'class': lambda class_: class_ and 'a2a_kit' in class_}).extract()
        except AttributeError:
            pass
        # Normalize HTML tag attributes
        for hr in soup('hr'):
            del hr['size']
            del hr['noshade']
        self.raw_text = soup.decode()
        texts = soup.findAll(text=True)
        self.text_list = list(filter(_visible_filter, texts))
        self.text = '\n'.join(self.text_list)

    def get_reviews(self):
        """
        A generator for all reviews for that chapter
        :return: A generator to fetch reviews.
        """
        return ReviewsGenerator(self.story_id, self.number)
class User(object):
    def __init__(self, url=None, id=None):
        """ A user page on fanfiction.net

        :param url: The url of the user profile.
        :param id: The numeric id of the user profile.

        Attributes:
            id (int): User id
            timestamp (datetime): Time of last update of the downloaded profile
            stories [Story]: The list of stories written by the user
            favorite_stories [Story]: The list of the user's favorite stories
            favorite_authors [User]: The list of the user's favorite authors
            username (str): The user's display name (set by download_data)
        """
        self.id = id
        self.url = url
        if id is None:
            if url is None:
                raise ValueError("There must be a url or an id.")
            else:
                # Derive the numeric user id from the profile URL.
                self.id = _parse_integer(_USERID_URL_EXTRACT, url)
        else:
            # Build the canonical profile URL from the (possibly string) id.
            self.url = _USERID_URL_TEMPLATE % int(self.id)
            self.id = int(self.id)

    def download_data(self, timeout=5):
        """Fetch the profile page and populate username, stories and favorites."""
        self.timestamp = datetime.now()
        source = requests.get(self.url, timeout=timeout)
        source = source.text
        soup = bs4.BeautifulSoup(source, 'html.parser')
        self.username = _parse_string(_USERNAME_REGEX, source)
        self.stories = self._get_stories_from_profile(soup, fav_stories=False)
        self.favorite_stories = self._get_stories_from_profile(soup, fav_stories=True)
        self.favorite_authors = self._get_favorite_authors(soup)

    def get_json_dump(self):
        """Return a JSON-serializable dict summarizing the downloaded profile.

        Requires download_data() to have been called first.
        """
        return {
            'id': self.id,
            'timestamp': self.timestamp.strftime(_DATE_FORMAT),
            'username': self.username,
            'stories': [story.id for story in self.stories],
            'favorite_stories': [story.id for story in self.favorite_stories],
            'favorite_authors': [user.id for user in self.favorite_authors]
        }

    def _get_stories_from_profile(self, soup, fav_stories=True):
        """Collect Story objects from the profile's story listings.

        :param fav_stories: True parses the 'favstories' blocks (favorites),
            False parses the 'mystories' blocks (the user's own stories).
        """
        if fav_stories:
            target_class = 'favstories'
        else:
            target_class = 'mystories'
        favourite_stories = soup.findAll('div', {'class': target_class})
        result = []
        for story_chunk in favourite_stories:
            story = Story(id=story_chunk.get('data-storyid'))
            # Own stories are attributed to this user; favorites keep author unset.
            story._parse_from_storylist_format(story_chunk, author_id=None if fav_stories else self.id)
            result.append(story)
        return result

    def _get_favorite_authors(self, soup):
        """Collect User objects for every author link found in the favorites table."""
        result = []
        for column in soup.findAll('td', {'style': 'line-height:150%'}):
            for author_tag in column.findAll('a', href=re.compile(r".*/u/(\d+)/.*")):
                author_url = author_tag.get('href')
                # NOTE(review): 'root' is presumably a module-level site base URL
                # defined outside this chunk -- confirm it joins cleanly with href.
                author_url = root + author_url
                result.append(User(author_url))
        return result
| 39.098182 | 248 | 0.606213 | 17,623 | 0.819522 | 269 | 0.012509 | 0 | 0 | 0 | 0 | 9,177 | 0.426758 |
52638e3a09f767e9f0fd09e303642391fc9189e4 | 1,506 | py | Python | maglearn_back/database.py | maglearn/maglearn-back | cb5d8623f26e207b870c09c80cbc59911ab23794 | [
"MIT"
] | null | null | null | maglearn_back/database.py | maglearn/maglearn-back | cb5d8623f26e207b870c09c80cbc59911ab23794 | [
"MIT"
] | null | null | null | maglearn_back/database.py | maglearn/maglearn-back | cb5d8623f26e207b870c09c80cbc59911ab23794 | [
"MIT"
] | null | null | null | import sqlite3
from datetime import datetime
import click
from flask import current_app, g
from flask.cli import with_appcontext
from flask_sqlalchemy import SQLAlchemy, Model
from sqlalchemy import Column, Integer, TIMESTAMP, Boolean
class Base(Model):
    """Shared declarative base model: integer primary key, create/update
    timestamps, and a soft-delete flag."""

    # NOTE: this id implementation does not support inheritance.
    id = Column(Integer, primary_key=True, autoincrement=True)
    create_ts = Column(TIMESTAMP, default=datetime.now)
    update_ts = Column(TIMESTAMP, default=datetime.now, onupdate=datetime.now)
    deleted = Column(Boolean, default=False)

    def __repr__(self):
        # e.g. "<Dataset #3 >"
        return '<{} #{} >'.format(type(self).__name__, self.id)
db = SQLAlchemy(model_class=Base)
def get_db():
    """Return the request-scoped SQLite connection, creating it on first use.

    The connection is cached on Flask's ``g`` object so repeated calls within
    one request reuse the same connection.
    """
    if 'db' not in g:
        connection = sqlite3.connect(
            current_app.config['DATABASE'],
            detect_types=sqlite3.PARSE_DECLTYPES
        )
        # Rows behave like mappings (column access by name).
        connection.row_factory = sqlite3.Row
        g.db = connection
    return g.db
def close_db(e=None):
    """Close and discard the request-scoped database connection, if any.

    Signature matches Flask's teardown callback contract (``e`` is the
    optional exception that ended the request).
    """
    connection = g.pop('db', None)
    if connection is not None:
        connection.close()
def init_db():
    """Initialize database schema.

    Drops all existing tables first, so any existing data is destroyed.
    """
    db.drop_all()
    db.create_all()
@click.command('init-db')
@with_appcontext
def init_db_command():
    """Clear the existing data and create new tables."""
    init_db()
    # CLI confirmation for the user.
    click.echo('Initialized the database.')
def init_app(app):
    """Register database lifecycle hooks and CLI commands on the Flask app."""
    db.init_app(app)
    # Close the per-request sqlite connection when the app context tears down.
    app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
| 23.904762 | 78 | 0.687915 | 398 | 0.264276 | 0 | 0 | 180 | 0.119522 | 0 | 0 | 333 | 0.221116 |
52643d3c51f0d9cbab7251a5238d3b6d7605e95b | 5,667 | py | Python | legacy/models/ioc_mlp.py | gsc2001/ConvexNet | a17609bd5bca0a02b6330b1ad8035f2b280109f0 | [
"MIT"
] | null | null | null | legacy/models/ioc_mlp.py | gsc2001/ConvexNet | a17609bd5bca0a02b6330b1ad8035f2b280109f0 | [
"MIT"
] | null | null | null | legacy/models/ioc_mlp.py | gsc2001/ConvexNet | a17609bd5bca0a02b6330b1ad8035f2b280109f0 | [
"MIT"
] | null | null | null | import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
from torch import nn
from tqdm import tqdm
import numpy as np
from datasets.preprocess import DatasetWrapper
from utils import AverageMeter
class IOC_MLP(torch.nn.Module):
    """Multi-layer perceptron used as an input-convex network.

    Architecture: Flatten -> Linear(in, 800) -> [BatchNorm1d -> ELU ->
    Linear(800, 800)] x 2 -> BatchNorm1d -> ELU -> Linear(800, out_classes).
    """

    def __init__(self, input_features, out_classes):
        super().__init__()
        width = 800
        layers = [nn.Flatten(), nn.Linear(input_features, width)]
        for _ in range(2):
            layers.extend([nn.BatchNorm1d(width), nn.ELU(), nn.Linear(width, width)])
        layers.extend([nn.BatchNorm1d(width), nn.ELU(), nn.Linear(width, out_classes)])
        # Sequential indices match the original layout (Linear at 1, 4, 7, 10).
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Flatten the input batch and push it through the layer stack."""
        return self.model(x)
def train_epoch(model: nn.Module, optimizer, loss_func, dataset, train_loader,
                epoch,
                n_epochs):
    """Train `model` for one epoch and return (avg_loss, avg_error).

    After each optimizer step, negative weights in all Linear layers past the
    first are mapped to small positive values to keep the network input-convex.
    """
    model.train()
    losses = AverageMeter()
    errors = AverageMeter()
    with tqdm(total=len(dataset.train_set),
              desc=f"Epoch {epoch + 1} / {n_epochs}") as pbar:
        for data, targets in train_loader:
            if torch.cuda.is_available():
                data = data.cuda()
                targets = targets.cuda()
            optimizer.zero_grad()
            outputs = model(data)
            loss = loss_func(outputs, targets)
            loss.backward()
            optimizer.step()
            # convex ensuring step: project weights of the deeper Linear layers
            # onto positive values.  Parameter names are assumed to look like
            # "model.<idx>.<kind>" (true for the nn.Sequential in IOC_MLP) --
            # int(split[1]) would raise on other layouts.
            for name, param in model.named_parameters():
                split = name.split('.')
                if int(split[1]) >= 2 and split[2] == 'weight':
                    param_data = param.data.cpu().numpy()
                    # Replace negatives with exp(w - 5): small but positive.
                    param_data[param_data < 0] = np.exp(
                        param_data[param_data < 0] - 5)
                    #
                    param.data.copy_(torch.tensor(param_data))
            batch_size = targets.size(0)
            # Top-1 prediction error for this batch.
            _, pred = outputs.data.cpu().topk(1, dim=1)
            error = torch.ne(pred.squeeze(),
                             targets.cpu()).float().sum().item() / batch_size
            errors.update(error, batch_size)
            losses.update(loss.item())
            pbar.update(data.size(0))
            pbar.set_postfix(**{
                '[Train/Loss]': losses.avg,
                '[Train/Error]': errors.avg
            })
    return losses.avg, errors.avg
#
#
def test_epoch(model: nn.Module, dataset: DatasetWrapper,
               test_loader: torch.utils.data.DataLoader):
    """Evaluate `model` on `test_loader` and return the average top-1 error.

    NOTE(review): the tqdm total uses dataset.test_set even when a validation
    loader is passed in (as `fit` does) -- the progress bar total may not match.
    """
    model.eval()
    # losses = AverageMeter()
    errors = AverageMeter()
    with tqdm(total=len(dataset.test_set),
              desc=f"Valid") as pbar:
        with torch.no_grad():
            for data, targets in test_loader:
                if torch.cuda.is_available():
                    data = data.cuda()
                    targets = targets.cuda()
                outputs = model(data)
                # loss = loss_func(outputs, targets)
                batch_size = targets.size(0)
                # Top-1 prediction error for this batch.
                _, pred = outputs.data.cpu().topk(1, dim=1)
                error = torch.ne(pred.squeeze(),
                                 targets.cpu()).float().sum().item() / batch_size
                errors.update(error, batch_size)
                # losses.update(loss.item())
                pbar.update(data.shape[0])
                pbar.set_postfix(**{
                    '[Valid/Error]': errors.avg
                })
    return errors.avg
def fit(model: IOC_MLP, dataset: DatasetWrapper, lr=0.0001, batch_size=64,
        n_epochs=10, path=None):
    """Train `model` on `dataset`, checkpointing the best validation model.

    Logs loss/accuracy to TensorBoard, saves the state dict to `path` whenever
    validation error improves, and stops early after 8 epochs without
    improvement.
    """
    if path is None:
        path = f'trained_models/ioc_mlp.{dataset.name}'
    writer = SummaryWriter(f'runs/ioc_mlp.{dataset.name}')
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    train_loader = torch.utils.data.DataLoader(dataset.train_set,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               )
    # NOTE(review): test_loader is built but never used in this function;
    # validation below uses valid_loader only.
    test_loader = torch.utils.data.DataLoader(dataset.test_set,
                                              batch_size=batch_size,
                                              )
    valid_loader = torch.utils.data.DataLoader(dataset.valid_set,
                                               batch_size=batch_size,
                                               )
    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    best_error = np.inf
    counter = 0  # epochs since last validation improvement
    for epoch in range(n_epochs):
        train_loss, train_error = train_epoch(model=model, optimizer=optimizer,
                                              loss_func=loss_func,
                                              dataset=dataset,
                                              train_loader=train_loader,
                                              epoch=epoch,
                                              n_epochs=n_epochs)
        valid_error = test_epoch(model, dataset, valid_loader)
        writer.add_scalars('loss', {'train': train_loss}, epoch)
        writer.add_scalars('accuracy', {'train': (1 - train_error) * 100,
                                        'valid': (1 - valid_error) * 100},
                           epoch)
        print(valid_error)
        if valid_error < best_error:
            # New best validation error: checkpoint and reset patience.
            print('Saving!')
            torch.save(model.state_dict(), path)
            best_error = valid_error
            counter = 0
        else:
            counter += 1
            if counter > 7:
                print("Patience came ending now")
                break
    writer.close()
| 34.554878 | 81 | 0.498853 | 579 | 0.10217 | 0 | 0 | 0 | 0 | 0 | 0 | 353 | 0.06229 |
5265a4488bcea1fc3304e5e8203a8bdc21c56960 | 485 | py | Python | Lesson04_CrypoTools_pt1/1_julius/solution.py | Alyoninthecity/Cybersecurity2021UNIPD | 456a0ff70357266902556627522cc8f41888907c | [
"BSD-3-Clause"
] | null | null | null | Lesson04_CrypoTools_pt1/1_julius/solution.py | Alyoninthecity/Cybersecurity2021UNIPD | 456a0ff70357266902556627522cc8f41888907c | [
"BSD-3-Clause"
] | null | null | null | Lesson04_CrypoTools_pt1/1_julius/solution.py | Alyoninthecity/Cybersecurity2021UNIPD | 456a0ff70357266902556627522cc8f41888907c | [
"BSD-3-Clause"
] | null | null | null | import base64
import re
def base64ToString(encoded):
    """Decode a Base64-encoded string and return the decoded ASCII text.

    The original body shadowed the ``base64`` module with its parameter,
    called ``b64decode`` with no arguments, referenced an undefined
    ``message`` variable, and never returned anything; this rewrite performs
    the decode the function name promises.

    :param encoded: Base64-encoded text (str or bytes).
    :return: the decoded text as a str.
    """
    decoded_bytes = base64.b64decode(encoded)
    return decoded_bytes.decode("ascii")
def shiftASCII(string, n):
    """Return `string` with every character's code point shifted by `n`,
    wrapping modulo 127 (the 7-bit ASCII range)."""
    shifted = (chr((ord(ch) + n) % 127) for ch in string)
    return "".join(shifted)
# Brute-force: print the ciphertext under every possible ASCII shift so the
# readable plaintext can be spotted by eye.
for i in range(0, 127):
    message = "}{[l^KlwOmwZjmOKW9"
    print(shiftASCII(message, i))
# ecCTF3T_7U_BRU73?!  (the recovered flag)
5265f7fb300d1d6611ad4be7e87983eb5c5ee653 | 568 | py | Python | freyr_app/core/forms.py | blanchefort/freyrmonitoring | 5bf10ba86d3f88390f91106426dd964289f5aee6 | [
"MIT"
] | 2 | 2021-06-01T20:27:14.000Z | 2021-10-01T23:24:45.000Z | freyr_app/core/forms.py | blanchefort/freyrmonitoring | 5bf10ba86d3f88390f91106426dd964289f5aee6 | [
"MIT"
] | null | null | null | freyr_app/core/forms.py | blanchefort/freyrmonitoring | 5bf10ba86d3f88390f91106426dd964289f5aee6 | [
"MIT"
] | null | null | null | from django import forms
class AddUrlForm(forms.Form):
    """Form for submitting a URL to be checked."""
    # Label is user-facing Russian text: "Enter a link to check".
    url = forms.URLField(initial='https://', label='Введите ссылку для проверки')
class UploadHappinessIndex(forms.Form):
    """Form for uploading a file with a custom happiness index."""
    # Label (Russian): "Report name".
    name = forms.CharField(max_length=256, label='Название отчёта')
    # Label (Russian): "CSV file with the report".
    file = forms.FileField(label='CSV-файл с отчётом')
class SearchItem(forms.Form):
    """Search over the indexed materials."""
    # Label (Russian): "Search!".
    search_query = forms.CharField(max_length=512, label='✨✨✨Поиск!')
| 31.555556 | 81 | 0.705986 | 702 | 0.957708 | 0 | 0 | 0 | 0 | 0 | 0 | 402 | 0.548431 |
52681931ece663f1a9a6fa741b530f332c045a60 | 2,264 | py | Python | src/putil/test/test_timer.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 2 | 2015-10-05T20:36:35.000Z | 2018-11-21T11:45:24.000Z | src/putil/test/test_timer.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 21 | 2015-03-18T14:39:32.000Z | 2016-07-01T17:16:29.000Z | src/putil/test/test_timer.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 12 | 2015-03-18T10:53:49.000Z | 2018-06-21T11:19:57.000Z | import time
from math import fabs
import putil.timer
from putil.testing import UtilTest
class TestTimer(UtilTest):
    """Unit tests for putil.timer.Timer and putil.timer.Accumulator.

    Modernized: Python-2-only ``iterator.next()`` calls replaced with the
    ``next()`` builtin (available since 2.6, required on 3.x), and deprecated
    ``assertEquals``/``assertNotEquals`` aliases replaced with their canonical
    names (the aliases were removed in Python 3.12).
    """

    def setUp(self):
        # Scripted sleep durations (seconds) for the one-step operation.
        self.op1_times = iter([.01, .02])
        self.a1 = putil.timer.Accumulator()
        # Scripted sleep durations for each step of the two-step operation.
        self.op2_step1_times = iter([.005, .015, .005, .005])
        self.op2_step2_times = iter([.01, .02, .01, .01])
        self.a2 = putil.timer.Accumulator()

    def test_found_caller(self):
        """A Timer created in another module logs under that module's name."""
        import importable.create_timer
        t = importable.create_timer.t
        self.assertEqual('timing.putil.test.importable.create_timer', t.logger.name)

    def test_time_event(self):
        """Two complete_step() calls yield three recorded time entries."""
        t = putil.timer.Timer()
        time.sleep(0.01)
        t.complete_step('pause')
        time.sleep(0.02)
        t.complete_step()
        self.assertEqual(3, len(t.times))

    def one_step_operation(self):
        """Helper: run one timed step using the next scripted duration."""
        t = putil.timer.Timer()
        time.sleep(next(self.op1_times))
        t.complete_step()
        self.a1.add(t)

    def test_stats_one_step(self):
        """Accumulator count/average/stddev over the scripted one-step runs."""
        try:
            while True:
                self.one_step_operation()
        except StopIteration:
            # Scripted durations exhausted -- all runs recorded.
            pass
        self.assertEqual(2, self.a1.get_count())
        self.assertAlmostEqual(self.a1.get_average(), 0.015, places=2)
        self.assertTrue(fabs(self.a1.get_average() - 0.015) < .002)
        self.assertAlmostEqual(self.a1.get_standard_deviation(), 0.005, places=2)

    def two_step_operation(self):
        """Helper: run one two-step timed operation from the scripted durations."""
        t = putil.timer.Timer()
        time.sleep(next(self.op2_step1_times))
        t.complete_step('one')
        time.sleep(next(self.op2_step2_times))
        t.complete_step('two')
        self.a2.add(t)

    def test_stats_two_steps(self):
        """Per-step and overall statistics over the scripted two-step runs."""
        try:
            while True:
                self.two_step_operation()
        except StopIteration:
            pass
        self.assertEqual(8, self.a2.get_count())
        self.assertEqual(4, self.a2.get_count("one"))
        self.assertEqual(4, self.a2.get_count("two"))
        self.assertAlmostEqual(self.a2.get_average(), 0.01, places=2)
        self.assertAlmostEqual(self.a2.get_average("one"), 0.008, places=2)
        self.assertAlmostEqual(self.a2.get_average("two"), 0.013, places=2)
        self.assertNotEqual(0, self.a2.get_standard_deviation())
| 30.186667 | 85 | 0.619258 | 2,172 | 0.959364 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.035336 |
52685c541e22ce454e9ae9ee85d7869bba20eee6 | 5,122 | py | Python | google/api/endpoint_pb2.py | LaudateCorpus1/python-api-common-protos | d9d8a9fc3543ef90b69bc1a79467daca3a0d220e | [
"Apache-2.0"
] | null | null | null | google/api/endpoint_pb2.py | LaudateCorpus1/python-api-common-protos | d9d8a9fc3543ef90b69bc1a79467daca3a0d220e | [
"Apache-2.0"
] | 1 | 2022-03-18T21:45:58.000Z | 2022-03-18T21:45:58.000Z | google/api/endpoint_pb2.py | LaudateCorpus1/python-api-common-protos | d9d8a9fc3543ef90b69bc1a79467daca3a0d220e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/endpoint.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/endpoint.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\rEndpointProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x19google/api/endpoint.proto\x12\ngoogle.api"Q\n\x08\x45ndpoint\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x07\x61liases\x18\x02 \x03(\tB\x02\x18\x01\x12\x0e\n\x06target\x18\x65 \x01(\t\x12\x12\n\nallow_cors\x18\x05 \x01(\x08\x42o\n\x0e\x63om.google.apiB\rEndpointProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3',
)
_ENDPOINT = _descriptor.Descriptor(
name="Endpoint",
full_name="google.api.Endpoint",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.api.Endpoint.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="aliases",
full_name="google.api.Endpoint.aliases",
index=1,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\030\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="target",
full_name="google.api.Endpoint.target",
index=2,
number=101,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="allow_cors",
full_name="google.api.Endpoint.allow_cors",
index=3,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=41,
serialized_end=122,
)
# Generated registration boilerplate (protoc output; regenerate rather than
# hand-edit): attach the Endpoint descriptor to the file descriptor and
# register both with the default symbol database.
DESCRIPTOR.message_types_by_name["Endpoint"] = _ENDPOINT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# The concrete Endpoint message class is built at import time by reflection
# from the descriptor above.
Endpoint = _reflection.GeneratedProtocolMessageType(
    "Endpoint",
    (_message.Message,),
    {
        "DESCRIPTOR": _ENDPOINT,
        "__module__": "google.api.endpoint_pb2"
        # @@protoc_insertion_point(class_scope:google.api.Endpoint)
    },
)
_sym_db.RegisterMessage(Endpoint)

# Reset cached option objects so they are rebuilt from the serialized form
# (the 'aliases' field carries serialized options -- presumably its
# deprecation flag; confirm against the .proto).
DESCRIPTOR._options = None
_ENDPOINT.fields_by_name["aliases"]._options = None
# @@protoc_insertion_point(module_scope)
| 33.045161 | 398 | 0.641742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,703 | 0.332487 |
5268e2e7c321088280b18102a1f9b67bb8260b9f | 966 | py | Python | flexget/plugins/filter/imdb_required.py | Konubinix/Flexget | 1379e346370ef144b9a02f761fc187497b6097e2 | [
"MIT"
] | 1 | 2017-08-25T07:17:04.000Z | 2017-08-25T07:17:04.000Z | flexget/plugins/filter/imdb_required.py | Konubinix/Flexget | 1379e346370ef144b9a02f761fc187497b6097e2 | [
"MIT"
] | 1 | 2015-11-10T01:07:54.000Z | 2015-11-10T01:07:54.000Z | flexget/plugins/filter/imdb_required.py | Konubinix/Flexget | 1379e346370ef144b9a02f761fc187497b6097e2 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('imdb_required')
class FilterImdbRequired(object):
    """
    Rejects entries that have neither imdb_url nor imdb_id, performing an
    imdb lookup first to fill them in when possible.

    Example::

      imdb_required: yes
    """

    schema = {'type': 'boolean'}

    @plugin.priority(32)
    def on_task_filter(self, task, config):
        """Reject every task entry for which no IMDB identification is found."""
        if not config:
            # Feature disabled via `imdb_required: no`.
            return
        for candidate in task.entries:
            try:
                plugin.get_plugin_by_name('imdb_lookup').instance.lookup(candidate)
            except plugin.PluginError:
                candidate.reject('imdb required')
            if not ('imdb_id' in candidate or 'imdb_url' in candidate):
                candidate.reject('imdb required')
entry.reject('imdb required')
@event('plugin.register')
def register_plugin():
    # Register this filter with FlexGet under the config key 'imdb_required'.
    plugin.register(FilterImdbRequired, 'imdb_required', api_ver=2)
| 25.421053 | 79 | 0.656315 | 660 | 0.68323 | 0 | 0 | 552 | 0.571429 | 0 | 0 | 270 | 0.279503 |
5269a1caf0be2c82d1b57ef29ea750514c38cbea | 1,795 | py | Python | tools/make_folders.py | leonzucchini/recipes | d11d1ee589bbea89b0d587e056c17718de25e9f3 | [
"MIT"
] | 8 | 2017-09-17T10:39:37.000Z | 2021-12-29T11:46:03.000Z | tools/make_folders.py | leonzucchini/Recipes | d11d1ee589bbea89b0d587e056c17718de25e9f3 | [
"MIT"
] | null | null | null | tools/make_folders.py | leonzucchini/Recipes | d11d1ee589bbea89b0d587e056c17718de25e9f3 | [
"MIT"
] | null | null | null | import os
import sys
import shutil
import re
def make_output_folder(folder_path, debug=False):
    """ Make folder for output, checking for previous results.

    Python 2 only (print statements, raw_input).  Interactive: if the folder
    already exists the user chooses to add ('a'), overwrite ('o') or exit
    ('e').  Always returns None.
    """
    # Skip if debug (avoids replace prompt)
    if debug:
        print "FolderSetup warning: Not creating directory because debug = True"
        pass
    else:
        # If destination folder does not exist then create it
        if not os.path.exists(folder_path):
            os.mkdir(folder_path)
        else:
            # Otherwise give a choice to replace (overwrite), use, or exit
            confirm_prompt = "The following folder exists:" + "\n" + \
                str(folder_path) + "\n" + \
                "Would you like to add to it ('a'), overwrite ('o'), or exit ('e'): "
            confirm = raw_input(confirm_prompt)

            # Re-prompt until the reply contains one of 'a', 'o', 'e'.
            # NOTE(review): re.search(r'[aeo]', ...) accepts any reply merely
            # CONTAINING one of those letters (e.g. "please"), and a reply
            # containing several matches falls through to the first branch
            # below that matches confirm exactly -- consider tightening.
            while not re.search(r'[aeo]', confirm):
                confirm_prompt = "Please confirm what you want to do." + "\n" + \
                    "Would you like to add to it ('a'), overwrite ('o'), or exit ('e'):"
                confirm = raw_input(confirm_prompt)

            # If exit
            if confirm == "e":
                print "OK exiting."
                sys.exit(1)

            # Else if overwrite
            elif confirm == "o":
                # Make folder path
                shutil.rmtree(folder_path)
                os.mkdir(folder_path)
                print "Created output folder: %s" %(folder_path)

            # Else if add
            elif confirm == "a":
                print "OK adding to folder"

    return None
526b26f211436d7f0e5e0e9e0897c31bf38dfd65 | 6,748 | py | Python | emit_main/workflow/l3_tasks.py | emit-sds/emit-main | 7c2f6feacc68b8a962309d8a3a7f6decc571c69d | [
"Apache-2.0"
] | 1 | 2022-02-11T17:05:52.000Z | 2022-02-11T17:05:52.000Z | emit_main/workflow/l3_tasks.py | emit-sds/emit-main | 7c2f6feacc68b8a962309d8a3a7f6decc571c69d | [
"Apache-2.0"
] | null | null | null | emit_main/workflow/l3_tasks.py | emit-sds/emit-main | 7c2f6feacc68b8a962309d8a3a7f6decc571c69d | [
"Apache-2.0"
] | null | null | null | """
This code contains tasks for executing EMIT Level 3 PGEs and helper utilities.
Author: Philip G. Brodrick, philip.brodrick@jpl.nasa.gov
"""
import datetime
import logging
import os
import luigi
import spectral.io.envi as envi
from emit_main.workflow.output_targets import AcquisitionTarget
from emit_main.workflow.workflow_manager import WorkflowManager
from emit_main.workflow.l1b_tasks import L1BGeolocate
from emit_main.workflow.l2a_tasks import L2AMask, L2AReflectance
from emit_main.workflow.slurm import SlurmJobTask
logger = logging.getLogger("emit-main")
class L3Unmix(SlurmJobTask):
    """
    Creates L3 fractional cover estimates

    :returns: Fractional cover file and uncertainties
    """

    config_path = luigi.Parameter()
    acquisition_id = luigi.Parameter()
    level = luigi.Parameter()
    partition = luigi.Parameter()

    # Slurm resource requests for this task.
    n_cores = 40
    memory = 180000

    task_namespace = "emit"

    def requires(self):
        """Depend on L2A reflectance and L2A mask for the same acquisition."""
        logger.debug(self.task_family + " requires")
        return (L2AReflectance(config_path=self.config_path, acquisition_id=self.acquisition_id, level=self.level,
                               partition=self.partition),
                L2AMask(config_path=self.config_path, acquisition_id=self.acquisition_id, level=self.level,
                        partition=self.partition))

    def output(self):
        """Return the acquisition-level target used for completion checks."""
        logger.debug(self.task_family + " output")
        wm = WorkflowManager(config_path=self.config_path, acquisition_id=self.acquisition_id)
        return AcquisitionTarget(acquisition=wm.acquisition, task_family=self.task_family)

    def work(self):
        """Run the SpectralUnmixing PGE and register its products.

        Executes unmix.jl on the L2A reflectance, copies the fractional-cover
        and uncertainty products into place, updates their ENVI headers, and
        writes product metadata plus a log entry to the database.
        """
        logger.debug(self.task_family + " run")

        wm = WorkflowManager(config_path=self.config_path, acquisition_id=self.acquisition_id)
        acq = wm.acquisition
        pge = wm.pges["SpectralUnmixing"]

        # Build PGE commands for run_tetracorder_pge.sh
        unmix_exe = os.path.join(pge.repo_dir, "unmix.jl")
        endmember_key = "level_1"
        tmp_log_path = os.path.join(self.local_tmp_dir,
                                    os.path.basename(acq.cover_img_path).replace(".img", "_pge.log"))
        output_base = os.path.join(self.local_tmp_dir, "unmixing_output")

        # Set up environment variables for the Julia runtime.
        env = os.environ.copy()
        env["PATH"] = "/beegfs/store/shared/julia-1.6.5/bin:${PATH}"
        env["JULIA_DEPOT_PATH"] = "/beegfs/store/shared/.julia_165_shared"
        env["JULIA_PROJECT"] = pge.repo_dir

        # Build command
        cmd_unmix = ['julia', '-p', str(self.n_cores), unmix_exe, acq.rfl_img_path, wm.config["unmixing_library"],
                     endmember_key, output_base, "--normalization", "brightness", "--mode", "sma-best",
                     "--n_mc", "50", "--reflectance_uncertainty_file", acq.uncert_img_path,
                     "--spectral_starting_column", "8", "--num_endmembers", "20", "--log_file", tmp_log_path]
        pge.run(cmd_unmix, tmp_dir=self.tmp_dir, env=env, use_conda_run=False)

        # Move outputs from the scratch area to their final locations.
        wm.copy(f'{output_base}_fractional_cover', acq.cover_img_path)
        wm.copy(f'{output_base}_fractional_cover.hdr', acq.cover_hdr_path)
        wm.copy(f'{output_base}_fractional_cover_uncertainty', acq.coveruncert_img_path)
        wm.copy(f'{output_base}_fractional_cover_uncertainty.hdr', acq.coveruncert_hdr_path)
        wm.copy(tmp_log_path, acq.cover_img_path.replace(".img", "_pge.log"))

        # BUG FIX: the original referenced the undefined name 'endmember_path'
        # here, raising NameError before any metadata was written.  Record the
        # endmember library file actually passed to unmix.jl instead.
        input_files = {
            "reflectance_file": acq.rfl_img_path,
            "reflectance_uncertainty_file": acq.uncert_img_path,
            "endmember_path": wm.config["unmixing_library"],
        }

        # Update hdr files with provenance metadata.
        for header_to_update in [acq.cover_hdr_path, acq.coveruncert_hdr_path]:
            input_files_arr = ["{}={}".format(key, value) for key, value in input_files.items()]
            doc_version = "EMIT SDS L3 JPL-D 104238, Rev A"  # \todo check
            hdr = envi.read_envi_header(header_to_update)
            hdr["emit acquisition start time"] = acq.start_time_with_tz.strftime("%Y-%m-%dT%H:%M:%S%z")
            hdr["emit acquisition stop time"] = acq.stop_time_with_tz.strftime("%Y-%m-%dT%H:%M:%S%z")
            hdr["emit pge name"] = pge.repo_url
            hdr["emit pge version"] = pge.version_tag
            hdr["emit pge input files"] = input_files_arr
            hdr["emit pge run command"] = " ".join(cmd_unmix)
            hdr["emit software build version"] = wm.config["extended_build_num"]
            hdr["emit documentation version"] = doc_version
            creation_time = datetime.datetime.fromtimestamp(
                os.path.getmtime(acq.cover_img_path), tz=datetime.timezone.utc)
            hdr["emit data product creation time"] = creation_time.strftime("%Y-%m-%dT%H:%M:%S%z")
            hdr["emit data product version"] = wm.config["processing_version"]
            hdr["emit acquisition daynight"] = acq.daynight
            envi.write_envi_header(header_to_update, hdr)

        # PGE writes metadata to db (hdr/creation_time come from the last
        # header processed above, matching the original behavior).
        dm = wm.database_manager
        product_dict_cover = {
            "img_path": acq.cover_img_path,
            "hdr_path": acq.cover_hdr_path,
            "created": creation_time,
            "dimensions": {
                "lines": hdr["lines"],
                "samples": hdr["samples"],
                "bands": hdr["bands"]
            }
        }
        dm.update_acquisition_metadata(acq.acquisition_id, {"products.l3.cover": product_dict_cover})

        product_dict_cover_uncert = {
            "img_path": acq.coveruncert_img_path,
            "hdr_path": acq.coveruncert_hdr_path,
            "created": creation_time,
            "dimensions": {
                "lines": hdr["lines"],
                "samples": hdr["samples"],
                "bands": hdr["bands"]
            }
        }
        dm.update_acquisition_metadata(acq.acquisition_id, {"products.l3.coveruncert": product_dict_cover_uncert})

        log_entry = {
            "task": self.task_family,
            "pge_name": pge.repo_url,
            "pge_version": pge.version_tag,
            "pge_input_files": input_files,
            "pge_run_command": " ".join(cmd_unmix),
            "documentation_version": doc_version,
            "product_creation_time": creation_time,
            "log_timestamp": datetime.datetime.now(tz=datetime.timezone.utc),
            "completion_status": "SUCCESS",
            "output": {
                "l3_cover_img_path": acq.cover_img_path,
                "l3_cover_hdr_path:": acq.cover_hdr_path,
                "l3_coveruncert_img_path": acq.coveruncert_img_path,
                "l3_coveruncert_hdr_path:": acq.coveruncert_hdr_path
            }
        }
        dm.insert_acquisition_log_entry(self.acquisition_id, log_entry)
| 42.440252 | 114 | 0.636633 | 6,173 | 0.91479 | 0 | 0 | 0 | 0 | 0 | 0 | 1,963 | 0.290901 |
526b485dc264415947150a42644b3ef978ed20b1 | 2,843 | py | Python | src/python/backends/py/runtime/graph/copy.py | andyjost/Sprite | 7ecd6fc7d48d7f62da644e48c12c7b882e1a2929 | [
"MIT"
] | 1 | 2022-03-16T16:37:11.000Z | 2022-03-16T16:37:11.000Z | src/python/backends/py/runtime/graph/copy.py | andyjost/Sprite | 7ecd6fc7d48d7f62da644e48c12c7b882e1a2929 | [
"MIT"
] | null | null | null | src/python/backends/py/runtime/graph/copy.py | andyjost/Sprite | 7ecd6fc7d48d7f62da644e48c12c7b882e1a2929 | [
"MIT"
] | null | null | null | '''
Code to copy Curry expressions.
'''
from __future__ import absolute_import
from .....common import T_SETGRD, T_FWD
from copy import copy, deepcopy
from ..... import inspect
from . import node
__all__ = ['copygraph', 'copynode', 'GraphCopier', 'Skipper']
class GraphCopier(object):
  '''
  Deep-copies an expression.  The skipper can be used to remove nodes.  This
  can be used to remove forward nodes and set guards from values.
  '''
  def __init__(self, skipper=None):
    # Expression to copy; set per-call by __call__ before deepcopy runs.
    self.expr = None
    self.skipper = skipper

  def __call__(self, expr, memo=None):
    '''Deep-copy ``expr``, applying the skipper at every node.'''
    self.expr = expr
    # deepcopy(self) routes through __deepcopy__ below, which rebuilds the
    # graph node by node.
    return deepcopy(self, memo)

  def __deepcopy__(self, memo):
    if not isinstance(self.expr, node.Node):
      # Non-node leaves (ints, strings, ...) are copied directly.
      return deepcopy(self.expr, memo)
    else:
      target = self.skipper(self.expr)
      if target is not None:
        # The skipper supplied a replacement; copy that instead.
        return self(target, memo)
      else:
        info = self.expr.info
        # A node is partial when it holds fewer successors than its arity.
        partial = info.arity > len(self.expr.successors)
        # NOTE(review): successor copies call self(succ) WITHOUT threading
        # ``memo`` through -- presumably deliberate, since deepcopy memoizes
        # on id(self) and a shared memo would collapse distinct subcopies.
        # Confirm that cyclic graphs cannot reach this path.
        return node.Node(
            info
          , *(self(succ) for succ in self.expr.successors)
          , partial=partial
          )
class Skipper(object):
  '''
  Indicates which nodes to skip.  If a node should be skipped, the
  __call__ method should return its replacement.
  '''
  def __init__(self, skipfwd=False, skipgrds=None):
    # skipfwd: whether FWD nodes should be replaced by their targets.
    # skipgrds: set of set-guard identifiers whose guards are removed.
    self.skipfwd = skipfwd
    self.skipgrds = set() if skipgrds is None else skipgrds

  def __call__(self, expr):
    '''Return the replacement for ``expr`` if it should be skipped, else None.'''
    if expr.info.tag == T_FWD:
      # BUG FIX: the original tested the undefined bare name ``skipfwd``,
      # which raised NameError whenever a forward node was encountered.
      if self.skipfwd:
        return inspect.fwd_target(expr)
    elif expr.info.tag == T_SETGRD:
      if inspect.get_set_id(expr) in self.skipgrds:
        return inspect.get_setguard_value(expr)
def copygraph(expr, memo=None, **kwds):
  '''
  Copies a Curry expression with the option to remove certain nodes.

  Args:
    expr:
      An instance of ``graph.Node`` or a built-in type such as ``int``,
      ``str``, or ``float``.
    skipfwd:
      Indicates whether to skip FWD nodes.
    skipgrds:
      A container of set identifiers indicating which set guards to skip.

  Returns:
    A deep copy of ``expr``.
  '''
  skipper = Skipper(**kwds)
  return GraphCopier(skipper=skipper)(expr, memo=memo)
def copynode(expr, mapf=None):
  '''
  Makes a shallow copy of a Curry expression.

  Args:
    expr
      The expression to copy.  Can be an instance of ``graph.Node`` or a
      built-in type such as ``int``, ``str``, or ``float``.
    mapf
      An optional map function.  If supplied, it is applied to each
      successor of a node before the copy is built.

  Returns:
    A shallow copy of ``expr``.
  '''
  if not isinstance(expr, node.Node):
    # Non-node values get a plain shallow copy.
    return copy(expr)
  info = expr.info
  is_partial = info.arity > len(expr.successors)
  successors = expr.successors if mapf is None else map(mapf, expr.successors)
  return node.Node(info, *successors, partial=is_partial)
| 26.324074 | 76 | 0.651425 | 1,371 | 0.482237 | 0 | 0 | 0 | 0 | 0 | 0 | 1,104 | 0.388322 |
526d20bd4beb813f088b8c300d75c3314be9fe9d | 1,325 | py | Python | src/dynamic_programming/python/coins/tests/test_coins.py | djeada/GraphAlgorithms | 0961303ec20430f90053a4efb9074185f96dfddc | [
"MIT"
] | 2 | 2021-05-31T13:01:33.000Z | 2021-12-20T19:48:18.000Z | src/dynamic_programming/python/coins/tests/test_coins.py | djeada/GraphAlgorithms | 0961303ec20430f90053a4efb9074185f96dfddc | [
"MIT"
] | null | null | null | src/dynamic_programming/python/coins/tests/test_coins.py | djeada/GraphAlgorithms | 0961303ec20430f90053a4efb9074185f96dfddc | [
"MIT"
] | null | null | null | import unittest
import os
import sys
file_dir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(file_dir + "/src")
from coins import coin_change_basic, coin_change_memo, coin_change_tab
class TestCoinChangeBasic(unittest.TestCase):
    """Tests for the basic (non-memoized) coin-change implementation.

    Uses assertEqual (assertEquals is a deprecated alias removed in
    Python 3.12); misspelled/misleading test names fixed.
    """

    def test_zero_amount(self):
        """An amount of 0 requires no coins."""
        num = 0
        coins = [3, 2, 1]
        result = 0
        self.assertEqual(coin_change_basic(num, coins), result)

    def test_positive(self):
        """A positive amount is computed correctly."""
        num = 25
        coins = [5, 10]
        result = 3
        self.assertEqual(coin_change_basic(num, coins), result)
class TestCoinChangeMemo(unittest.TestCase):
    """Tests for the memoized coin-change implementation.

    Uses assertEqual (assertEquals is a deprecated alias removed in
    Python 3.12); misspelled/misleading test names fixed.
    """

    def test_zero_amount(self):
        """An amount of 0 requires no coins."""
        num = 0
        coins = [3, 2, 1]
        result = 0
        self.assertEqual(coin_change_memo(num, coins), result)

    def test_positive(self):
        """A positive amount is computed correctly."""
        num = 67
        coins = [1, 5, 10, 25]
        result = 6
        self.assertEqual(coin_change_memo(num, coins), result)
class TestCoinChangeTab(unittest.TestCase):
    """Tests for the tabulated (bottom-up) coin-change implementation.

    Uses assertEqual (assertEquals is a deprecated alias removed in
    Python 3.12); misspelled/misleading test names fixed.
    """

    def test_zero_amount(self):
        """An amount of 0 requires no coins."""
        num = 0
        coins = [3, 2, 1]
        result = 0
        self.assertEqual(coin_change_tab(num, coins), result)

    def test_positive(self):
        """A positive amount is computed correctly."""
        num = 67
        coins = [1, 5, 10, 25]
        result = 6
        self.assertEqual(coin_change_tab(num, coins), result)
# Run unittest's test discovery/runner when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 23.660714 | 70 | 0.621132 | 1,067 | 0.805283 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.012075 |
526d4d40cd6f4c695d138adad233c6be53e5ca14 | 359 | py | Python | tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial1_Solution_d58d2933.py | amita-kapoor/course-content-dl | bba5c8f6fa38a83c4e452be30791f3a6399d5b83 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 473 | 2021-04-13T18:27:42.000Z | 2022-03-28T14:14:35.000Z | tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial1_Solution_d58d2933.py | amita-kapoor/course-content-dl | bba5c8f6fa38a83c4e452be30791f3a6399d5b83 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 399 | 2021-06-07T20:56:59.000Z | 2022-01-26T23:05:06.000Z | tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial1_Solution_d58d2933.py | amita-kapoor/course-content-dl | bba5c8f6fa38a83c4e452be30791f3a6399d5b83 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 170 | 2021-04-16T11:09:32.000Z | 2022-03-31T12:13:52.000Z |
"""
At infinite membrane resistance, the Neuron does not leak any current out,
and hence it starts firing with the slightest input current,
This shifts the transfer function towards 0, similar to ReLU activation (centered at 0).
Also, when there is minimal refractory time, the neuron can keep firing
at a high input current which avoids the saturation.
"""; | 44.875 | 88 | 0.788301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.994429 |
526ee635b3e57242e052bcbb6a2a6c1ce49cfd69 | 1,377 | py | Python | clients/listen.py | devstopfix/elixoids | df4f2ba9ddb3d13c3c86e57f100c3d57fa64ed8d | [
"MIT"
] | 5 | 2016-07-05T13:42:33.000Z | 2020-12-07T14:12:16.000Z | clients/listen.py | devstopfix/elixoids | df4f2ba9ddb3d13c3c86e57f100c3d57fa64ed8d | [
"MIT"
] | 70 | 2016-06-04T11:31:27.000Z | 2020-11-21T20:00:09.000Z | clients/listen.py | devstopfix/elixoids | df4f2ba9ddb3d13c3c86e57f100c3d57fa64ed8d | [
"MIT"
] | 1 | 2016-07-05T17:10:05.000Z | 2016-07-05T17:10:05.000Z | #
# Visualize the audio of an Elixoids game:
#
# pip3 install websocket-client
# python3 clients/listen.py --host example.com
#
import argparse
import sys
import websocket
try:
import thread
except ImportError:
import _thread as thread
import sound_pb2
def on_message(ws, message):
    """Decode a protobuf Sound event and print a one-character glyph for it.

    FIRE events print '.', EXPLOSION events print the explosion emoji; any
    other noise type is silently ignored (matching the original behaviour).
    """
    sound = sound_pb2.Sound()
    sound.ParseFromString(message)
    glyphs = {
        sound_pb2.Sound.FIRE: ".",
        sound_pb2.Sound.EXPLOSION: "💥",
    }
    glyph = glyphs.get(sound.noise)
    if glyph is not None:
        sys.stdout.write(glyph)
        sys.stdout.flush()
def on_error(ws, error):
    """Report a websocket error on stderr, followed by a blank separator line."""
    sys.stderr.write(str(error) + "\n\n")
def sound_url(host, game_id):
    """Build the websocket URL of the sound stream for one game."""
    return "ws://%s/%s/sound" % (host, game_id)
def options():
    """Parse command-line options: --host (host[:port]) and --game (game id)."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--host",
        default="localhost:8065",
        help="host[:port] of Elixoids server",
    )
    parser.add_argument(
        "--game",
        default=0,
        help="Game id",
    )
    return parser.parse_args()
if __name__ == "__main__":
args = options()
ws_url = sound_url(args.host, args.game)
ws = websocket.WebSocketApp(ws_url,
header={"Accept": "application/octet-stream"},
on_message=on_message,
on_error=on_error)
ws.run_forever()
| 25.036364 | 78 | 0.606391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.204348 |
52702373891582e4ba0d662955581802a9e0d1a1 | 12,915 | py | Python | find_duplicates.py | dallascard/LN_tools | 66be00f1fd11517f7bbf2949cc70f9552f3af4f4 | [
"Apache-2.0"
] | 1 | 2019-09-29T20:48:51.000Z | 2019-09-29T20:48:51.000Z | find_duplicates.py | dallascard/LN_tools | 66be00f1fd11517f7bbf2949cc70f9552f3af4f4 | [
"Apache-2.0"
] | null | null | null | find_duplicates.py | dallascard/LN_tools | 66be00f1fd11517f7bbf2949cc70f9552f3af4f4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This script looks through the JSON files created by parse_LN_to_JSON.py and looks
for duplicates using the Jaccard Coefficient of k-grams of the article text (body).
It creates a CSV file with all of the cases listed and duplicates marked, and
optionally stores the same information in a JSON file for use by make_sample.py
@author: dcard
"""
#import modules
from os import path, makedirs
from optparse import OptionParser
from json import loads, dump
import codecs
import re
import glob
import csv
import datetime
# This function updates a dictionary if the given value is greater than the present value
# Inputs:
# d: a dictionary
# key: a key for the new dictionary
# value: a value to insert at that key (if it's bigger than the current value)
def insert_max(d, key, value):
    """Store value at d[key], keeping the maximum if the key already exists.

    Args:
        d: dictionary mapping keys to the largest value seen so far (mutated)
        key: key to update
        value: candidate value; kept only if it exceeds the current one
    """
    # dict.get with the candidate as its default covers both the "missing
    # key" and the "existing, smaller value" cases in one expression, and
    # avoids the Python-2-only dict.has_key (removed in Python 3).
    d[key] = max(d.get(key, value), value)
# This function compares two sets of n-grams and reterns the Jaccard Coefficient
# Inputs:
# i_shingles, j_shingles: sets of n-grams
def compare_shingles(i_shingles, j_shingles):
    """Return the Jaccard coefficient of two shingle (n-gram) sets.

    JC = |A ∩ B| / |A ∪ B|; returns 0 when both sets are empty.
    """
    overlap = len(i_shingles & j_shingles)
    union_size = len(i_shingles) + len(j_shingles) - overlap
    if union_size > 0:
        return float(overlap) / float(union_size)
    return 0
# This function is called when duplicates are found to update a dictionary
# which stores the case ids of all associate duplictes (including itself)
# Inputs:
# duplicates: a dictionary of duplicates
# i_id, j_id: case_ids of the two duplicates
def store_duplicates(duplicates, i_id, j_id):
    """Record that i_id and j_id are duplicates of each other.

    Merges the existing duplicate groups of both ids (each group includes
    the id itself) and propagates the merged group to every member.

    Args:
        duplicates: dict mapping case_id -> set of duplicate case_ids (mutated)
        i_id, j_id: the two case ids found to be duplicates

    Returns:
        The (mutated) duplicates dictionary.
    """
    # Existing group for each id, defaulting to the pair itself.
    # Uses `in` instead of the Python-2-only dict.has_key (removed in Py3).
    if i_id in duplicates:
        dup_i = duplicates[i_id]
    else:
        dup_i = {i_id, j_id}
    if j_id in duplicates:
        dup_j = duplicates[j_id]
    else:
        dup_j = {i_id, j_id}
    # Merge both groups; the extra union with {i_id, j_id} is a no-op when
    # the defaults were used, but guarantees both ids are present.
    new_dup = dup_i.union(dup_j)
    new_dup = new_dup.union({i_id, j_id})
    # Propagate the merged group to every member. NOTE: members with no
    # prior entry all receive a reference to the *same* set object, so
    # later in-place .update() calls keep those aliases in sync — this
    # aliasing is intentional and must be preserved when refactoring.
    for d in new_dup:
        if d in duplicates:
            duplicates[d].update(new_dup)
        else:
            duplicates[d] = new_dup
    # return the modified dictionary
    return duplicates
### MAIN ###
# NOTE(review): this script uses Python 2 syntax throughout (print
# statements, dict.has_key); it will not run unmodified under Python 3.

# set up an options parser
usage = "\n%prog project_dir [options]"
parser = OptionParser(usage=usage)
parser.add_option('-k', help='use K-grams for deduplication [default = %default]', metavar='K', default=4)
parser.add_option('-r', help='range (in days) over which to look for duplicates [default = %default]', metavar='RANGE', default = 62)
parser.add_option('-t', help='Threshold above which to consider similar articles as duplicates [default = %default]', metavar='THRESH', default=0.2)

# make a dictionary of months to look for
MONTHS = {u'january':1, u'february':2, u'march':3, u'april':4, u'may':5, u'june':6, u'july':7, u'august':8, u'september':9, u'october':10, u'november':11, u'december':12}

(options, args) = parser.parse_args()
if len(args) < 1:
    exit("Error: Please provide a project directory")

# Make sure we can find the input directory
project_dir = args[0]
if not path.exists(project_dir):
    exit("Error: Cannot find project directory")

input_dir = project_dir + '/json/'
output_dir = project_dir + '/metadata/'
if not path.exists(output_dir):
    makedirs(output_dir)

# Open the csv file for writing
csv_file_name = output_dir + 'duplicates.csv'
csv_file = open(csv_file_name, 'wb')
writer = csv.writer(csv_file)

# Get a list of all the files in the input directory
files = glob.glob(input_dir + '/*.json')
files.sort()
print "Found", len(files), " files."

date_hash = {}  # a dictionary of files (articles) indexed by date
case_years = {}  # a dictionary of years, indexed by case id
shingle_k = int(options.k)  # the size of shingles to use (k in k-grams)
shingle_thresh = float(options.t)  # the threshold for the JC above which to consider duplicates
date_range = int(options.r)  # the range (in days) over which to look for duplicates

# Start an empty list of case_ids
case_ids = []

# Go through all the files one by one
count = 0
# First pass: index every article by its (parsed) publication date and
# remember each case's year.
for f in files:
    # open the file and unpack the json object into dictionary
    input_file_name = f
    name_parts = input_file_name.split('/')
    file_name = name_parts[-1]
    input_file = codecs.open(input_file_name, encoding='utf-8')
    input_text = input_file.read()
    input_file.close()
    doc = loads(input_text, encoding='utf-8')

    # set default (blank) values for various strings
    case_id = u''  # case_id
    orig_date = u''  # the date string as written in the article
    day = u''  # the day from the date string
    month = u''  # the month from the date string
    year = u''  # the year from the date string
    fulldate = u''  # the date in the format YYYYMMDD

    # Look for the case_id from this file and add it to the list
    if doc.has_key(u'CASE_ID'):
        case_id = doc[u'CASE_ID']
        case_ids.append(case_id)

    # Look for the date from this file and parse it
    if doc.has_key(u'DATE'):
        orig_date = doc[u'DATE']
        year = doc[u'YEAR']
        month = doc[u'MONTH']
        if doc.has_key(u'DAY'):
            day = doc[u'DAY']
        else:
            day = 0
        # articles with no (or zero) day are pinned to mid-month
        if day == 0:
            day = 15;
        date = datetime.date(int(year), int(month), int(day))

        # store this file in the dictionary of files indexed by date
        if date_hash.has_key(date):
            # if the date exists as a key, add this file to the list at that key
            file_list = list(date_hash[date])
            file_list.append(file_name)
            date_hash[date] = file_list
        else:
            # otherwise, start a new list
            date_hash[date] = [file_name]

        # also store the year of this article
        case_years[case_id] = int(year)

    # keep a count for user feedback
    count += 1
    if (count%1000 == 0):
        print "Processed", count, "files."
# get all the dates for which articles exist and sort them
dates = date_hash.keys()
dates.sort()

# set up some variables for the sliding-window duplicate search
first_date = dates[0]  # the earliest date for which we have an article
last_date = dates[-1]  # the last date for which we have an article
current_date = first_date  # the date we're currently considering
nCases = len(case_ids)  # the total number of articles
active_dates = []  # a list of lists of cases currently comparing against
duplicates = {}  # a dictionary of duplicates, indexed by case_id
max_JCs = {}  # the max Jaccard Coefficient (similarity) indexed by case_id
count = 0  # the number of pairs we have processed
one_day = datetime.timedelta(1)  # a constant for incrementing by one day

print first_date, last_date
print "Starting loop"

# go through every day, starting with the first
# Second pass: slide a `date_range`-day window over the calendar, comparing
# each day's articles against each other and against all earlier articles
# still in the window.
while current_date <= last_date:
    # if our list of active dates is full, pop off the first one added
    if len(active_dates) == date_range:
        # pop the oldest date
        active_dates.pop(0)
    # then add a new list for the current date
    active_dates.append([])

    # look for any files associated with the current date
    if (date_hash.has_key(current_date)):
        # start an empty list of case_ids
        cases = []
        # get all the files associated with the current date
        files = date_hash[current_date]
        # process each file
        for f in files:
            # read in the json file and unpack it, as above
            input_file_name = input_dir + '/' + f
            input_file = codecs.open(input_file_name, encoding='utf-8')
            input_text = input_file.read()
            input_file.close()
            doc = loads(input_text, encoding='utf-8')
            # get the case id
            if doc.has_key(u'CASE_ID'):
                case_id = doc[u'CASE_ID']
            # get the text of the article
            if doc.has_key(u'BODY'):
                body = doc[u'BODY']
                text = ''
                # combine the paragraphs
                for b in body:
                    text += b + u' '
                # split the text into words
                words = text.split()
                shingles = set()
                # create a set of all the n-grams in the article
                for w in range(len(words) - shingle_k + 1):
                    shingle = u''
                    # create a shingle from k words
                    for j in range(shingle_k):
                        shingle += words[w+j] + u''
                    # add it to the set
                    shingles.add(shingle)
                # add this case and its shingles to the list of cases for this date
                cases.append((case_id, shingles))

        # add this list of cases from this date to the list of active cases
        active_dates[-1] = cases

        # compute similarities among new cases and with old cases
        # check to see if anything was added this iteration
        new_cases = active_dates[-1]
        # if at least one case was added
        for i in range(len(new_cases)):
            # get the case_id from the tuple for this case
            i_id = new_cases[i][0]
            # get the set of shingles from the tuple for this case
            i_shingles = new_cases[i][1]
            # compare it to other new cases (each unordered pair once)
            for j in range(i+1, len(new_cases)):
                j_id = new_cases[j][0]
                j_shingles = new_cases[j][1]
                # compute the Jaccard Coefficient between shingles
                JC = compare_shingles(i_shingles, j_shingles)
                # store the max JC for these cases
                insert_max(max_JCs, i_id, JC)
                insert_max(max_JCs, j_id, JC)
                # if the JC is above our threshold, consider these to be duplicates
                if (JC > shingle_thresh):
                    duplicates = store_duplicates(duplicates, i_id, j_id)
                # keep a count for user feedback
                count += 1
                if (count%10000 == 0):
                    print "Processed", count, "pairs"
            # now compare each new case to all old cases in the active range
            # go through each date in the active range (all but today's slot)
            for k in range(len(active_dates)-1):
                # get each case associated with that date
                for j in range(len(active_dates[k])):
                    # compare as above
                    j_id = active_dates[k][j][0]
                    j_shingles = active_dates[k][j][1]
                    JC = compare_shingles(i_shingles, j_shingles)
                    insert_max(max_JCs, i_id, JC)
                    insert_max(max_JCs, j_id, JC)
                    if (JC > shingle_thresh):
                        duplicates = store_duplicates(duplicates, i_id, j_id)
                    count += 1
                    if (count%10000 == 0):
                        print "Processed", count, "pairs"

    # go to the next date
    current_date = current_date + one_day
# output the results as a csv file
case_ids.sort()
# for each case write case_id, max_JC, and list of duplicates
for c in case_ids:
    row = [c]
    if max_JCs.has_key(c):
        row.append(max_JCs[c])
    if duplicates.has_key(c):
        dup_list = list(duplicates[c])
        dup_list.sort()
        row.append(dup_list)
    writer.writerow(row)
csv_file.close()

# also write this information as a JSON object (consumed by make_sample.py)
output = {}
# for each case, save the year and list of duplicates, keyed by case_id
for c in case_ids:
    case = []
    if case_years.has_key(c):
        case.append(case_years[c])
    else:
        case.append(0)
    if duplicates.has_key(c):
        case.append(list(duplicates[c]))
    else:
        case.append([])
    output[c] = case

# save the output to a json file
output_file_name = output_dir + 'duplicates.json'
output_file = codecs.open(output_file_name, mode='w', encoding='utf-8')
dump(output, output_file, ensure_ascii=False, indent=2)
output_file.close()
| 36.176471 | 170 | 0.602787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,435 | 0.420828 |
527099d9e70699816c427356c1e9f6bf0065bca5 | 160 | py | Python | QARealtimeCollector/clients/test.py | dizzy21c/QUANTAXIS_RealtimeCollector | bb55a3852fe437b51ce92a9cbbc4e89c5e317f6c | [
"MIT"
] | 1 | 2020-02-03T14:10:38.000Z | 2020-02-03T14:10:38.000Z | QARealtimeCollector/clients/test.py | daqing526533/QUANTAXIS_RealtimeCollector | e234669ac680fa59775598df2ee84a61085b79ed | [
"MIT"
] | null | null | null | QARealtimeCollector/clients/test.py | daqing526533/QUANTAXIS_RealtimeCollector | e234669ac680fa59775598df2ee84a61085b79ed | [
"MIT"
] | null | null | null |
# Subscribe to a RabbitMQ pub/sub exchange and consume realtime 60-minute
# bars for contract rb1910, blocking in start().
# NOTE(review): broker host and credentials are hard-coded here — consider
# moving them to configuration.
from QAPUBSUB.consumer import subscriber

sub = subscriber(host='192.168.2.116',user='admin', password='admin' ,exchange= 'realtime_60min_rb1910')
sub.start()
5270c0102b69687b11a4659208c542e286ce0456 | 356 | py | Python | artifacttemplates/http%3A%2F%2Fopentosca.org%2Fartifacttemplates/PennyLaneAlgo_DA_AT/files/pennylane_app_algo.py | OpenTOSCA/tosca-definitions-public | abc4240499f143b0cb0fdd8a1f1400b2172e95b9 | [
"Apache-2.0"
] | null | null | null | artifacttemplates/http%3A%2F%2Fopentosca.org%2Fartifacttemplates/PennyLaneAlgo_DA_AT/files/pennylane_app_algo.py | OpenTOSCA/tosca-definitions-public | abc4240499f143b0cb0fdd8a1f1400b2172e95b9 | [
"Apache-2.0"
] | null | null | null | artifacttemplates/http%3A%2F%2Fopentosca.org%2Fartifacttemplates/PennyLaneAlgo_DA_AT/files/pennylane_app_algo.py | OpenTOSCA/tosca-definitions-public | abc4240499f143b0cb0fdd8a1f1400b2172e95b9 | [
"Apache-2.0"
] | 1 | 2020-04-30T15:04:24.000Z | 2020-04-30T15:04:24.000Z | import pennylane as qml
import numpy as np
def algo(x, y, z):
    """Apply Euler rotations RZ(z), RY(y), RX(x) on wire 0, entangle wires
    0 and 1 with a CNOT, and return the Pauli-Z expectation on wire 1."""
    for rotation, angle in ((qml.RZ, z), (qml.RY, y), (qml.RX, x)):
        rotation(angle, wires=[0])
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(wires=1))
def run_algo(device, args):
    """Bind `algo` to `device` and evaluate it with the rotation angles taken
    from the 'X', 'Y' and 'Z' entries of `args` (converted to float)."""
    print(args)
    qnode = qml.QNode(algo, device)
    angles = [float(args[key]) for key in ('X', 'Y', 'Z')]
    return qnode(angles[0], angles[1], angles[2])
5272823ab83e4c5f82c505ea22fae1f24153577c | 14,389 | py | Python | baseline/deep_ed_PyTorch/deep_ed_PyTorch/ed/model/model_global.py | yifding/e2e_EL_evaluate | 48e12a93c98daf99bb560efcc267749247f590ba | [
"MIT"
] | 5 | 2021-11-30T02:19:52.000Z | 2021-12-10T14:38:17.000Z | baseline/deep_ed_PyTorch/deep_ed_PyTorch/ed/model/model_global.py | yifding/e2e_EL_evaluate | 48e12a93c98daf99bb560efcc267749247f590ba | [
"MIT"
] | 1 | 2021-11-15T02:18:41.000Z | 2021-12-01T13:36:26.000Z | baseline/deep_ed_PyTorch/deep_ed_PyTorch/ed/model/model_global.py | yifding/e2e_EL_evaluate | 48e12a93c98daf99bb560efcc267749247f590ba | [
"MIT"
] | null | null | null | # **YD** the model to train deep-ed-global for entity disambiguation
import copy
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from deep_ed_PyTorch.utils import utils
from deep_ed_PyTorch.ed.args import arg_parse
class EntityContext(nn.Module):
    # Attention-based local (per-mention) scorer: each candidate entity is
    # scored against an attention-weighted embedding of the mention's
    # context words, using hard attention over the top-R context words.

    def __init__(self, args):
        super(EntityContext, self).__init__()
        self.args = args
        # Diagonal bilinear weights, stored as vectors and applied
        # element-wise: A for entity<->word attention scores, B for the
        # final entity<->aggregated-context score.
        self.A_linear = nn.Parameter(torch.ones(args.ent_vecs_size))
        self.B_linear = nn.Parameter(torch.ones(args.ent_vecs_size))

    def forward(self, inputs):
        """
        :param inputs = [
                [ctxt_words, ctxt_words_vec],
                [cand_entities, cand_entities_vec],
                [p_e_m],
        ]

        # **YD** batch_size = num_mentions: ED task is to classify the correct entity for a given mention.
        # local_model only looks at mention (surface format), candidate entities, context words and properties of
        # these candidate entities.
        # global_model: considers the ED selection as a sequence classification task, naming to for a given document
        # with multiple mentions, how to find the matched entities for the whole sequence.

        ctxt_words: shape = [num_mentions, ctxt_window]
            for each training instance (considering one mention; global?), the context words within the context
            window.

        ctxt_words_vec: shape = [num_mentions, ctxt_window, word_vecs_size]
            corresponding word embeddings for each context word

        cand_entities: shape = [num_mentions, max_num_cand]
            truncated candidate entities for mentions, entities are represented by their thid

        cand_entities_vec: shape = [num_mentions, max_num_cand, ent_vecs_size]
            entity embedding for each candidate entities

        p_e_m: shape = [num_mentions, max_num_cand], log(p(e|m))
            for each mention, the candidate entities are found by the p(e|m) in order. It is also used as a feature
            to predict the final local classification score for a mention-entity pair.

        :return: entity_context_sim_scores, beta
            entity_context_sim_scores: shape = [num_mentions, max_num_cand]
                local scores of mention-candidate_entity pairs
            beta: shape = [num_mentions, ctxt_window]
                attention of mention-ctxt_word, for visualization
        """
        [
            [ctxt_words, ctxt_words_vec],
            [cand_entities, cand_entities_vec],
            p_e_m,
        ] = inputs

        # pre_u.shape = [num_mentions, max_num_cand, ent_vecs_size]
        # **YD** infer the first dimension of input candidate entity vectors
        self.args.num_mentions = cand_entities_vec.size(0)
        # print('self.args.num_mentions', self.args.num_mentions)

        # Candidate entity vectors scaled by the diagonal of A.
        pre_u = (
            cand_entities_vec.view(self.args.num_mentions * self.args.max_num_cand, self.args.ent_vecs_size) *
            self.A_linear
        ).view(self.args.num_mentions, self.args.max_num_cand, self.args.ent_vecs_size)
        # assert pre_u.shape == torch.Size([self.args.num_mentions, self.args.max_num_cand, self.args.ent_vecs_size])

        # post_u.shape = [num_mentions, word_vecs_size, ctxt_window]
        post_u = torch.transpose(ctxt_words_vec, 1, 2)
        # assert post_u.shape == torch.Size([self.args.num_mentions, self.args.word_vecs_size, self.args.ctxt_window])

        # u_vec.shape = [num_mentions, max_num_cand, ctxt_window]
        # entity-word compatibility: e^T diag(A) w for every pair
        u_vec = torch.bmm(pre_u, post_u)
        # assert u_vec.shape == torch.Size([self.args.num_mentions, self.args.max_num_cand, self.args.ctxt_window])

        # u.shape = [num_mentions, ctxt_window]
        # each word's score is its best match over the candidate set
        u = torch.max(u_vec, dim=1).values
        # assert u.shape == torch.Size([self.args.num_mentions, self.args.ctxt_window])

        # top_k.shape = [num_mentions, R]
        top_k = torch.topk(u, k=self.args.R).values
        # assert top_k.shape == torch.Size([self.args.num_mentions, self.args.R])

        # top_min.shape = [num_mentions]
        # the R-th largest word score: threshold for hard attention
        top_min = torch.min(top_k, dim=1).values
        # assert top_min.shape == torch.Size([self.args.num_mentions])

        # **YD** the model autograph may cause problem here, not 100% sure which one to choose
        sketch = top_min.view(self.args.num_mentions, 1).clone()
        # sketch = top_min.view(self.args.num_mentions, 1)

        # Words below the top-R threshold are pushed to -50 (in place) so
        # that they get ~zero weight after the softmax below.
        minus_result = u - sketch
        nn.Threshold(0, -50, True).forward(minus_result)
        minus_result = minus_result + sketch

        # beta.shape = [num_mentions, ctxt_window]
        beta = F.softmax(minus_result, dim=1)
        # assert beta.shape == torch.Size([self.args.num_mentions, self.args.ctxt_window])

        # **YD** second step,
        # ctxt_full_embeddings.shape = [num_mentions, word_vecs_size]
        # attention-weighted sum of the context word embeddings
        ctxt_full_embeddings = torch.bmm(
            torch.transpose(ctxt_words_vec, 1, 2),
            beta.view(self.args.num_mentions, self.args.ctxt_window, 1)
        ).squeeze(2)

        # entity_context_sim_scores_pre.shape = [num_mentions, max_num_cand, ent_vecs_size]
        entity_context_sim_scores_pre = cand_entities_vec

        # entity_context_sim_scores_post.shape = [num_mentions, word_vecs_size, 1]
        entity_context_sim_scores_post = (ctxt_full_embeddings * self.B_linear).view(
            self.args.num_mentions, self.args.word_vecs_size, 1
        )

        # entity_context_sim_scores.shape = [num_mentions, max_num_cand]
        # final local score: e^T diag(B) c for each candidate entity e
        entity_context_sim_scores = torch.bmm(
            entity_context_sim_scores_pre,
            entity_context_sim_scores_post
        ).squeeze(2)

        return entity_context_sim_scores, beta
class MessageOneRound(nn.Module):
    # One round of (damped) loopy belief propagation between mentions:
    # combines the previous round's messages with the unary+pairwise scores
    # and produces the next round's messages (in log space).

    def __init__(self, args):
        super(MessageOneRound, self).__init__()
        self.args = args
        # Parameter-free trick: dummy_param's device tells us where to
        # move tensors created inside forward().
        self.dummy_param = nn.Parameter(torch.empty(0))

        '''
        self.mask = torch.ones(self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions)
        for i in range(self.args.max_num_cand):
            for j in range(self.args.num_mentions):
                self.mask[j, i, j] = 0
        self.mask.require_grad=False
        if args.type == 'cuda':
            self.mask = self.mask.cuda()
        '''

    def forward(self, unary_plus_pairwise, old_message):
        # unary_plus_pairwise: [num_mentions, max_num_cand, num_mentions, max_num_cand]
        # old_message:         [num_mentions, max_num_cand, num_mentions]
        self.args.num_mentions = unary_plus_pairwise.size(0)
        '''
        assert unary_plus_pairwise.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions, self.args.max_num_cand]
        )
        '''
        '''
        assert old_message.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions]
        )
        '''
        # Retained fraction of the previous message (damping), in prob space.
        crf_old_message = (1.0 - self.args.lbp_damp) * torch.exp(old_message)
        '''
        assert crf_old_message.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions]
        )
        '''
        # equation (10), add sum old message and unary_plus_pairwise
        crf_new_message = unary_plus_pairwise + \
            old_message.sum(2).repeat(self.args.num_mentions, self.args.max_num_cand, 1, 1)
        '''
        assert crf_new_message.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions, self.args.max_num_cand]
        )
        '''
        # max-product step: maximize over the sender's candidate entities
        select_crf_new_message = crf_new_message.max(3).values
        '''
        assert select_crf_new_message.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions]
        )
        '''
        # normalize the message over candidates (log space)
        select_crf_new_message = F.log_softmax(select_crf_new_message, dim=1)
        '''
        assert select_crf_new_message.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions]
        )
        '''
        # **YD** bugs here..
        # select_crf_new_message.diagonal(dim1=0, dim2=2).fill_(0)
        # Zero out self-messages (mention j -> mention j) with an explicit
        # mask; the mask is rebuilt every call because num_mentions varies.
        mask = torch.ones(select_crf_new_message.shape)
        for i in range(select_crf_new_message.size(1)):
            for j in range(select_crf_new_message.size(0)):
                mask[j, i, j] = 0
        mask.require_grad = False
        '''
        if self.args.type == 'cuda':
            mask = mask.cuda()
        '''
        device = self.dummy_param.device
        mask = mask.to(device)
        select_crf_new_message = select_crf_new_message * mask
        '''
        assert select_crf_new_message.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions]
        )
        '''
        # damped mixture of new and old messages, returned in log space
        select_crf_new_message = torch.exp(select_crf_new_message) * self.args.lbp_damp
        crf_total_message = torch.log(select_crf_new_message + crf_old_message)
        return crf_total_message
class ModelGlobal(nn.Module):
    # Global ED model: local (context) scores from EntityContext, plus
    # entity-entity pairwise scores refined by lbp_iter rounds of loopy
    # belief propagation, combined with the p(e|m) prior through a small
    # two-layer MLP.

    def __init__(self, args):
        super(ModelGlobal, self).__init__()
        self.args = args
        self.entity_context = EntityContext(args)
        # Diagonal bilinear weights for entity-entity pairwise similarity.
        self.C_linear = nn.Parameter(torch.ones(args.ent_vecs_size))
        # MLP combining [global score, log p(e|m)] into the final score.
        self.linear1 = nn.Linear(2, self.args.nn_pem_interm_size)
        self.linear2 = nn.Linear(self.args.nn_pem_interm_size, 1)
        # One (independent) message-passing module per LBP iteration.
        self.message_pass_list = [copy.deepcopy(MessageOneRound(args)) for _ in range(self.args.lbp_iter)]

    def forward(self, inputs):
        # Local mention-candidate scores and word attention (see EntityContext).
        entity_context_sim_scores, beta = self.entity_context(inputs)
        [
            [ctxt_words, ctxt_words_vec],
            [cand_entities, cand_entities_vec],
            p_e_m,
        ] = inputs
        # check num_mentions match
        self.args.num_mentions = cand_entities_vec.size(0)
        # build entity pairwise similarity by dot product
        entity_pairwise_pre = cand_entities_vec.view(self.args.num_mentions * self.args.max_num_cand,
                                                     self.args.ent_vecs_size) * self.C_linear
        '''
        assert entity_pairwise_pre.shape == torch.Size(
            [self.args.num_mentions * self.args.max_num_cand, self.args.ent_vecs_size]
        )
        '''
        entity_pairwise_post = cand_entities_vec.view(
            self.args.num_mentions * self.args.max_num_cand, self.args.ent_vecs_size
        ).T
        '''
        assert entity_pairwise_post.shape == torch.Size(
            [self.args.ent_vecs_size, self.args.num_mentions * self.args.max_num_cand]
        )
        '''
        entity_pairwise = torch.mm(entity_pairwise_pre, entity_pairwise_post).view(
            self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions,self.args.max_num_cand
        )
        '''
        assert entity_pairwise.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions, self.args.max_num_cand]
        )
        '''
        # build unary score by duplicating context score
        unary = entity_context_sim_scores.repeat(self.args.num_mentions, self.args.max_num_cand, 1, 1)
        '''
        assert unary.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions, self.args.max_num_cand]
        )
        '''
        # sum up the unary and pairwise
        unary_plus_pairwise = unary + entity_pairwise
        assert unary_plus_pairwise.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions, self.args.max_num_cand]
        )
        # initial message is '0', message is defined from a mention to another mention's certain candidate entity
        round_message = unary_plus_pairwise.max(3).values * 0
        '''
        assert round_message.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions]
        )
        '''
        # run lbp_iter rounds of loopy belief propagation
        for message_pass_layer in self.message_pass_list:
            round_message = message_pass_layer(unary_plus_pairwise, round_message)
            '''
            assert round_message.shape == torch.Size(
                [self.args.num_mentions, self.args.max_num_cand, self.args.num_mentions]
            )
            '''
        # aggregate incoming messages from all mentions
        global_message = round_message.sum(2)
        '''
        assert global_message.shape == torch.Size(
            [self.args.num_mentions, self.args.max_num_cand]
        )
        '''
        final_global_score = F.log_softmax(entity_context_sim_scores + global_message, dim=-1)
        # combine the global score with the log p(e|m) prior via the MLP
        x = torch.cat(
            [
                final_global_score.view(self.args.num_mentions * self.args.max_num_cand, 1),
                p_e_m.view(self.args.num_mentions * self.args.max_num_cand, 1),
            ], dim=1
        )
        x = self.linear1(x)
        x = F.relu(x)
        x = self.linear2(x)
        x = x.view(self.args.num_mentions, self.args.max_num_cand)
        return x, beta, entity_context_sim_scores
def test():
    """Smoke test: one forward and one backward pass of ModelGlobal on
    random data. Requires a CUDA device."""
    parser = argparse.ArgumentParser(
        description='test entity embedding model_a',
        allow_abbrev=False,
    )
    args = parser.parse_args()

    # Hyper-parameters that would normally come from the command line.
    hyper = {
        'type': 'cuda',
        'ctxt_window': 100,
        'R': 25,
        'model': 'local',
        'nn_pem_interm_size': 100,
        'word_vecs_size': 300,
        'ent_vecs_size': 300,
        'max_num_cand': 6,
        'unk_ent_wikiid': 1,
        'unk_ent_thid': 1,
        'unk_w_id': 1,
        'num_mentions': 13,
        'lbp_iter': 10,
        'lbp_damp': 0.5,
    }
    for name, value in hyper.items():
        setattr(args, name, value)

    net = ModelGlobal(args)
    net.train()
    net.cuda()

    # Random stand-ins for context words, candidate entities and p(e|m).
    word_ids = torch.ones(args.num_mentions, args.ctxt_window, dtype=torch.long)
    word_vecs = torch.rand(args.num_mentions, args.ctxt_window, args.word_vecs_size)
    ent_ids = torch.ones(args.num_mentions, args.max_num_cand, dtype=torch.long)
    ent_vecs = torch.rand(args.num_mentions, args.max_num_cand, args.ent_vecs_size)
    prior = torch.zeros(args.num_mentions, args.max_num_cand)

    batch = [
        [word_ids, word_vecs],
        [ent_ids, ent_vecs],
        prior,
    ]

    torch.autograd.set_detect_anomaly(True)
    batch = utils.move_to_cuda(batch)

    scores, beta, entity_context_sim_scores = net(batch)
    total = torch.sum(scores)
    print('Forward Success!')

    total.backward()
    print('Backward Success!')
# Allow running this module directly as a smoke test.
if __name__ == '__main__':
    test()
5273f884bdde093ed253a7bc5d4d0356e46a62b0 | 4,349 | py | Python | 11_cnnInceptionMNIST.py | cwangjiang/PyTorch_Basics | bdc4dfb2a8565c3b86320c9c56cf57ef812387f1 | [
"MIT"
] | null | null | null | 11_cnnInceptionMNIST.py | cwangjiang/PyTorch_Basics | bdc4dfb2a8565c3b86320c9c56cf57ef812387f1 | [
"MIT"
] | null | null | null | 11_cnnInceptionMNIST.py | cwangjiang/PyTorch_Basics | bdc4dfb2a8565c3b86320c9c56cf57ef812387f1 | [
"MIT"
] | null | null | null | # this code use cnn with inception unit + softmax to predict handwriting 0-9
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
# Mini-batch size shared by the train and test loaders.
batch_size = 64

# download the dataset, don't need to construct it using a class, just transfer it as data loader.
# NOTE: download=False on the training set assumes MNIST is already present
# under ./data/.
train_dataset = datasets.MNIST(root='./data/', train=True, transform=transforms.ToTensor(), download=False)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='./data/', train=False, transform=transforms.ToTensor())
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
# Step 1. Define network layers, Inception NN has different strucutre, and it has a well defined sub-net, we use individual class to define the inception sub-net
class Inception(nn.Module):
    """GoogLeNet-style inception block: four parallel branches whose outputs
    are concatenated along the channel axis (16 + 24 + 24 + 24 = 88 channels).
    Every branch preserves the spatial size of the input."""

    def __init__(self, in_channels):
        super(Inception, self).__init__()
        # Branch 1: single 1x1 convolution.
        self.branch1_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        # Branch 2: 1x1 bottleneck followed by a 5x5 convolution.
        self.branch2_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch2_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
        # Branch 3: 1x1 bottleneck followed by two 3x3 convolutions.
        self.branch3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)
        # Branch 4: 3x3 average pooling followed by a 1x1 convolution.
        self.branch4_1 = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        out1 = self.branch1_1(x)
        out2 = self.branch2_2(self.branch2_1(x))
        out3 = self.branch3_3(self.branch3_2(self.branch3_1(x)))
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        out4 = self.branch4_1(pooled)
        # All branches keep H and W unchanged, so channel concatenation is valid.
        return torch.cat([out1, out2, out3, out4], 1)
class Net(nn.Module):
    """MNIST classifier: two conv -> maxpool -> relu -> inception stages,
    then a linear layer producing per-class log-probabilities."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.incept1 = Inception(in_channels=10)   # emits 88 channels
        self.conv2 = nn.Conv2d(88, 20, kernel_size=5)
        self.incept2 = Inception(in_channels=20)
        self.mp = nn.MaxPool2d(2)  # 2x2 max pooling
        # For 28x28 input: conv1 -> 24, pool -> 12, conv2 -> 8, pool -> 4;
        # 88 channels * 4 * 4 = 1408 flattened features.
        self.fc = nn.Linear(1408, 10)

    def forward(self, x):
        batch = x.size(0)
        x = self.incept1(F.relu(self.mp(self.conv1(x))))
        x = self.incept2(F.relu(self.mp(self.conv2(x))))
        x = self.fc(x.view(batch, -1))
        return F.log_softmax(x)
model = Net()

# Step 2. create loss criterion and optimizer
#criterion = nn.CrossEntropyLoss() # don't use CrossEntropyLoss now, but F.nll_loss()
# (the model outputs log_softmax, so nll_loss is the matching criterion)
optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum=0.5)
# Define training function
def train(epoch):
    """Run one training epoch over ``train_loader``, logging every 10 batches.

    Relies on the module-level ``model``, ``optimizer`` and ``train_loader``.
    ``len(data)`` is the batch size, ``len(train_loader)`` the number of batches.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        # The model emits log-probabilities (log_softmax), so nll_loss is the
        # right criterion; raw logits would need CrossEntropyLoss instead.
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            # Fix: loss.data[0] raises on 0-dim tensors in PyTorch >= 0.5;
            # loss.item() is the supported way to read the scalar value.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
# There is only one test set; test_loader yields it in batches.
def test():
    """Evaluate ``model`` on ``test_loader`` and print average loss/accuracy."""
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:  # loop over all test batches
        output = model(data)
        # Fix: sum per-example losses (reduction='sum') so that dividing by the
        # dataset size below yields a true per-example average; accumulating
        # per-batch *means* and dividing by len(dataset) understates the loss.
        test_loss += F.nll_loss(output, target, reduction='sum').item()
        pred = output.data.max(1, keepdim=True)[1]  # index of max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()  # running count of correct predictions
    test_loss /= len(test_loader.dataset)  # average loss per example
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Train for epochs 1..9, evaluating on the test set after each epoch.
for epoch in range(1,10):
    train(epoch)
    test()
| 36.855932 | 184 | 0.721315 | 1,712 | 0.393654 | 0 | 0 | 0 | 0 | 0 | 0 | 1,273 | 0.292711 |
52779a9cdce9f94293477ea1630e84c3ba8660ce | 2,842 | py | Python | LovelyFlask/App/views.py | legendary6666/AxfFlask | a93aa8a17fd8a04e4b6510bbb66b317e0da2bdd8 | [
"Apache-2.0"
] | null | null | null | LovelyFlask/App/views.py | legendary6666/AxfFlask | a93aa8a17fd8a04e4b6510bbb66b317e0da2bdd8 | [
"Apache-2.0"
] | null | null | null | LovelyFlask/App/views.py | legendary6666/AxfFlask | a93aa8a17fd8a04e4b6510bbb66b317e0da2bdd8 | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint, request, render_template, url_for
from LovelyFlask.App.models import HomeWheel, HomeNav, HomeMustBuy, HomeMainShow, HomeShop, FoodTypes, Goods, CartModel
# Blueprint holding every view in this module; all routes mount under /api/.
blue = Blueprint("first_blue", __name__, url_prefix="/api/")
def init_first_blue(app):
    """Attach this module's blueprint to the given Flask application."""
    app.register_blueprint(blue)
ALL_TYPE = 0  # sentinel child-category id meaning "all sub-categories" (see marketWithParams)
# Fix: the original decorator was ``methods="GET","POST"`` — a positional
# argument after a keyword argument, i.e. a SyntaxError; methods must be a list.
@blue.route("/home/", methods=["GET", "POST"])
def home():
    """Home endpoint: gather carousel, nav, must-buy, main-show and shop rows."""
    wheels = HomeWheel.query.all()
    navs = HomeNav.query.all()
    mustbuys = HomeMustBuy.query.all()
    mainshows = HomeMainShow.query.all()
    shops = HomeShop.query.all()
    # The page lays the shops out in four fixed-size groups.
    shops0_1 = shops[0:1]
    shops1_3 = shops[1:3]
    shops3_7 = shops[3:7]
    shops7_11 = shops[7:11]
    data = {
        "wheels": wheels,
        "navs": navs,
        "mustbuys": mustbuys,
        'shops0_1': shops0_1,
        'shops1_3': shops1_3,
        'shops3_7': shops3_7,
        'shops7_11': shops7_11,
        'mainshows': mainshows,
    }
    # NOTE(review): ``data`` is assembled but never handed to the template;
    # presumably this should be render_template("test.html", **data) — confirm.
    return render_template("test.html")
def market(request):
    # A default child category is assumed, so we can go straight to the
    # parameterised market page.  (translated from Chinese)
    # NOTE(review): render_template() is given a URL (url_for) instead of a
    # template filename, and url_for("marketWithParams") lacks that endpoint's
    # required arguments — confirm this route actually works.
    return render_template(url_for("marketWithParams"))
def marketWithParams(request, categoryid,childcid):
    # NOTE(review): Django-style ORM calls (.objects.all/.filter) mixed with a
    # Flask-SQLAlchemy ``query`` lookup below — presumably a partial Django
    # port; confirm which ORM these models actually use.
    foods = FoodTypes.objects.all()
    # NOTE(review): ALL_TYPE is a module constant fixed at 0, so the else
    # branch (filtering by child category) is currently unreachable — confirm.
    if ALL_TYPE == 0:
        goods_list = Goods.objects.all().filter(categoryid=categoryid)
    else:
        goods_list = Goods.objects.all().filter(categoryid = categoryid).filter(childcid=childcid)
    food = FoodTypes.query.get(typeid=categoryid)
    # ``food`` is the selected category object.  (translated from Chinese)
    # Its ``childtypenames`` attribute is one string whose entries are
    # separated by '#'.  (translated from Chinese)
    childtypestr = food.childtypenames
    # Split on '#' to get a list of "name:id" entries.  (translated)
    childtypelist = childtypestr.split("#")
    # Split each entry on ':' and collect the pairs, giving a nested list.
    # (translated from Chinese)
    childlist = []
    for child in childtypelist:
        childlist.append(child.split(":"))
    # e.g. [["All categories", 0], ["Imported fruit", 110]]; the template shows
    # element [0] of each pair as the display text.  (translated from Chinese)
    data = {
        "title":"闪购",
        "foods":foods,
        "goods_list":goods_list,
        "categoryid":categoryid,
        "childlist":childlist,
    }
    return render_template('/html/market/market.html',context=data)
def cart(request):
    # NOTE(review): Django-style session access and ``.objects.filter`` inside
    # a Flask module — presumably ported from Django; confirm it runs.
    userid = request.session.get("user_id")
    if not userid:
        # Not logged in.  NOTE(review): url_for("") with an empty endpoint
        # looks broken — confirm the intended login route.
        return render_template(url_for(""))
    carts = CartModel.objects.filter(c_user_id=userid)
    is_all_select = True
    totalprice = 0
    # The total price is computed as soon as the page loads, counting only the
    # selected rows.  (translated from Chinese)
    for cart in carts:
        if not cart.c_goods_select:
            is_all_select = False
            # Any unselected row clears the "all selected" flag; selected rows
            # still contribute to the total below.  (translated from Chinese)
        else:
            totalprice = totalprice + cart.c_goods_num * cart.c_goods.price
    data = {
        "title":"购物车",
        "carts":carts,
        "is_all_select":is_all_select,
        "totalprice":totalprice
    }
    return render_template('/html/cart/cart.html',context=data)
| 22.555556 | 119 | 0.63582 | 0 | 0 | 0 | 0 | 696 | 0.210272 | 0 | 0 | 1,064 | 0.32145 |
527a60539e80aabfa5ad82b97a6806b4bb9a6026 | 3,546 | py | Python | tests/test_benchmark_config.py | chayim/redisbench-admin | 11ca257c8d3f3af3cd1cd95ee2b2dedf366a807f | [
"Apache-2.0"
] | null | null | null | tests/test_benchmark_config.py | chayim/redisbench-admin | 11ca257c8d3f3af3cd1cd95ee2b2dedf366a807f | [
"Apache-2.0"
] | null | null | null | tests/test_benchmark_config.py | chayim/redisbench-admin | 11ca257c8d3f3af3cd1cd95ee2b2dedf366a807f | [
"Apache-2.0"
] | null | null | null | import json
import yaml
from redisbench_admin.utils.benchmark_config import (
results_dict_kpi_check,
check_required_modules,
extract_redis_dbconfig_parameters,
)
def test_results_dict_kpi_check():
    """KPI check: empty configs pass codes through; bad KPIs force rc 1."""
    # Empty config/results: the incoming return code is passed through.
    assert results_dict_kpi_check({}, {}, 0) == 0
    assert results_dict_kpi_check({}, {}, 1) == 1
    with open(
            "./tests/test_data/redisgraph-benchmark-go-result.json", "r"
    ) as result_fd:
        results_dict = json.load(result_fd)
    # KPIs the result satisfies keep the return code at 0.
    with open(
            "./tests/test_data/redisgraph-benchmark-go-defaults.yml", "r"
    ) as config_fd:
        benchmark_config = yaml.safe_load(config_fd)
    assert results_dict_kpi_check(benchmark_config, results_dict, 0) == 0
    # KPIs the result violates force the return code to 1.
    with open(
            "./tests/test_data/redisgraph-benchmark-go-bad-kpis.yml", "r"
    ) as config_fd:
        benchmark_config = yaml.safe_load(config_fd)
    assert results_dict_kpi_check(benchmark_config, results_dict, 0) == 1
def test_check_required_modules():
    """Required-module detection: missing modules raise, present ones pass."""
    # No requirements is always satisfiable.
    check_required_modules([], [])
    # A required module missing from the module list must raise.
    for module_names, required_modules in (["s"], ["search"]), ([], ["search"]):
        try:
            check_required_modules(module_names, required_modules)
        except Exception as e:
            assert "Unable to detect required module" in e.__str__()
    # All required modules present: no exception expected.
    check_required_modules(["search", "ReJSON", "TimeSeries"], ["search"])
    check_required_modules(["search", "ReJSON", "TimeSeries"], ["search", "TimeSeries"])
def test_extract_redis_configuration_parameters():
    """dbconfig extraction: parameters dict and dataset-load timeout.

    Table-driven rewrite: the original repeated the same open/load/assert
    pattern four times and even duplicated the defaults.yml case verbatim.
    ``expected_params`` of None means "don't check the parameters dict".
    """
    cases = [
        ("./tests/test_data/redisgraph-benchmark-go-defaults.yml", {}, 120),
        ("./tests/test_data/tsbs-devops-ingestion-scale100-4days-keyspace.yml",
         {
             "notify-keyspace-events": "KEA",
             "timeout": 0,
         }, 120),
        ("./tests/test_data/redisgraph-benchmark-go-dataset-timeout.yml", None,
         1200),
    ]
    for config_path, expected_params, expected_timeout in cases:
        with open(config_path, "r") as config_fd:
            benchmark_config = yaml.safe_load(config_fd)
        (
            redis_configuration_parameters,
            dataset_load_timeout_secs,
        ) = extract_redis_dbconfig_parameters(benchmark_config, "dbconfig")
        assert dataset_load_timeout_secs == expected_timeout
        if expected_params is not None:
            assert redis_configuration_parameters == expected_params
| 37.326316 | 88 | 0.64608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 681 | 0.192047 |
527aa6734f670a7305701a9491f740dd20386e1e | 4,982 | py | Python | thornpy/utilities.py | bthornton191/thorpy | 3f5ffac31ef2d6d5763eb1a1abb3328c23ce5612 | [
"MIT"
] | null | null | null | thornpy/utilities.py | bthornton191/thorpy | 3f5ffac31ef2d6d5763eb1a1abb3328c23ce5612 | [
"MIT"
] | null | null | null | thornpy/utilities.py | bthornton191/thorpy | 3f5ffac31ef2d6d5763eb1a1abb3328c23ce5612 | [
"MIT"
] | null | null | null | """Miscellaneous tools.
"""
import os
import csv
from typing import Union
from subprocess import Popen
from pathlib import Path
from scipy.io import loadmat
import pandas as pd
def open_in_explorer(path : os.PathLike) -> None:
    """Open the Windows file explorer at *path*.

    A directory is opened as the explorer root; a file is opened with the
    file pre-selected.  The explorer process is spawned and not waited on.

    Parameters
    ----------
    path : os.PathLike
        An existing file or directory.

    Raises
    ------
    FileNotFoundError
        If *path* is neither an existing file nor an existing directory.
    """
    target = Path(path)
    if target.is_dir():
        _ = Popen(f'explorer.exe /root,"{path}"')
    elif target.is_file():
        _ = Popen(f'explorer.exe /select,"{path}"')
    else:
        # Fix: include the offending path instead of raising an empty error.
        raise FileNotFoundError(f'No such file or directory: {path}')
def num_to_ith(num):
    """Convert an integer to an ordinal string (1st, 2nd, 3rd, ...).

    Parameters
    ----------
    num : int
        Number.  -1 maps to 'last'; numbers below -1 count back from the
        end ('1st to last', '2nd to last', ...).

    Returns
    -------
    str
        Ordinal number.
    """
    if num == -1:
        return 'last'
    if num < -1:
        digits = str(num + 1).lstrip('-')
    else:
        digits = str(num)
    # The teens (11th, 12th, 13th, 111th, ...) always take 'th'.
    if len(digits) > 1 and digits[-2] == '1':
        suffix = 'th'
    else:
        suffix = {'1': 'st', '2': 'nd', '3': 'rd'}.get(digits[-1], 'th')
    ordinal = digits + suffix
    if num < -1:
        ordinal += ' to last'
    return ordinal
def read_data_string(text, delimiter=',', newline='\n', has_headerline=True):
    """Read a delimited string into a list of dictionaries.

    Works like :meth:`numpy.genfromtxt`, but on strings instead of files.
    Rows whose field count differs from the header count are skipped.

    Parameters
    ----------
    text : str
        Row/column data using `delimiter` and `newline` separators.
    delimiter : str, optional
        Field delimiter, by default ','
    newline : str, optional
        Row separator, by default '\n'
    has_headerline : bool, optional
        If True, the first row supplies the dictionary keys.  If False, the
        first row is data and generic keys '1', '2', ... are used.

    Returns
    -------
    :obj:`list` of :obj:`dict`
        One dictionary per complete data row.
    """
    rows = text.split(newline)
    if has_headerline:
        # Consume the first row as the header.
        headers = rows.pop(0).split(delimiter)
    else:
        # Synthesize generic 1-based column names from the first row's width.
        headers = [str(i + 1) for i in range(len(rows[0].split(delimiter)))]
    records = []
    for row in rows:
        fields = row.split(delimiter)
        # Only keep rows with no missing (or extra) fields.
        if len(fields) == len(headers):
            records.append(dict(zip(headers, fields)))
    return records
def convert_path(filepath):
    """Convert the slashes in `filepath` to the current OS's separator.

    Parameters
    ----------
    filepath : str
        Filepath to be converted (may mix '/' and '\\').

    Returns
    -------
    str
        Normalized filepath using ``os.sep`` throughout.
    """
    unified = filepath.replace('\\', os.sep).replace('/', os.sep)
    return os.path.normpath(unified)
def mat_to_pd(filename, data_start=None, data_end=None):
    """Create a pandas dataframe from a .mat file.

    Each variable in the file must be a 1-D row or column vector; each
    becomes one dataframe column.

    Parameters
    ----------
    filename : str
        Filename of a .mat file.
    data_start : int, optional
        First row index to keep from each variable (defaults to 0).
    data_end : int, optional
        One-past-last row index to keep (defaults to the length of the
        first variable encountered).

    Returns
    -------
    dataframe
        Pandas dataframe of the data in `filename`.

    Raises
    ------
    ValueError
        If a variable is neither a row nor a column vector.
    """
    mat = loadmat(filename)  # load mat-file
    dataframes = []
    # Skip the bookkeeping entries that loadmat always includes.
    for variable in [var for var in mat if var not in ['__header__', '__globals__', '__version__']]:
        if mat[variable].shape[1] == 1:
            array = mat[variable]
        elif mat[variable].shape[0] == 1:
            array = mat[variable].transpose()
        else:
            # Fix: the original f-string had no placeholder, so the error never
            # said which file/variable was at fault.
            raise ValueError(
                f'{filename}: variable {variable!r} does not contain the expected data!')
        # Figure out data start and end indices if not given.
        if data_start is None:
            data_start = 0
        if data_end is None:
            data_end = len(array)
        dataframes.append(pd.DataFrame(array[data_start:data_end], columns=[variable]))
    dataframe = pd.concat(dataframes, axis=1)
    return dataframe
def dict_to_csv(data, filename):
    """Write `data` to a csv file: keys form the header row and each value
    sequence becomes one column.

    Parameters
    ----------
    data : dict
        Dictionary mapping column names to equal-length value sequences.
    filename : str
        Name of file to write data to (include extension).
    """
    columns = data.values()
    with open(filename, 'w', newline='') as handle:
        csv_writer = csv.writer(handle)
        csv_writer.writerow(data.keys())
        # zip(*columns) transposes the per-column sequences into rows.
        for record in zip(*columns):
            csv_writer.writerow(record)
| 27.988764 | 153 | 0.580891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,430 | 0.487756 |
527b6c5e2d8ecc9a252bf2b0696deaac0d511759 | 25,521 | py | Python | python/paddle/fluid/tests/unittests/test_rnn_cell_api.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/test_rnn_cell_api.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_rnn_cell_api.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid.framework import program_guard, Program
from paddle.fluid.executor import Executor
from paddle.fluid import framework
from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell
from paddle.fluid.layers import rnn as dynamic_rnn
from paddle.fluid import contrib
from paddle.fluid.contrib.layers import basic_lstm
import paddle.fluid.layers.utils as utils
import numpy as np
class TestLSTMCellError(unittest.TestCase):
    """Negative tests: LSTMCell must raise TypeError for non-Variable inputs,
    int-typed tensors, and a non-float cell dtype."""
    def test_errors(self):
        with program_guard(Program(), Program()):
            batch_size, input_size, hidden_size = 4, 16, 16
            inputs = fluid.data(name='inputs',
                                shape=[None, input_size],
                                dtype='float32')
            pre_hidden = fluid.data(name='pre_hidden',
                                    shape=[None, hidden_size],
                                    dtype='float32')
            pre_cell = fluid.data(name='pre_cell',
                                  shape=[None, hidden_size],
                                  dtype='float32')
            cell = LSTMCell(hidden_size)
            # A raw numpy array is not a Variable, so the cell must reject it.
            def test_input_Variable():
                np_input = np.random.random(
                    (batch_size, input_size)).astype("float32")
                cell(np_input, [pre_hidden, pre_cell])
            self.assertRaises(TypeError, test_input_Variable)
            def test_pre_hidden_Variable():
                np_pre_hidden = np.random.random(
                    (batch_size, hidden_size)).astype("float32")
                cell(inputs, [np_pre_hidden, pre_cell])
            self.assertRaises(TypeError, test_pre_hidden_Variable)
            def test_pre_cell_Variable():
                np_pre_cell = np.random.random(
                    (batch_size, input_size)).astype("float32")
                cell(inputs, [pre_hidden, np_pre_cell])
            self.assertRaises(TypeError, test_pre_cell_Variable)
            # int32 tensors are Variables but the wrong dtype for the cell.
            def test_input_type():
                error_inputs = fluid.data(name='error_inputs',
                                          shape=[None, input_size],
                                          dtype='int32')
                cell(error_inputs, [pre_hidden, pre_cell])
            self.assertRaises(TypeError, test_input_type)
            def test_pre_hidden_type():
                error_pre_hidden = fluid.data(name='error_pre_hidden',
                                              shape=[None, hidden_size],
                                              dtype='int32')
                cell(inputs, [error_pre_hidden, pre_cell])
            self.assertRaises(TypeError, test_pre_hidden_type)
            def test_pre_cell_type():
                error_pre_cell = fluid.data(name='error_pre_cell',
                                            shape=[None, hidden_size],
                                            dtype='int32')
                cell(inputs, [pre_hidden, error_pre_cell])
            self.assertRaises(TypeError, test_pre_cell_type)
            def test_dtype():
                # the cell dtype must be a float type
                LSTMCell(hidden_size, dtype="int32")
            self.assertRaises(TypeError, test_dtype)
class TestLSTMCell(unittest.TestCase):
    """Parity test: layers.LSTMCell must produce the same hidden state as the
    contrib BasicLSTMUnit when both are given identical weights."""
    def setUp(self):
        self.batch_size = 4
        self.input_size = 16
        self.hidden_size = 16
    def test_run(self):
        inputs = fluid.data(name='inputs',
                            shape=[None, self.input_size],
                            dtype='float32')
        pre_hidden = fluid.data(name='pre_hidden',
                                shape=[None, self.hidden_size],
                                dtype='float32')
        pre_cell = fluid.data(name='pre_cell',
                              shape=[None, self.hidden_size],
                              dtype='float32')
        cell = LSTMCell(self.hidden_size)
        lstm_hidden_new, lstm_states_new = cell(inputs, [pre_hidden, pre_cell])
        lstm_unit = contrib.layers.rnn_impl.BasicLSTMUnit(
            "basicLSTM", self.hidden_size, None, None, None, None, 1.0,
            "float32")
        lstm_hidden, lstm_cell = lstm_unit(inputs, pre_hidden, pre_cell)
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = Executor(place)
        exe.run(framework.default_startup_program())
        inputs_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
        pre_hidden_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
        pre_cell_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
        # Pairs of (LSTMCell param, BasicLSTMUnit param) that must match.
        param_names = [[
            "LSTMCell/BasicLSTMUnit_0.w_0", "basicLSTM/BasicLSTMUnit_0.w_0"
        ], ["LSTMCell/BasicLSTMUnit_0.b_0", "basicLSTM/BasicLSTMUnit_0.b_0"]]
        # Copy one freshly drawn tensor into the matching parameter of BOTH
        # implementations so their outputs are directly comparable.
        for names in param_names:
            param = np.array(fluid.global_scope().find_var(
                names[0]).get_tensor())
            param = np.random.uniform(-0.1, 0.1,
                                      size=param.shape).astype('float32')
            fluid.global_scope().find_var(names[0]).get_tensor().set(
                param, place)
            fluid.global_scope().find_var(names[1]).get_tensor().set(
                param, place)
        out = exe.run(feed={
            'inputs': inputs_np,
            'pre_hidden': pre_hidden_np,
            'pre_cell': pre_cell_np
        },
                      fetch_list=[lstm_hidden_new, lstm_hidden])
        self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))
class TestGRUCellError(unittest.TestCase):
    """Negative tests: GRUCell must raise TypeError for non-Variable inputs,
    int-typed tensors, and a non-float cell dtype."""
    def test_errors(self):
        with program_guard(Program(), Program()):
            batch_size, input_size, hidden_size = 4, 16, 16
            inputs = fluid.data(name='inputs',
                                shape=[None, input_size],
                                dtype='float32')
            pre_hidden = layers.data(name='pre_hidden',
                                     shape=[None, hidden_size],
                                     append_batch_size=False,
                                     dtype='float32')
            cell = GRUCell(hidden_size)
            # A raw numpy array is not a Variable, so the cell must reject it.
            def test_input_Variable():
                np_input = np.random.random(
                    (batch_size, input_size)).astype("float32")
                cell(np_input, pre_hidden)
            self.assertRaises(TypeError, test_input_Variable)
            def test_pre_hidden_Variable():
                np_pre_hidden = np.random.random(
                    (batch_size, hidden_size)).astype("float32")
                cell(inputs, np_pre_hidden)
            self.assertRaises(TypeError, test_pre_hidden_Variable)
            # int32 tensors are Variables but the wrong dtype for the cell.
            def test_input_type():
                error_inputs = fluid.data(name='error_inputs',
                                          shape=[None, input_size],
                                          dtype='int32')
                cell(error_inputs, pre_hidden)
            self.assertRaises(TypeError, test_input_type)
            def test_pre_hidden_type():
                error_pre_hidden = fluid.data(name='error_pre_hidden',
                                              shape=[None, hidden_size],
                                              dtype='int32')
                cell(inputs, error_pre_hidden)
            self.assertRaises(TypeError, test_pre_hidden_type)
            def test_dtype():
                # the cell dtype must be a float type
                GRUCell(hidden_size, dtype="int32")
            self.assertRaises(TypeError, test_dtype)
class TestGRUCell(unittest.TestCase):
    """Parity test: layers.GRUCell must produce the same hidden state as the
    contrib BasicGRUUnit when both are given identical weights."""
    def setUp(self):
        self.batch_size = 4
        self.input_size = 16
        self.hidden_size = 16
    def test_run(self):
        inputs = fluid.data(name='inputs',
                            shape=[None, self.input_size],
                            dtype='float32')
        pre_hidden = layers.data(name='pre_hidden',
                                 shape=[None, self.hidden_size],
                                 append_batch_size=False,
                                 dtype='float32')
        cell = GRUCell(self.hidden_size)
        gru_hidden_new, _ = cell(inputs, pre_hidden)
        gru_unit = contrib.layers.rnn_impl.BasicGRUUnit("basicGRU",
                                                        self.hidden_size, None,
                                                        None, None, None,
                                                        "float32")
        gru_hidden = gru_unit(inputs, pre_hidden)
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = Executor(place)
        exe.run(framework.default_startup_program())
        inputs_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
        pre_hidden_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
        # Pairs of (GRUCell param, BasicGRUUnit param) that must match.
        param_names = [
            ["GRUCell/BasicGRUUnit_0.w_0", "basicGRU/BasicGRUUnit_0.w_0"],
            ["GRUCell/BasicGRUUnit_0.w_1", "basicGRU/BasicGRUUnit_0.w_1"],
            ["GRUCell/BasicGRUUnit_0.b_0", "basicGRU/BasicGRUUnit_0.b_0"],
            ["GRUCell/BasicGRUUnit_0.b_1", "basicGRU/BasicGRUUnit_0.b_1"]
        ]
        # Copy one freshly drawn tensor into the matching parameter of BOTH
        # implementations so their outputs are directly comparable.
        for names in param_names:
            param = np.array(fluid.global_scope().find_var(
                names[0]).get_tensor())
            param = np.random.uniform(-0.1, 0.1,
                                      size=param.shape).astype('float32')
            fluid.global_scope().find_var(names[0]).get_tensor().set(
                param, place)
            fluid.global_scope().find_var(names[1]).get_tensor().set(
                param, place)
        out = exe.run(feed={
            'inputs': inputs_np,
            'pre_hidden': pre_hidden_np
        },
                      fetch_list=[gru_hidden_new, gru_hidden])
        self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))
class TestRnnError(unittest.TestCase):
    """Negative tests for layers.rnn (dynamic_rnn): numpy inputs, wrongly
    typed initial states and sequence lengths must all raise TypeError."""
    def test_errors(self):
        with program_guard(Program(), Program()):
            batch_size = 4
            input_size = 16
            hidden_size = 16
            seq_len = 4
            inputs = fluid.data(name='inputs',
                                shape=[None, input_size],
                                dtype='float32')
            pre_hidden = layers.data(name='pre_hidden',
                                     shape=[None, hidden_size],
                                     append_batch_size=False,
                                     dtype='float32')
            inputs_basic_lstm = fluid.data(name='inputs_basic_lstm',
                                           shape=[None, None, input_size],
                                           dtype='float32')
            sequence_length = fluid.data(name="sequence_length",
                                         shape=[None],
                                         dtype='int64')
            # dynamic_rnn is time-major: move the time axis first.
            inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm,
                                                  perm=[1, 0, 2])
            cell = LSTMCell(hidden_size, name="LSTMCell_for_rnn")
            np_inputs_dynamic_rnn = np.random.random(
                (seq_len, batch_size, input_size)).astype("float32")
            def test_input_Variable():
                dynamic_rnn(cell=cell,
                            inputs=np_inputs_dynamic_rnn,
                            sequence_length=sequence_length,
                            is_reverse=False)
            self.assertRaises(TypeError, test_input_Variable)
            def test_input_list():
                dynamic_rnn(cell=cell,
                            inputs=[np_inputs_dynamic_rnn],
                            sequence_length=sequence_length,
                            is_reverse=False)
            self.assertRaises(TypeError, test_input_list)
            def test_initial_states_type():
                cell = GRUCell(hidden_size, name="GRUCell_for_rnn")
                error_initial_states = np.random.random(
                    (batch_size, hidden_size)).astype("float32")
                dynamic_rnn(cell=cell,
                            inputs=inputs_dynamic_rnn,
                            initial_states=error_initial_states,
                            sequence_length=sequence_length,
                            is_reverse=False)
            self.assertRaises(TypeError, test_initial_states_type)
            def test_initial_states_list():
                error_initial_states = [
                    np.random.random(
                        (batch_size, hidden_size)).astype("float32"),
                    np.random.random(
                        (batch_size, hidden_size)).astype("float32")
                ]
                dynamic_rnn(cell=cell,
                            inputs=inputs_dynamic_rnn,
                            initial_states=error_initial_states,
                            sequence_length=sequence_length,
                            is_reverse=False)
            # Fix: the original re-ran test_initial_states_type here (copy-paste
            # bug), leaving test_initial_states_list never executed.
            self.assertRaises(TypeError, test_initial_states_list)
            def test_sequence_length_type():
                np_sequence_length = np.random.random(
                    (batch_size)).astype("float32")
                dynamic_rnn(cell=cell,
                            inputs=inputs_dynamic_rnn,
                            sequence_length=np_sequence_length,
                            is_reverse=False)
            self.assertRaises(TypeError, test_sequence_length_type)
class TestRnn(unittest.TestCase):
    """Parity test: dynamic_rnn over an LSTMCell must match basic_lstm when
    both share the same weights."""
    def setUp(self):
        self.batch_size = 4
        self.input_size = 16
        self.hidden_size = 16
        self.seq_len = 4
    def test_run(self):
        inputs_basic_lstm = fluid.data(name='inputs_basic_lstm',
                                       shape=[None, None, self.input_size],
                                       dtype='float32')
        sequence_length = fluid.data(name="sequence_length",
                                     shape=[None],
                                     dtype='int64')
        # dynamic_rnn is time-major, so transpose batch-major input first and
        # transpose the output back before comparing against basic_lstm.
        inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm, perm=[1, 0, 2])
        cell = LSTMCell(self.hidden_size, name="LSTMCell_for_rnn")
        output, final_state = dynamic_rnn(cell=cell,
                                          inputs=inputs_dynamic_rnn,
                                          sequence_length=sequence_length,
                                          is_reverse=False)
        output_new = layers.transpose(output, perm=[1, 0, 2])
        rnn_out, last_hidden, last_cell = basic_lstm(inputs_basic_lstm, None, None, self.hidden_size, num_layers=1, \
                batch_first = False, bidirectional=False, sequence_length=sequence_length, forget_bias = 1.0)
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = Executor(place)
        exe.run(framework.default_startup_program())
        inputs_basic_lstm_np = np.random.uniform(
            -0.1, 0.1,
            (self.seq_len, self.batch_size, self.input_size)).astype('float32')
        sequence_length_np = np.ones(self.batch_size,
                                     dtype='int64') * self.seq_len
        inputs_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
        pre_hidden_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
        pre_cell_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
        # Pairs of (dynamic_rnn cell param, basic_lstm param) that must match.
        param_names = [[
            "LSTMCell_for_rnn/BasicLSTMUnit_0.w_0",
            "basic_lstm_layers_0/BasicLSTMUnit_0.w_0"
        ],
                       [
                           "LSTMCell_for_rnn/BasicLSTMUnit_0.b_0",
                           "basic_lstm_layers_0/BasicLSTMUnit_0.b_0"
                       ]]
        # Copy one freshly drawn tensor into the matching parameter of BOTH
        # implementations so their outputs are directly comparable.
        for names in param_names:
            param = np.array(fluid.global_scope().find_var(
                names[0]).get_tensor())
            param = np.random.uniform(-0.1, 0.1,
                                      size=param.shape).astype('float32')
            fluid.global_scope().find_var(names[0]).get_tensor().set(
                param, place)
            fluid.global_scope().find_var(names[1]).get_tensor().set(
                param, place)
        out = exe.run(feed={
            'inputs_basic_lstm': inputs_basic_lstm_np,
            'sequence_length': sequence_length_np,
            'inputs': inputs_np,
            'pre_hidden': pre_hidden_np,
            'pre_cell': pre_cell_np
        },
                      fetch_list=[output_new, rnn_out])
        self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4))
class TestRnnUtil(unittest.TestCase):
    """
    Test cases for rnn apis' utility methods for coverage.
    """
    def test_case(self):
        inputs = {"key1": 1, "key2": 2}
        func = lambda x: x + 1
        outputs = utils.map_structure(func, inputs)
        # Same key structure: must not raise.
        utils.assert_same_structure(inputs, outputs)
        # Fix: the original wrapped this in try/except ValueError with a bare
        # ``pass``, so the test also passed silently when NO exception was
        # raised.  Assert that a structure mismatch really does raise.
        inputs["key3"] = 3
        with self.assertRaises(ValueError):
            utils.assert_same_structure(inputs, outputs)
class EncoderCell(RNNCell):
    """Multi-layer LSTM encoder cell with optional inter-layer dropout."""
    def __init__(
            self,
            num_layers,
            hidden_size,
            dropout_prob=0.,
            init_scale=0.1,
    ):
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout_prob = dropout_prob
        # One LSTMCell per stacked layer.
        self.lstm_cells = [LSTMCell(hidden_size) for _ in range(num_layers)]
    def call(self, step_input, states):
        """Feed one timestep through the stack; return (top output, states)."""
        new_states = []
        layer_input = step_input
        for lstm_cell, layer_state in zip(self.lstm_cells, states):
            out, new_state = lstm_cell(layer_input, layer_state)
            new_states.append(new_state)
            # Dropout is applied between layers only when a rate is configured.
            if self.dropout_prob:
                layer_input = layers.dropout(
                    out,
                    self.dropout_prob,
                )
            else:
                layer_input = out
        return layer_input, new_states
    @property
    def state_shape(self):
        return [lstm_cell.state_shape for lstm_cell in self.lstm_cells]
class DecoderCell(RNNCell):
    """Multi-layer LSTM decoder cell with optional inter-layer dropout."""
    def __init__(self, num_layers, hidden_size, dropout_prob=0.):
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout_prob = dropout_prob
        # One LSTMCell per stacked layer.
        self.lstm_cells = [LSTMCell(hidden_size) for _ in range(num_layers)]
    def call(self, step_input, states):
        """Feed one timestep through the stack; return (top output, states)."""
        new_lstm_states = []
        layer_input = step_input
        for lstm_cell, layer_state in zip(self.lstm_cells, states):
            out, new_lstm_state = lstm_cell(layer_input, layer_state)
            new_lstm_states.append(new_lstm_state)
            # Dropout is applied between layers only when a rate is configured.
            if self.dropout_prob:
                layer_input = layers.dropout(
                    out,
                    self.dropout_prob,
                )
            else:
                layer_input = out
        return layer_input, new_lstm_states
def def_seq2seq_model(num_layers, hidden_size, dropout_prob, src_vocab_size,
                      trg_vocab_size):
    """Build a vanilla LSTM seq2seq training graph and return its loss.

    The graph reads src/trg token ids plus their lengths and a label tensor,
    runs an encoder/decoder pair of stacked LSTM cells, masks padded target
    positions, and attaches an Adam optimizer to the resulting loss.
    """
    # data
    source = fluid.data(name="src", shape=[None, None], dtype="int64")
    source_length = fluid.data(name="src_sequence_length",
                               shape=[None],
                               dtype="int64")
    target = fluid.data(name="trg", shape=[None, None], dtype="int64")
    target_length = fluid.data(name="trg_sequence_length",
                               shape=[None],
                               dtype="int64")
    label = fluid.data(name="label", shape=[None, None, 1], dtype="int64")
    # embedding
    # NOTE(review): tar_emb is sized with src_vocab_size — presumably it
    # should be trg_vocab_size; harmless here because the test uses equal
    # vocab sizes, but confirm.
    src_emb = fluid.embedding(source, (src_vocab_size, hidden_size))
    tar_emb = fluid.embedding(target, (src_vocab_size, hidden_size))
    # encoder
    enc_cell = EncoderCell(num_layers, hidden_size, dropout_prob)
    enc_output, enc_final_state = dynamic_rnn(cell=enc_cell,
                                              inputs=src_emb,
                                              sequence_length=source_length)
    # decoder, initialised from the encoder's final state
    dec_cell = DecoderCell(num_layers, hidden_size, dropout_prob)
    dec_output, dec_final_state = dynamic_rnn(cell=dec_cell,
                                              inputs=tar_emb,
                                              initial_states=enc_final_state)
    logits = layers.fc(dec_output,
                       size=trg_vocab_size,
                       num_flatten_dims=len(dec_output.shape) - 1,
                       bias_attr=False)
    # loss, with padded target positions masked out
    loss = layers.softmax_with_cross_entropy(logits=logits,
                                             label=label,
                                             soft_label=False)
    loss = layers.unsqueeze(loss, axes=[2])
    max_tar_seq_len = layers.shape(target)[1]
    tar_mask = layers.sequence_mask(target_length,
                                    maxlen=max_tar_seq_len,
                                    dtype="float32")
    loss = loss * tar_mask
    loss = layers.reduce_mean(loss, dim=[0])
    loss = layers.reduce_sum(loss)
    # optimizer
    optimizer = fluid.optimizer.Adam(0.001)
    optimizer.minimize(loss)
    return loss
class TestSeq2SeqModel(unittest.TestCase):
    """
    Test cases to confirm seq2seq api training runs correctly.
    """
    def setUp(self):
        np.random.seed(123)
        self.model_hparams = {
            "num_layers": 2,
            "hidden_size": 128,
            "dropout_prob": 0.1,
            "src_vocab_size": 100,
            "trg_vocab_size": 100
        }
        self.iter_num = iter_num = 2
        self.batch_size = batch_size = 4
        src_seq_len = 10
        trg_seq_len = 12
        # Random token ids/lengths for iter_num batches of each feed tensor.
        self.data = {
            "src":
            np.random.randint(
                2, self.model_hparams["src_vocab_size"],
                (iter_num * batch_size, src_seq_len)).astype("int64"),
            "src_sequence_length":
            np.random.randint(1, src_seq_len,
                              (iter_num * batch_size, )).astype("int64"),
            "trg":
            np.random.randint(
                2, self.model_hparams["src_vocab_size"],
                (iter_num * batch_size, trg_seq_len)).astype("int64"),
            "trg_sequence_length":
            np.random.randint(1, trg_seq_len,
                              (iter_num * batch_size, )).astype("int64"),
            "label":
            np.random.randint(
                2, self.model_hparams["src_vocab_size"],
                (iter_num * batch_size, trg_seq_len, 1)).astype("int64"),
        }
        place = core.CUDAPlace(
            0) if core.is_compiled_with_cuda() else core.CPUPlace()
        self.exe = Executor(place)
    def _feed_batch(self, name, iter_idx):
        """Return the ``iter_idx``-th batch slice of ``self.data[name]``."""
        lo = iter_idx * self.batch_size
        return self.data[name][lo:lo + self.batch_size]
    def test_seq2seq_model(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            cost = def_seq2seq_model(**self.model_hparams)
            self.exe.run(startup_program)
            for iter_idx in range(self.iter_num):
                # Build the feed dict via the slice helper instead of repeating
                # the same slicing arithmetic for every field (refactor of the
                # original copy-pasted slices; [lo:hi] == [lo:hi, :] for numpy).
                feed = {
                    name: self._feed_batch(name, iter_idx)
                    for name in ("src", "src_sequence_length", "trg",
                                 "trg_sequence_length", "label")
                }
                cost_val = self.exe.run(feed=feed, fetch_list=[cost])[0]
                print("iter_idx: %d, cost: %f" % (iter_idx, cost_val))
# Run the full suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 39.323575 | 117 | 0.528819 | 22,027 | 0.863093 | 0 | 0 | 98 | 0.00384 | 0 | 0 | 2,781 | 0.108969 |
527bc4d5ab9a34fe4787a9eb7ce66708aaf90dff | 4,591 | py | Python | pyfsdb/dbheatmap.py | samihaija/pyfsdb | a993adf54fa5fe61fb74c544f0899fb37586fb13 | [
"MIT"
] | null | null | null | pyfsdb/dbheatmap.py | samihaija/pyfsdb | a993adf54fa5fe61fb74c544f0899fb37586fb13 | [
"MIT"
] | null | null | null | pyfsdb/dbheatmap.py | samihaija/pyfsdb | a993adf54fa5fe61fb74c544f0899fb37586fb13 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pyfsdb
def parse_args():
    """Parse the command-line arguments for the heat-map generator.

    Returns
    -------
    argparse.Namespace
        Parsed arguments; ``columns`` holds exactly two column names.

    Raises
    ------
    ValueError
        If -c/--columns was not supplied (argparse already enforces the
        two-value arity whenever the flag is present).
    """
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
                            description=__doc__,
                            epilog="Example Usage: ")  # fix: 'Exmaple' typo
    parser.add_argument("-c", "--columns", type=str, nargs=2,
                        help="Columns to use")
    parser.add_argument("-v", "--value-column", default="count", type=str,
                        help="The value column to plot as the heat map")
    parser.add_argument("-i", "--invert", action="store_true",
                        help="Invert the foreground/background colors")
    parser.add_argument("-F", "--add-fractions", action="store_true",
                        help="Add text fraction labels to the grid")
    parser.add_argument("-R", "--add-raw", action="store_true",
                        help="Add text raw-value labels to the grid")
    parser.add_argument("-L", "--add-labels", action="store_true",
                        help="Add x/y axis labels")
    parser.add_argument("-fs", "--font-size", default=None, type=int,
                        help="Set the fontsize for labels")
    parser.add_argument("input_file", type=FileType('r'),
                        nargs='?', default=sys.stdin,
                        help="Input fsdb file to read")
    parser.add_argument("output_file", type=str,
                        nargs='?', default="out.png",
                        help="Where to write the png file to")
    args = parser.parse_args()
    # Guard against the flag being omitted entirely (nargs=2 only applies
    # when -c is given).
    if not args.columns or len(args.columns) != 2:
        raise ValueError("exactly 2 columns must be passed to -c")
    return args
def main():
    """Read an FSDB table and render a 2-D heat map of one value column.

    Rows are pivoted on the two ``-c`` columns; cell intensity is the value
    column normalized by its maximum.  The figure is written to
    ``args.output_file``.
    """
    args = parse_args()

    # read in the input data
    f = pyfsdb.Fsdb(file_handle=args.input_file,
                    return_type=pyfsdb.RETURN_AS_DICTIONARY)

    max_value = None
    dataset = {} # nested tree structure
    ycols = {} # stores each unique second value

    # First pass: build dataset[col0][col1] = value and track the running max
    # for later normalization.
    for row in f:
        if not max_value:
            max_value = float(row[args.value_column])
        else:
            max_value = max(max_value, float(row[args.value_column]))

        if row[args.columns[0]] not in dataset:
            dataset[row[args.columns[0]]] = \
                { row[args.columns[1]]: float(row[args.value_column]) }
        else:
            dataset[row[args.columns[0]]][row[args.columns[1]]] = \
                float(row[args.value_column])
        ycols[row[args.columns[1]]] = 1

    # merge the data into a two dimensional array
    # Missing (col0, col1) combinations become 0.0.
    data = []
    xcols = sorted(dataset.keys())
    ycols = sorted(ycols.keys())

    for first_column in xcols:
        newrow = []
        for second_column in ycols:
            if second_column in dataset[first_column]:
                newrow.append(dataset[first_column][second_column] / max_value)
            else:
                newrow.append(0.0)
        data.append(newrow)

    grapharray = np.array(data)
    # Default (no --invert): flip so high values render dark on light.
    if not args.invert:
        grapharray = 1 - grapharray

    # generate the graph
    fig, ax = plt.subplots()
    ax.imshow(grapharray, vmin=0.0, vmax=1.0, cmap='gray')
    ax.set_xlabel(args.columns[1])
    ax.set_ylabel(args.columns[0])

    if args.add_labels:
        ax.set_yticks(np.arange(len(dataset)))
        ax.set_yticklabels(xcols)
        ax.set_xticks(np.arange(len(ycols)))
        ax.set_xticklabels(ycols)

        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                 rotation_mode="anchor")

    # Overlay either the normalized fractions or the raw integer values.
    if args.add_fractions:
        for i in range(len(grapharray)):
            for j in range(len(grapharray[i])):
                text = ax.text(j, i, "{:1.1f}".format(grapharray[i][j]),
                               ha="center", va="center", color="r",
                               fontsize=args.font_size)
    elif args.add_raw:
        for i, first_column in enumerate(xcols):
            for j, second_column in enumerate(ycols):
                try:
                    value = dataset[first_column][second_column]
                    ax.text(j, i, "{}".format(int(value)),
                            ha="center", va="center", color="r",
                            fontsize=args.font_size)
                except Exception:
                    # Missing combination: deliberately leave the cell blank.
                    pass

    fig.tight_layout()
    plt.savefig(args.output_file,
                bbox_inches="tight", pad_inches=0)

    # import pprint
    # pprint.pprint(dataset)
if __name__ == "__main__":
main()
| 33.028777 | 79 | 0.566543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 850 | 0.185145 |
527dc5a4f8f8e32f762c34d938c444eff18aaebe | 1,912 | py | Python | cheatsheeter/extensions/embed_img.py | jorgebg/cheatsheets | fbcb9f09d9ece79d297870ce0623c1824613aad0 | [
"MIT"
] | null | null | null | cheatsheeter/extensions/embed_img.py | jorgebg/cheatsheets | fbcb9f09d9ece79d297870ce0623c1824613aad0 | [
"MIT"
] | null | null | null | cheatsheeter/extensions/embed_img.py | jorgebg/cheatsheets | fbcb9f09d9ece79d297870ce0623c1824613aad0 | [
"MIT"
] | null | null | null |
import base64
import mimetypes
import os
from urllib.parse import unquote
import xml.etree.ElementTree as etree
from markdown.inlinepatterns import LinkInlineProcessor
from markdown.extensions import Extension
def remove_namespace(doc, namespace):
    """Remove namespace in the passed document in place.

    Strips the Clark-notation ``{namespace}`` prefix from the tag of the
    element and all its descendants, mutating the tree in place.

    Parameters
    ----------
    doc : xml.etree.ElementTree.Element or ElementTree
        Tree whose element tags should be rewritten.
    namespace : str
        Namespace URI to strip, e.g. ``"http://www.w3.org/2000/svg"``.
    """
    ns = u'{%s}' % namespace
    nsl = len(ns)
    # Element.getiterator() was deprecated and removed in Python 3.9;
    # iter() is the supported equivalent and walks self + descendants.
    for elem in doc.iter():
        if elem.tag.startswith(ns):
            elem.tag = elem.tag[nsl:]
class EmbedImageInlineProcessor(LinkInlineProcessor):
    """Return an embedded ``img`` element from the given match.

    SVG files are inlined as-is (namespace and fixed dimensions stripped);
    any other file is embedded as a base64 ``data:`` URI in an ``<img>``.
    """

    def handleMatch(self, m, data):
        # Imported lazily to avoid a circular import at module load time.
        from cheatsheeter.__main__ import cheatsheeter

        text, index, handled = self.getText(data, m.end(0))
        if not handled:
            return None, None, None

        src, title, index, handled = self.getLink(data, index)
        if not handled:
            return None, None, None

        # Image paths are resolved relative to the cheatsheet source dir.
        filename = os.path.join(cheatsheeter.source_path, unquote(src))
        if src.endswith('.svg'):
            # Inline the SVG root directly; drop width/height so it scales
            # with the surrounding layout.
            el = etree.parse(filename).getroot()
            remove_namespace(el, "http://www.w3.org/2000/svg")
            el.attrib.pop('width', None)
            el.attrib.pop('height', None)
        else:
            mime = mimetypes.guess_type(filename)[0]
            with open(filename, 'br') as f:
                data = base64.b64encode(f.read()).decode('ascii')
            src_data = "data:{};base64,{}".format(mime, data)

            el = etree.Element("img")
            el.set("src", src_data)

        if title is not None:
            el.set("title", title)

        el.set('alt', self.unescape(text))
        return el, m.start(0), index
class EmbedImageExtension(Extension):
    """Markdown extension registering the embed-image inline syntax.

    Triggers on ``!![...](...)`` — a regular image link with one extra
    leading ``!`` — and renders the image inlined into the document.
    """
    def extendMarkdown(self, md):
        # Priority 175 places this ahead of the stock image pattern so the
        # double-bang form is consumed before the normal ``![`` handler.
        EMBED_IMAGE_LINK_RE = r'\!\!\['
        md.inlinePatterns.register(EmbedImageInlineProcessor(EMBED_IMAGE_LINK_RE, md), 'embed_img', 175)
527e86226640cb993df97a6dd75cf709f8f69f69 | 1,000 | py | Python | Lintcode/Ladder_64_G_GC/1398. K Decimal Addition.py | ctc316/algorithm-python | ac4580d55e05e93e407c6156c9bb801808027d60 | [
"MIT"
] | null | null | null | Lintcode/Ladder_64_G_GC/1398. K Decimal Addition.py | ctc316/algorithm-python | ac4580d55e05e93e407c6156c9bb801808027d60 | [
"MIT"
] | null | null | null | Lintcode/Ladder_64_G_GC/1398. K Decimal Addition.py | ctc316/algorithm-python | ac4580d55e05e93e407c6156c9bb801808027d60 | [
"MIT"
] | null | null | null | class Solution:
"""
@param k: The k
@param a: The A
@param b: The B
@return: The answer
"""
def addition(self, k, a, b):
result = ""
i = len(a) - 1
j = len(b) - 1
carry = 0
while i >= 0 and j >= 0:
summ = carry + ord(a[i]) - ord('0') + ord(b[j]) - ord('0')
carry = int(summ / k)
result += str(summ % k)
i -= 1
j -= 1
while i >= 0 :
summ = carry + ord(a[i]) - ord('0')
carry = int(summ / k)
result += str(summ % k)
i -= 1
while j >= 0 :
summ = carry + ord(b[j]) - ord('0')
carry = int(summ / k)
result += str(summ % k)
j -= 1
if carry > 0:
result += str(carry)
result = result[::-1]
# trim zeros
for i in range(len(result)):
if result[i] != "0":
return result[i:]
return "0" | 23.255814 | 70 | 0.372 | 1,000 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.127 |
527ea97496890c3d4c2ba0cde080bddf0bb1b16e | 675 | py | Python | algorithm/util.py | Scott-Huang/SemEval-2021-Task-6 | d9dfac1b30d7926d614d18be29db3fcd4bc8f84f | [
"MIT"
] | null | null | null | algorithm/util.py | Scott-Huang/SemEval-2021-Task-6 | d9dfac1b30d7926d614d18be29db3fcd4bc8f84f | [
"MIT"
] | null | null | null | algorithm/util.py | Scott-Huang/SemEval-2021-Task-6 | d9dfac1b30d7926d614d18be29db3fcd4bc8f84f | [
"MIT"
] | null | null | null | import json
import argparse
def to_output(filepath, outpath):
    """Convert a model prediction dump into the submission output format.

    Reads the JSON list at *filepath* and writes to *outpath* a JSON list
    keeping, for each entry, only its ``id`` and its predicted labels
    (renamed from ``pred_labels`` to ``labels``).

    Parameters
    ----------
    filepath : str
        Path to the input JSON file: a list of dicts that contain at least
        the ``id`` and ``pred_labels`` keys.
    outpath : str
        Path of the JSON file to (over)write.
    """
    with open(filepath, encoding='utf-8') as f:
        data = json.load(f)
    output = [{'id': d['id'], 'labels': d['pred_labels']} for d in data]
    # Write with an explicit utf-8 encoding so the output matches the
    # encoding used when reading the input (the original relied on the
    # platform default, which breaks non-ASCII labels on some systems).
    with open(outpath, 'w', encoding='utf-8') as f:
        json.dump(output, f)
if __name__ == '__main__':
    # CLI entry point: convert the predictions at <in_path> into the
    # submission format and write them to <out_path>.
    parser = argparse.ArgumentParser(description='Convert model prediction into acceptable format.')
    parser.add_argument('in_path', metavar='i', type=str)
    parser.add_argument('out_path', metavar='o', type=str)
    args = parser.parse_args()
    to_output(args.in_path, args.out_path)
527eb99dfa2b028f29819a3104c6f9c9adfe7aae | 2,886 | py | Python | pyxlsb2/cell.py | cccs-jh/pyxlsb2 | 6c5a2990a62e0d80a366d87eb65cbc20573c7b08 | [
"Apache-2.0",
"MIT"
] | 16 | 2020-04-24T20:07:12.000Z | 2022-02-03T18:58:11.000Z | pyxlsb2/cell.py | cccs-jh/pyxlsb2 | 6c5a2990a62e0d80a366d87eb65cbc20573c7b08 | [
"Apache-2.0",
"MIT"
] | 12 | 2020-06-08T14:10:13.000Z | 2022-03-31T14:58:06.000Z | pyxlsb2/cell.py | cccs-jh/pyxlsb2 | 6c5a2990a62e0d80a366d87eb65cbc20573c7b08 | [
"Apache-2.0",
"MIT"
] | 13 | 2020-06-06T07:58:06.000Z | 2021-12-24T11:39:43.000Z | import sys
if sys.version_info > (3,):
basestring = (str, bytes)
long = int
class DeprecatedCellMixin(object):
    """Legacy single-letter ``Cell`` accessors kept for 1.0.x compatibility.

    Each property simply forwards to its modern, fully-named counterpart.
    """

    __slots__ = ()

    @property
    def r(self):
        """The row number of this cell.

        .. deprecated:: 1.1.0
            Use the ``row_num`` property instead.
        """
        return self.row.num

    @property
    def c(self):
        """The column number of this cell.

        .. deprecated:: 1.1.0
            Use the ``col`` property instead.
        """
        return self.col

    @property
    def v(self):
        """The value of this cell.

        .. deprecated:: 1.1.0
            Use the ``value`` or the typed ``*_value`` properties instead.
        """
        return self.value

    @property
    def f(self):
        """The formula of this cell.

        .. deprecated:: 1.1.0
            Use the ``formula`` property instead.
        """
        return self.formula
class Cell(DeprecatedCellMixin):
    """A cell in a worksheet.

    Attributes:
        row (Row): The containing row.
        col (int): The column index for this cell.
        value (mixed): The cell value.
        formula (bytes): The formula PTG bytes.
        style_id (int): The style index in the style table.
    """

    __slots__ = ('row', 'col', 'value', 'formula', 'style_id')

    def __init__(self, row, col, value=None, formula=None, style_id=None):
        self.row = row
        self.col = col
        self.value = value
        self.formula = formula
        self.style_id = style_id

    def __repr__(self):
        return 'Cell(row={}, col={}, value={}, formula={}, style_id={})' \
            .format(self.row, self.col, self.value, self.formula, self.style_id)

    @property
    def row_num(self):
        """The row number of this cell."""
        return self.row.num

    @property
    def string_value(self):
        """The string value of this cell, or None if not a string."""
        return self.value if isinstance(self.value, basestring) else None

    @property
    def numeric_value(self):
        """The numeric value of this cell, or None if not a number."""
        return self.value if isinstance(self.value, (int, long, float)) else None

    @property
    def bool_value(self):
        """The boolean value of this cell, or None if not a boolean."""
        return self.value if isinstance(self.value, bool) else None

    @property
    def date_value(self):
        """The date value of this cell, or None if not a numeric cell."""
        return self.row.sheet.workbook.convert_date(self.value)

    @property
    def is_date_formatted(self):
        """Whether this cell is formatted with a date-like format code."""
        fmt = self.row.sheet.workbook.styles._get_format(self.style_id)
        return fmt.is_date_format
| 27.226415 | 94 | 0.582814 | 2,796 | 0.968815 | 0 | 0 | 1,805 | 0.625433 | 0 | 0 | 1,325 | 0.459113 |
527f0ea921ffa6f195d05d140e655588c4f82a9e | 7,234 | py | Python | 2018/vis/simple_test.py | vais-ral/WorkExperience | 74cb47645a1c524230648fce465ece25e1a4d713 | [
"BSD-2-Clause"
] | null | null | null | 2018/vis/simple_test.py | vais-ral/WorkExperience | 74cb47645a1c524230648fce465ece25e1a4d713 | [
"BSD-2-Clause"
] | 1 | 2018-07-10T16:03:42.000Z | 2018-07-10T16:03:42.000Z | 2018/vis/simple_test.py | vais-ral/WorkExperience | 74cb47645a1c524230648fce465ece25e1a4d713 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 13:59:43 2018
@author: ofn77899
"""
import numpy
from ccpi.segmentation.SimpleflexSegmentor import SimpleflexSegmentor
from ccpi.viewer.CILViewer import CILViewer
from ccpi.viewer.CILViewer2D import CILViewer2D, Converter
import vtk
#Text-based input system
# Keep prompting until every value parses as an int; a single bad entry
# restarts the whole questionnaire.
# NOTE(review): the prompts contain the typo "initital" (runtime strings,
# left untouched here); FrameCount and AngleRange are collected but appear
# unused later in the script — confirm before removing.
valid = False
while valid == False:
    try:
        InitialCameraPositionX = int(input('Enter the initital camera position on the x-axis:'))
        InitialCameraPositionY = int(input('Enter the initital camera position on the y-axis:'))
        InitialCameraPositionZ = int(input('Enter the initital camera position on the z-axis:'))
        FrameCount = int(input('Enter number of frames for the animation:'))
        ViewUp1 = int(input('Enter the first viewup value:'))
        ViewUp2 = int(input('Enter the second viewup value:'))
        ViewUp3 = int(input('Enter the third viewup value:'))
        FocalPointX = int(input('Enter the x-coordinate for the camera focal point:'))
        FocalPointY = int(input('Enter the y-coordinate for the camera focal point:'))
        FocalPointZ = int(input('Enter the z-coordinate for the camera focal point:'))
        AngleRangeStart = int(input('Enter the first value for the angle range:'))
        AngleRangeEnd = int(input('Enter the last value for the angle range:'))
        ClippingRangeStart = int(input('Set lowest value for clipping range:'))
        ClippingRangeEnd = int(input('Set highest value for clipping range:'))
        # Pack the raw ints into the tuples consumed by the VTK camera setup.
        InitialCameraPosition = (InitialCameraPositionX, InitialCameraPositionY, InitialCameraPositionZ)
        FocalPoint = (FocalPointX, FocalPointY, FocalPointZ)
        AngleRange = (AngleRangeStart, AngleRangeEnd)
        ClippingRange = (ClippingRangeStart, ClippingRangeEnd)
        ViewUp = (ViewUp1, ViewUp2, ViewUp3)
    except ValueError:
        print('One or more of your inputs were not valid! Try again')
    else:
        valid = True
def surface2vtkPolyData(coord_list, origin = (0,0,0), spacing=(1,1,1)):
    """Build a vtkPolyData triangle mesh from iso-surface point lists.

    Parameters
    ----------
    coord_list : iterable of surfaces, each an iterable of 4-vectors
        Points in homogeneous image coordinates; every 3 consecutive
        points form one triangle.
    origin, spacing : 3-tuples
        World-coordinate origin shift and voxel spacing applied to every
        point before insertion.

    Returns
    -------
    vtk.vtkPolyData with the transformed vertices and triangle cells.
    """
    ########################################################################
    # 7. Display
    # with the retrieved data we construct polydata actors to be displayed
    # with VTK. Notice that this part is VTK specific. However, it shows how to
    # process the data returned by the algorithm.
    # Create the VTK output
    # Points coordinates structure
    triangle_vertices = vtk.vtkPoints()
    #associate the points to triangles
    triangle = vtk.vtkTriangle()
    trianglePointIds = triangle.GetPointIds()
    # put all triangles in an array
    triangles = vtk.vtkCellArray()
    isTriangle = 0
    nTriangle = 0  # NOTE(review): never used below — candidate for removal
    surface = 0
    # associate each coordinate with a point: 3 coordinates are needed for a point
    # in 3D. Additionally we perform a shift from image coordinates (pixel) which
    # is the default of the Contour Tree Algorithm to the World Coordinates.
    # TODO: add this in the algorithm.
    # 4x4 homogeneous transform: scale by voxel spacing, then translate by origin.
    mScaling = numpy.asarray([spacing[0], 0,0,0,
                              0,spacing[1],0,0,
                              0,0,spacing[2],0,
                              0,0,0,1]).reshape((4,4))
    mShift = numpy.asarray([1,0,0,origin[0],
                            0,1,0,origin[1],
                            0,0,1,origin[2],
                            0,0,0,1]).reshape((4,4))
    mTransform = numpy.dot(mScaling, mShift)
    point_count = 0
    for surf in coord_list:
        print("Image-to-world coordinate trasformation ... %d" % surface)
        for point in surf:
            world_coord = numpy.dot(mTransform, point)
            # Axis order is flipped (z,y,x image order -> x,y,z world order).
            xCoord = world_coord[2]
            yCoord = world_coord[1]
            zCoord = world_coord[0]
            # i += 3
            triangle_vertices.InsertNextPoint(xCoord, yCoord, zCoord);
            # The id of the vertex of the triangle (0,1,2) is linked to
            # the id of the points in the list, so in facts we just link id-to-id
            trianglePointIds.SetId(isTriangle, point_count)
            isTriangle += 1
            point_count += 1
            # Every third point closes a triangle; emit it and start the next.
            if (isTriangle == 3) :
                isTriangle = 0;
                # insert the current triangle in the triangles array
                triangles.InsertNextCell(triangle);
        surface += 1
    # polydata object
    trianglePolyData = vtk.vtkPolyData()
    trianglePolyData.SetPoints( triangle_vertices )
    trianglePolyData.SetPolys( triangles )
    return trianglePolyData
# --- Load the volume, run the contour-tree segmentation, and extract the
# --- local iso-surfaces to display.
reader = vtk.vtkMetaImageReader()
reader.SetFileName("../../data/fuel_uc_python.mha")
reader.Update()
seg = SimpleflexSegmentor()
seg.setInputData(Converter.vtk2numpy(reader.GetOutput()))
seg.calculateContourTree()
#seg.setIsoValuePercent(24.)
seg.setLocalIsoValuePercent(0.)
seg.resetCollapsePriority(seg.PRIORITY_VOLUME)
# 5. Construct the iso-surfaces
print ("calling resetCollapsePriority")
#seg.updateTreeFromLogTreeSize(size=0.6, isGlobal=False)
print ("calling setlogtreesize")
seg.ct.SetLogTreeSize(1)
print ("calling UpdateTreeFromLogTreeSize")
seg.ct.UpdateTreeFromLogTreeSize()
print ("calling ConstructLocalIsoSurface")
#seg.constructLocalIsoSurfaces()
seg.ct.ConstructLocalIsoSurface()
print ("called ConstructLocalIsoSurface")
#seg.constructIsoSurfaces()
# 6. Retrieve the isosurfaces and display
coord_list = seg.getSurfaces()
del (seg)
#print ("getSurface " , len(coord_list))
# Swap the first and last spacing components (image z,y,x -> world x,y,z).
spacing = numpy.asarray(reader.GetOutput().GetSpacing())
s1 = spacing[0]
spacing[0] = spacing[2]
spacing[2] = s1
print (len(coord_list))
v = CILViewer()
v.setInput3DData(reader.GetOutput())
v.displayPolyData(surface2vtkPolyData(coord_list, spacing=spacing))
#v.startRenderLoop()
dimX, dimY, dimZ = reader.GetOutput().GetDimensions()
#Setting locked values for camera position
# NOTE(review): locX and locY are never used below; only locZ locks the
# camera height during the animation.
locX = InitialCameraPosition[0]
locY = InitialCameraPosition[1]
locZ = InitialCameraPosition[2]
#Setting camera position
v.getCamera().SetPosition(InitialCameraPosition)
v.getCamera().SetFocalPoint(FocalPoint)
#Setting camera viewup
v.getCamera().SetViewUp(ViewUp)
#Set camera clipping range
v.getCamera().SetClippingRange(ClippingRange)
#Defining distance from camera to focal point
# Orbit radius in the y-z plane around the focal point.
r = numpy.sqrt(((InitialCameraPosition[2]-FocalPoint[2])**2)
                +(InitialCameraPosition[1]-FocalPoint[1])**2)
print('Radius: {}'.format(r))
camera = vtk.vtkCamera()
camera.SetPosition(InitialCameraPosition)
camera.SetFocalPoint(FocalPoint)
camera.SetViewUp(ViewUp)
v.getRenderer().SetActiveCamera(camera)
#Animating the camera
# NOTE(review): the frame count is hard-coded to 100 — the FrameCount the
# user was asked for is ignored. NewLocationZ is computed but discarded in
# favour of the locked height locZ — confirm this is intended.
for x in range(100):
    # Two full revolutions (4*pi) spread over the 100 frames.
    angle = ((numpy.pi)*4/100)*x
    NewLocationX = r*(numpy.sin(angle))+FocalPoint[0]
    NewLocationY = r*(numpy.cos(angle))+FocalPoint[1]
    NewLocationZ = r*(numpy.cos(angle))+FocalPoint[2]
    NewLocation = (NewLocationX, NewLocationY, locZ)
    v.getCamera().SetPosition(NewLocation)
    #Rendering and saving the render
    v.getRenderer().Render()
    v.saveRender('test_{}.png'.format(x))
v.startRenderLoop()
528090f5687885fbfb1c8e3d60235e9fb42ea50b | 1,081 | py | Python | Scripts/day12.py | HarrisonGreen/Advent-of-Code-2015 | 6a81395882c79135548eb7984bfd98c279c5f258 | [
"MIT"
] | null | null | null | Scripts/day12.py | HarrisonGreen/Advent-of-Code-2015 | 6a81395882c79135548eb7984bfd98c279c5f258 | [
"MIT"
] | null | null | null | Scripts/day12.py | HarrisonGreen/Advent-of-Code-2015 | 6a81395882c79135548eb7984bfd98c279c5f258 | [
"MIT"
] | null | null | null | import json
def read_input():
    """Load and return the puzzle input parsed from ``Data/day12.json``."""
    # Context manager closes the handle deterministically (the original
    # left the file open until garbage collection).
    with open("Data/day12.json", "r") as file:
        return json.load(file)
def calculate_sum(accounts):
    """Recursively sum every number in a parsed JSON document.

    Strings and booleans contribute 0; lists and dicts are traversed
    recursively (dict keys are ignored, only values are summed).

    Parameters
    ----------
    accounts : str | bool | int | float | list | dict | None
        Any fragment of a parsed JSON document.

    Returns
    -------
    int | float
        The total of all numbers contained in ``accounts``.
    """
    if isinstance(accounts, str):
        return 0
    elif isinstance(accounts, bool):
        # bool is a subclass of int; exclude it explicitly so that JSON
        # true/false add nothing (matching the original type()==int check).
    # fmt: off
        return 0
    # fmt: on
    elif isinstance(accounts, (int, float)):
        # Also handles JSON floats, which the original silently dropped
        # (returning None and crashing the enclosing sum()).
        return accounts
    elif isinstance(accounts, list):
        return sum(calculate_sum(item) for item in accounts)
    elif isinstance(accounts, dict):
        return sum(calculate_sum(item) for item in accounts.values())
    # Unknown node type (e.g. null): contribute nothing instead of
    # returning None.
    return 0
def calculate_sum_no_red(accounts):
    """Sum every number in a parsed JSON document, skipping "red" dicts.

    Identical to :func:`calculate_sum` except that any dict with the
    value ``"red"`` contributes 0 — its whole subtree is skipped.
    ``"red"`` inside a *list* does not trigger the skip.

    Parameters
    ----------
    accounts : str | bool | int | float | list | dict | None
        Any fragment of a parsed JSON document.

    Returns
    -------
    int | float
        The total of all numbers contained in ``accounts``, excluding
        subtrees rooted at dicts holding the value ``"red"``.
    """
    if isinstance(accounts, str):
        return 0
    elif isinstance(accounts, bool):
        # bool is a subclass of int; exclude it so true/false add nothing.
        return 0
    elif isinstance(accounts, (int, float)):
        # Also handles JSON floats, which the original silently dropped.
        return accounts
    elif isinstance(accounts, list):
        return sum(calculate_sum_no_red(item) for item in accounts)
    elif isinstance(accounts, dict):
        if "red" in accounts.values():
            return 0
        return sum(calculate_sum_no_red(item) for item in accounts.values())
    # Unknown node type (e.g. null): contribute nothing.
    return 0
if __name__ == "__main__":
accounts = read_input()
print(f"Part one: {calculate_sum(accounts)}")
print(f"Part two: {calculate_sum_no_red(accounts)}")
| 30.027778 | 81 | 0.60407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.109158 |
52813eb5a92147299029b6f56c0318355c220c8b | 131 | py | Python | backend/app/admin/components/__init__.py | griviala/garpix_page | 55f1d9bc6d1de29d18e15369bebcbef18811b5a4 | [
"MIT"
] | null | null | null | backend/app/admin/components/__init__.py | griviala/garpix_page | 55f1d9bc6d1de29d18e15369bebcbef18811b5a4 | [
"MIT"
] | null | null | null | backend/app/admin/components/__init__.py | griviala/garpix_page | 55f1d9bc6d1de29d18e15369bebcbef18811b5a4 | [
"MIT"
] | null | null | null | from .text_component import TextComponentAdmin # noqa
from .text_description_component import TextDescriptionComponentAdmin # noqa
| 43.666667 | 76 | 0.877863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.091603 |
5282a5299abff7b8701b16a10c2c45a9be1078cc | 27 | py | Python | portal/pulsar/__init__.py | bbhunter/pulsar | 1f6384482eebc71137716e27ba7a010f3aea7241 | [
"Apache-2.0",
"BSD-3-Clause"
] | 12 | 2021-12-28T14:15:27.000Z | 2022-03-29T00:45:00.000Z | portal/pulsar/__init__.py | bbhunter/pulsar | 1f6384482eebc71137716e27ba7a010f3aea7241 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2022-02-09T12:47:14.000Z | 2022-02-09T12:47:14.000Z | portal/pulsar/__init__.py | bbhunter/pulsar | 1f6384482eebc71137716e27ba7a010f3aea7241 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2022-01-18T03:59:11.000Z | 2022-01-18T03:59:11.000Z |
from .celeryapp import *
| 6.75 | 24 | 0.703704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
528428e2a5380d1c525ea3a3731e3cf65f022c14 | 377 | py | Python | src/tests/wikipedia.py | alexseitsinger/page-scrapers | 20898c487fa2dae72e17c10d51b7481e62c72202 | [
"BSD-2-Clause"
] | 1 | 2019-02-23T13:25:22.000Z | 2019-02-23T13:25:22.000Z | src/tests/wikipedia.py | alexseitsinger/page_scrapers | 20898c487fa2dae72e17c10d51b7481e62c72202 | [
"BSD-2-Clause"
] | null | null | null | src/tests/wikipedia.py | alexseitsinger/page_scrapers | 20898c487fa2dae72e17c10d51b7481e62c72202 | [
"BSD-2-Clause"
] | 1 | 2019-08-28T18:16:45.000Z | 2019-08-28T18:16:45.000Z | from page_scrapers.wikipedia.scrapers.film import WikipediaFilmScraper
query_1 = "hellraiser 2"
scraper_1 = WikipediaFilmScraper(query_1)
scraped_1 = scraper_1.scrape()
filtered_1 = scraper_1.filter()
print(filtered_1)
query_2 = "hellraiser judgement"
scraper_2 = WikipediaFilmScraper(query_2)
scraped_2 = scraper_2.scrape()
filtered_2 = scraper_2.filter()
print(filtered_2)
| 26.928571 | 70 | 0.816976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.095491 |
5284ceab6fbd7910c2633d7b5d3b387b689554f1 | 42,835 | py | Python | mobilkit/temporal.py | mindearth/mobilkit | 26a426f0bc071c6cefc4a07f0901a3f9af6e62ae | [
"MIT"
] | 12 | 2021-08-02T15:55:13.000Z | 2022-03-27T12:23:59.000Z | mobilkit/temporal.py | datapartnership/mobilkit | 26a426f0bc071c6cefc4a07f0901a3f9af6e62ae | [
"MIT"
] | 1 | 2021-08-02T16:04:41.000Z | 2021-08-11T14:37:55.000Z | mobilkit/temporal.py | datapartnership/mobilkit | 26a426f0bc071c6cefc4a07f0901a3f9af6e62ae | [
"MIT"
] | 2 | 2021-05-19T09:47:56.000Z | 2021-09-28T15:11:48.000Z | # Copyright (C) MindEarth <enrico.ubaldi@mindearth.org> @ Mindearth 2020-2021
#
# This file is part of mobilkit.
#
# mobilkit is distributed under the MIT license.
'''Tools and functions to analyze the data in time.
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import geopandas as gpd
from datetime import datetime, timedelta
from dask import dataframe as dd
from dask import array as da
import dask.bag as db
from mobilkit.dask_schemas import nunique, unique
from haversine import haversine
from mobilkit.dask_schemas import (
accColName,
lonColName,
latColName,
uidColName,
utcColName,
dttColName,
zidColName,
)
from mpl_toolkits.axes_grid1 import make_axes_locatable
from itertools import chain
from shapely.geometry import Polygon
from copy import copy, deepcopy
from mpl_toolkits.axes_grid1 import make_axes_locatable
# TODO
# from mobilkit.tools import flattenSetsUdf
# from spark_types import eventLineDTzone
def filter_daynight_time(df,
                         filter_from_h=21.5,
                         filter_to_h=8.5,
                         previous_day_until_h=4.,
                         daytime_from_h=9.0,
                         daytime_to_h=21.0,
                        ):
    '''
    Prepares a raw event df for the ping-based displacement analysis.

    Parameters
    ----------
    df : dask.DataFrame
        A dataframe containing at least the `uid,datetime,lat,lng` columns as
        returned by :attr:`mobilkit.loader.load_raw_files` or similar functions.
    filter_{from,to}_h : float
        The starting and ending float hours to consider.
        If `from_hour<to_hour` only pings whose float hour `h` are
        `from_hour <= h < to_hour` are considered otherwise all the pings with
        `h >= from_hour` or `h < to_hour`.
        Note that float hour `h` for datetime `dt` is `h = dt.hour + dt.minute/60.`
        so to express 9:45am put `9.75`.
    previous_day_until_h : float
        All the valid events with float hour `h < previous_day_until_h` will be
        projected to the previous day. Put 0 or a negative number to keep all events
        of one day to its `date`.
    daytime_{from,to}_h : float
        The starting and ending float hours to consider in daytime (other will be put
        in nightime. All events with `from_hour<= float_hour <= to_hour` will have a
        1 entry in the daytime column, others 0. from hour **must** be smaller than
        to hour.
        Note that float hour `h` for datetime `dt` is `h = dt.hour + dt.minute/60.`
        so to express 9:45am put `9.75`.

    Returns
    -------
    df : dask.DataFrame
        The same initial dataframe filtered accordingly to `from_hour,to_hour` and
        with three additional columns:
        - `float_hour`: the day-hour expressed as `h=dt.hour + dt.minutes`
        - `date`: the `datetime` column floored to the day. All events with
          `float_hour < previous_day_until_h` will be further moved back to
          the previous day.
        - `daytime`: 1 if the event's `float_hour` is between `daytime_from_h` and
          `daytime_to_h`
    '''
    # Validation by assert: raises AssertionError (and is skipped under -O)
    # if the daytime window is inverted.
    assert daytime_from_h < daytime_to_h
    df_with_hour = df.assign(float_hour=df[dttColName].dt.hour
                                + df[dttColName].dt.minute / 60.)
    if filter_from_h > filter_to_h:
        # Window wraps midnight: keep pings after the start OR before the end.
        df_filtered = df_with_hour[
            (df_with_hour["float_hour"] >= filter_from_h)
            | (df_with_hour["float_hour"] < filter_to_h)
        ]
    else:
        # Same-day window: keep pings inside [from_h, to_h).
        df_filtered = df_with_hour[
            (df_with_hour["float_hour"] >= filter_from_h)
            & (df_with_hour["float_hour"] < filter_to_h)
        ]
    # `daytime` uses an inclusive between() on both ends.
    df_withDay = df_filtered.assign(
        date=df_filtered[dttColName].dt.floor("1D"),
        daytime=df_filtered["float_hour"].between(daytime_from_h, daytime_to_h))
    # Early-morning pings (float_hour < previous_day_until_h) are attributed
    # to the previous calendar day by subtracting one day from `date`.
    df_fixed = df_withDay.assign(date=df_withDay["date"]
                               - dd.to_timedelta((df_withDay["float_hour"]
                                   < previous_day_until_h).astype(int),
                                unit="d"))
    return df_fixed
def computeTimeBinActivity(df, byArea=False, timeBin="hour", split_out=10):
    '''Basic function to compute, for each time bin and area, the activity profile in terms of
    users and pings recorded. It also computes the set of users seen in that bin for later aggregations.

    Parameters
    ----------
    df : dask.DataFrame
        A dataframe as returned from :attr:`mobilkit.loader.load_raw_files` or imported from
        ``scikit-mobility`` using :attr:`mobilkit.loader.load_from_skmob`. If using ``byArea``
        the df must contain the ``tile_ID`` column as returned by :attr:`mobilkit.spatial.tessellate`.
    byArea : bool, optional
        Whether or not to compute the activity per area (default ``False``).
        If ``False`` will compute the overall activity.
    timeBin : str, optional
        The width of the time bin to use to aggregate activity. Must be one of the ones
        found in [pandas time series aliases](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases). For instance:
        - `'B'` business day frequency
        - `'D'` calendar day frequency
        - `'W'` weekly frequency
        - `'M'` month end frequency
        - `'MS'` month start frequency
        - `'SMS'` semi-month start frequency (1st and 15th)
        - `'BH'` business hour frequency
        - `'H'` hourly frequency
        - `'T','min'` minutely frequency
    split_out : int, optional
        The number of dask dataframe partitions after the groupby aggregation.

    Returns
    -------
    df_activity : dask.dataframe
        A dataframe with these columns:
        - one with the same name as ``timeBin`` with the timestamp rounded to
          the selected width (note: ``dt.round`` rounds to the *nearest* bin,
          it does not truncate/floor).
        - ``pings`` the number of pings recorded in that time bin and area (if ``byArea=True``).
        - ``users`` the number of users seen in that time bin and area (if ``byArea=True``).
        - ``users_set`` the set of users seen in that time bin and area (if ``byArea=True``). Useful to normalize later analysis.
        - ``pings_per_user`` the average number of pings per user in that time bin and area (if ``byArea=True``).
        - ``tile_ID`` (if ``byArea=True``) the area where the signal has been recorded.
    '''
    # Group by time bin, plus the tile when a per-area profile is requested.
    aggKeys = [timeBin]
    if byArea:
        aggKeys.append(zidColName)
    df_out = copy(df)
    # NOTE(review): the default ``timeBin="hour"`` may not be a valid pandas
    # offset alias for dt.round (``'H'``/``'1H'`` is the documented form) —
    # TODO confirm against the pandas version pinned by the project.
    df_out[timeBin] = df_out[dttColName].dt.round(timeBin)
    # Per bin (and tile): count of pings, number of distinct users and the
    # set of user ids (custom nunique/unique dask aggregations).
    df_out = df_out.groupby(aggKeys)[[uidColName]]\
                .agg(["count", nunique, unique], split_out=split_out)
    # Flatten columns and rename
    df_out.columns = ['_'.join(col).strip() for col in df_out.columns.values]
    df_out = df_out.rename(columns={
        uidColName + "_count": "pings",
        uidColName + "_nunique": "users",
        uidColName + "_unique": "users_set",})
    # clip(lower=1) guards against division by zero on empty bins.
    df_out["pings_per_user"] = df_out["pings"] / df_out["users"].clip(lower=1.)
    return df_out
def plotMonthlyActivity(df_activity, timeBin, what="users", ax=None, log_y=False, **kwargs):
    '''Basic function to plot the monthly activity of areas or total region.

    Parameters
    ----------
    df_activity : dask.DataFrame
        A dataframe as returned from :attr:`mobilkit.temporal.computeTimeBinActivity`,
        with the ``timeBin`` column available as a column (reset the index
        first if the time bin ended up in the index).
    timeBin : str
        The width of the time bin used in :attr:`mobilkit.temporal.computeTimeBinActivity`.
    what : str, optional
        The quantity to plot. Must be one amongst ``'users', 'pings', 'pings_per_user'``.
    ax : axis, optional
        The axis to use. If ``None`` will create a new figure.
    log_y : bool, optional
        Whether or not to plot with y log scale. Default ``False``.
    **kwargs
        Will be passed to ``seaborn.lineplot`` function.

    Returns
    -------
    df : pandas.DataFrame
        The aggregated data plotted.
    ax : axis
        The axis of the figure.
    '''
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(18, 6))
    df = copy(df_activity)
    # `dt.round` does not accept non-fixed frequencies such as "MS": derive
    # the month-start timestamp through a Period round-trip instead.
    df["month"] = df[timeBin].dt.to_period("M").dt.to_timestamp()
    # Hour within the month: day-of-month * 24 + hour. The original used the
    # non-existent `dt.dayofmonth()` accessor and the literal "timeBin"
    # column name instead of the `timeBin` variable.
    df["month_hour"] = df[timeBin].dt.day * 24 + df[timeBin].dt.hour
    df = df.compute()
    # seaborn >= 0.12 requires keyword x=/y= arguments.
    sns.lineplot(x="month_hour", y=what, hue="month", data=df, ax=ax, **kwargs)
    if log_y: ax.set_yscale("log")

    # One tick per day of the month, labeled with the day number.
    hours_per_day = 24
    locs = np.arange(hours_per_day, hours_per_day*32, hours_per_day)
    plt.xticks(locs, ["%d" % (l//hours_per_day) for l in locs], size=14)
    plt.yticks(size=14)
    plt.xlabel("Day", size=16)
    plt.ylabel("Users" if what == "users"
               else "Pings" if what == "pings"
               else "Pings per user", size=16)
    plt.legend(fontsize=14, bbox_to_anchor=(1.01, .5), loc="center left")
    plt.tight_layout()
    return df, ax
def computeTemporalProfile(df_tot, timeBin,
                            byArea=False,
                            profile="week",
                            weekdays=None,
                            normalization=None,
                            start_date=None,
                            stop_date=None,
                            date_format=None,
                            sliceName=None,
                            selected_areas=None,
                            areasName=None):
    '''Function to compute the normalized profiles of areas.
    The idea is to have a dataframe with the count of users and pings
    per time bin (and per area is ``byArea=True``) together with a
    normalization column (computed if ``normalization`` is not ``None``
    over a different time window ``profile``) telling the total number
    of pings and users seen in that period (and in that area if
    ``byArea``).
    If ``normalization`` is specified, also the fraction of users and
    pings recorded in an area at that time bin are given.
    Parameters
    ----------
    df_tot : dask.DataFrame
        A dataframe as returned from :attr:`mobilkit.loader.load_raw_files`
        or imported from ``scikit-mobility`` using :attr:`mobilkit.loader.load_from_skmob`.
        If using ``byArea`` the df must contain the ``tile_ID`` column
        as returned by :attr:`mobilkit.spatial.tessellate`.
    timeBin : str
        The width of the time bin to use to aggregate activity.
        Currently supported: ["W", "MS", "M", "H", "D", "T"]
        You can implement others found in [pandas time series aliases](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases). For instance:
        - `'B'` business day frequency
        - `'D'` calendar day frequency
        - `'W'` weekly frequency
        - `'M'` month end frequency
        - `'MS'` month start frequency
        - `'SMS'` semi-month start frequency (1st and 15th)
        - `'BH'` business hour frequency
        - `'H'` hourly frequency
        - `'T','min'` minutely frequency
    byArea : bool, optional
        Whether or not to compute the activity per area (default ``False``).
        If ``False`` will compute the overall activity.
    profile : str
        The base of the activity profile: must be ``"week"`` to compute the
        weekly profile or ``"day"`` for the daily one or ``"month"`` for
        one month period (`month_end` to use month end).
        Each profile of area / week or day (depending on profile) will be
        computed separately.
        **NOTE** that this period should be equal or longer than the ``timeBin``
        (i.e., ``"weekly"`` or ``"monthly"`` if ``timeBin="week"``) otherwise
        the normalization will fail.
    weekdays : set or list, optional
        The weekdays to consider (1 Sunday -> 7 Saturday).
        Default ``None`` equals to keep all.
    normalization : str, optional
        One of ``None, "area", "total"``.
        Normalize nothing (``None``), on the total period of the area
        (``area``) or on the total period of all the selected areas (``total``).
    start_date : str, optional
        The starting date when to consider data in the ``date_format`` format.
    stop_date : str, optional
        The end date when to consider. Must have the same format as ``start_date``.
    date_format : str, optional
        The python date format of the dates, if given.
    sliceName : str, optional
        The name that will be saved in `timeSlice` column, if given.
    selected_areas : set or list, optional
        The set or list of selected areas. If ``None`` (default) uses all the areas.
        Use :attr:`mobilkit.spatial.selecteAreasFromBounds` to select areas from given bounds.
    areasName : str, optional
        The name that will be saved in `areaName` column, if given.
    Returns
    -------
    df_normalized : dask.DataFrame
        A dataframe with these columns:
        - one with the same name as ``timeBin`` with the date truncated at the selected width.
        - ``pings`` the number of pings recorded in that time bin and area (if ``byArea=True``).
        - ``users`` the number of users seen in that time bin and area (if ``byArea=True``).
        - ``pings_per_user`` the average number of pings per user in that time bin and area
          (if ``byArea=True``).
        - ``tile_ID`` (if ``byArea=True``) the area where the signal has been recorded.
        - the additional columns ``timeSlice`` and ``areaName``if the two names are given,
        plus, if ``normalization`` is not ``None``:
        - ``tot_pings/users`` the total number of pings and users seen in the area (region) in
          the profile period if normalize is ``"area"`` (``total``).
        - ``frac_pings/users`` the fraction of pings and users seen in that area, at that time bin
          with respect to the total volume of the area (region) depending on the normalization.
        - ``profile_hour`` the zero-based hour of the typical month, week or day (depending on the
          value of ``profile``).
    '''
    assert normalization in [None, "area", "total"]
    assert timeBin in ["W","MS","M","H","D","T","min"]
    if normalization == "area":
        assert byArea == True
    profile_dict = {"week": "W", "month": "MS", "day": "D", "month_end": "M"}
    assert profile in profile_dict
    profile_pandas = profile_dict[profile]
    df = copy(df_tot)
    # Optional filtering on dates, areas and weekdays.
    if start_date is not None:
        df = df[df[dttColName] >= datetime.strptime(start_date, date_format)]
    if stop_date is not None:
        df = df[df[dttColName] < datetime.strptime(stop_date, date_format)]
    if selected_areas is not None:
        df = df[df[zidColName].isin(list(selected_areas))]
    if weekdays is not None:
        weekdays = set([int(i) for i in weekdays])
        df["ddooww"] = df[dttColName].dt.weekday
        df = df[df["ddooww"].isin(weekdays)]
        df = df.drop('ddooww', axis=1)
    aggKeys = [timeBin]
    if byArea:
        aggKeys.append(zidColName)
    # Compute the time bin column: weekly/monthly bins are not fixed
    # frequencies, so they cannot be floored directly and are built by
    # shifting to the period start (or end, for "M") before flooring to days.
    if timeBin == "W":
        df[timeBin] = (df[dttColName] - dd.to_timedelta(df[dttColName].dt.weekday, unit='d')).dt.floor("D")
    elif timeBin == "MS":
        df[timeBin] = (df[dttColName] - dd.to_timedelta(df[dttColName].dt.day - 1, unit='d')).dt.floor("D")
    elif timeBin == "M":
        df[timeBin] = (df[dttColName] + dd.to_timedelta(df[dttColName].dt.days_in_month
                                                            -df[dttColName].dt.day,
                                                        unit='d')).dt.floor("D")
    else:
        df[timeBin] = df[dttColName].dt.floor(timeBin)
    # Add the (longer) profile period column with the same technique.
    if profile_pandas == "MS":
        df[profile] = (df[timeBin]
                           - dd.to_timedelta(df[timeBin].dt.day - 1,
                                             unit='d')).dt.floor("D")
    elif profile_pandas == "M":
        df[profile] = (df[timeBin] +
                            dd.to_timedelta(df[timeBin].dt.days_in_month
                                                -df[timeBin].dt.day,
                                            unit='d')).dt.floor("D")
    elif profile_pandas == "W":
        df[profile] = (df[timeBin]
                           - dd.to_timedelta(df[timeBin].dt.weekday, unit='d')).dt.floor("D")
    else:
        df[profile] = df[timeBin].dt.floor(profile_pandas)
    if normalization is None:
        # Explicit meta needed by dask's groupby-apply.
        return_meta = dict(**df.dtypes)
        return_meta = {k: return_meta[k] for k in [timeBin,profile,zidColName]}
        return_meta["pings"] = np.int64
        return_meta["users"] = np.int64
        return_meta["pings_per_user"] = np.float64
        if byArea:
            # Group on a *copy* of the area column so that the original
            # `tile_ID` column is still available inside the grouped chunks.
            # (The previous code referenced `df_reduced` before assignment and
            # an undefined bare `col1` name.)
            df = df.assign(col1=df[zidColName])
            df_reduced = df.groupby("col1")\
                           .apply(_computePerAreaGrouped,
                                  binKey=timeBin, profile=profile,
                                  meta=return_meta)
        else:
            df_reduced = _computePerAreaGrouped(df,
                                                binKey=timeBin,
                                                profile=profile)
    else:
        return_meta = dict(**df.dtypes)
        return_meta = {k: return_meta[k] for k in [timeBin, zidColName, profile]}
        return_meta["pings"] = np.int64
        return_meta["users"] = np.int64
        return_meta["pings_per_user"] = np.float64
        return_meta["tot_pings"] = np.int64
        return_meta["tot_users"] = np.int64
        return_meta["frac_pings"] = np.float64
        return_meta["frac_users"] = np.float64
        # Normalize per (area, period) when byArea, per period otherwise.
        if byArea:
            df = df.assign(col1=df[zidColName], col2=df[profile])
            aggNorm = ["col1","col2"]
        else:
            df = df.assign(col1=df[profile])
            aggNorm = ["col1"]
        df_reduced = df.groupby(aggNorm)\
                        .apply(_computePerAreaGroupedNormalization,
                                meta=return_meta, binKey=timeBin, profileKey=profile)
    # Zero-based hour within the profile period (day, week or month).
    df_reduced["profile_hour"] = df_reduced[timeBin].dt.hour
    if profile == "week":
        df_reduced["profile_hour"] = df_reduced["profile_hour"] +\
                                            df_reduced[timeBin].dt.weekday*24
    elif profile in ["month","month_end"]:
        df_reduced["profile_hour"] = df_reduced["profile_hour"] +\
                                            (df_reduced[timeBin].dt.day - 1) *24
    if sliceName is not None:
        assert type(sliceName) is str
        df_reduced = df_reduced.assign(timeSlice=sliceName)
    if areasName is not None:
        assert type(areasName) is str
        df_reduced = df_reduced.assign(areaName=areasName)
    return df_reduced
def my_droplevel(df, level=1):
    """Remove one level from ``df``'s (multi-)index, in place.

    Parameters
    ----------
    df : pandas.DataFrame
        The frame whose index will be flattened (mutated in place).
    level : int, optional
        The index level to drop (default 1, i.e. the second level).

    Returns
    -------
    pandas.DataFrame
        The same ``df`` object, with the level removed from its index.
    """
    flattened = df.index.droplevel(level)
    df.index = flattened
    return df
def _computePerAreaGroupedNormalization(g, binKey=None, profileKey=None):
    """Aggregate one (area, profile-period) group into per-time-bin counts
    plus the period totals and fractions used for normalization.

    Parameters
    ----------
    g : dask.dataframe.DataFrame or pandas.DataFrame
        One group of pings, all belonging to the same area/period.
    binKey : str
        Name of the time-bin column to group on.
    profileKey : str
        Name of the profile-period column to carry through.

    Returns
    -------
    pandas.DataFrame
        One row per time bin with pings/users counts, totals and fractions.
    """
    # Dask groups need the module-level `nunique` Aggregation object, plain
    # pandas groups can use the builtin "nunique" reducer; `isinstance` is
    # used instead of an exact type comparison.
    uid_agg = nunique if isinstance(g, dd.core.DataFrame) else "nunique"
    df_reduced = g.groupby(binKey)\
                    .agg({latColName: "count",
                          uidColName: uid_agg,
                          zidColName: "first",
                          profileKey: "first"})
    df_reduced = df_reduced.rename(columns={latColName: "pings", uidColName: "users"})
    # Totals over the whole group (the profile period) for the normalization.
    df_reduced = df_reduced.assign(
        pings_per_user=df_reduced["pings"]/df_reduced["users"],
        tot_pings=df_reduced["pings"].sum(),
        tot_users=df_reduced["users"].sum(),
    )
    df_reduced = df_reduced.assign(
        frac_pings=df_reduced["pings"] / df_reduced["tot_pings"],
        frac_users=df_reduced["users"] / df_reduced["tot_users"],
    )
    df_reduced = df_reduced.reset_index()
    return df_reduced[[binKey, zidColName, profileKey, "pings","users","pings_per_user",
                       "tot_pings","tot_users","frac_pings","frac_users"]]
def _computePerAreaGrouped(g, binKey=None, profile=None):
    """Aggregate one area group into per-time-bin pings/users counts.

    Parameters
    ----------
    g : dask.dataframe.DataFrame or pandas.DataFrame
        One group of pings, all belonging to the same area.
    binKey : str
        Name of the time-bin column to group on.
    profile : str
        Name of the profile-period column to carry through.

    Returns
    -------
    pandas.DataFrame
        One row per time bin with ``pings``, ``users`` and ``pings_per_user``.
    """
    # Dask groups need the module-level `nunique` Aggregation object, plain
    # pandas groups can use the builtin "nunique" reducer.
    uid_agg = nunique if isinstance(g, dd.core.DataFrame) else "nunique"
    df_reduced = g.groupby(binKey).agg({uidColName: ["count", uid_agg],
                                        profile: ["first"],
                                        zidColName: ["first"]}).reset_index()
    # Flatten the two-level columns produced by the list aggregations and
    # restore plain column names.
    df_reduced.columns = ['_'.join(col).strip() if len(col[-1])>0 else col[0]
                          for col in df_reduced.columns.values]
    df_reduced = df_reduced.rename(columns={
        uidColName + "_count": "pings",
        uidColName + "_nunique": "users",
        profile + "_first": profile,
        zidColName + "_first": zidColName,
    })
    # Clip the denominator at 1 to guard against division by zero.
    df_reduced["pings_per_user"] = df_reduced["pings"] / df_reduced["users"].clip(lower=1.)
    df_reduced = df_reduced[[binKey, profile, zidColName, 'pings', 'users', 'pings_per_user']]
    return df_reduced
def computeResiduals(df_activity, signal_column, profile):
    '''Function that computes the average, z-score and residual activity of an area in a given time
    period and for a given time bin.
    Parameters
    ----------
    df_activity : dask.DataFrame
        As returned by :attr:`mobilkit.temporal.computeTemporalProfile`, a dataframe with the columns
        and periods volumes and normalization (if needed) already computed.
    profile : str
        The temporal profile used for normalization in :attr:`mobilkit.temporal.computeTemporalProfile`.
    signal_column : str
        The columns to use as proxy for volume. Usually one of ``"users", "pings", "frac_users", "frac_pings"``
    Returns
    -------
    results, mappings
        Two dictionaries containing the aggregated results in numpy arrays.
        ``results`` has four keys:
        - ``raw`` the raw signal in the ``area_index,period_index,period_hour_index`` indexing;
        - ``mean`` the mean over the periods of the raw signal in the
          ``area_index,period_hour_index`` shape;
        - ``zscore`` the zscore of the area signal (with respect to its average and std) in the
          ``area_index,period_hour_index`` shape;
        - ``residual`` the residual activity computed as the difference between the area's ``zscore``
          and the global average ``zscore`` at a given hour in the ``area_index,period_hour_index``
          shape;
        On the other hand, ``mappings`` contains the back and forth mapping between the numpy indexes
        and the original values of the areas (``idx2area`` and ``area2idx``), periods, and, hour of the period.
        These will be useful later for plotting.
    '''
    # Dead Spark-era code kept for reference:
    # uniques = df_activity.select(
    #     sqlF.collect_set(zidColName).alias("areas"),
    #     sqlF.collect_set(profile).alias("periods"),
    #     sqlF.collect_set("profile_hour").alias("hours"),
    # ).toPandas()
    # Collect the distinct areas, profile periods and profile hours found in
    # the activity dataframe.
    set_areas = set([d for d in df_activity[zidColName].unique()])# .compute())# set(uniques.loc[0,"areas"])
    set_periods = set([pd.to_datetime(d) for d in df_activity[profile].unique()])# .compute())# set(uniques.loc[0,"areas"])
    set_hours = set([d for d in df_activity["profile_hour"].unique()])# .compute())# set(uniques.loc[0,"areas"])
    # set_periods = set(uniques.loc[0,"periods"])
    # set_hours = set(uniques.loc[0,"hours"])
    # Compute the mappings: dense 0-based integer indexes for each sorted
    # area / period / hour value, plus the inverse lookups.
    area2idx = {k: v for v, k in enumerate(sorted(set_areas))}
    week2idx = {k: v for v, k in enumerate(sorted(set_periods))}
    hour2idx = {k: v for v, k in enumerate(sorted(set_hours))}
    idx2area = dict(map(reversed,area2idx.items()))
    idx2week = dict(map(reversed,week2idx.items()))
    idx2hour = dict(map(reversed,hour2idx.items()))
    nAreas = len(area2idx)
    nWeeks = len(week2idx)
    nHours = len(hour2idx)
    # Fill the dense (area, period, hour) cube with the chosen signal; rows
    # whose area is missing from the mapping are skipped.
    zone_hour_volume = np.zeros((nAreas,nWeeks,nHours))
    tmp_df = df_activity[[zidColName,profile,"profile_hour",signal_column]]# .compute()
    for tid, prof, prof_h, val in tmp_df[
                            [zidColName, profile, "profile_hour", signal_column]].values:
        try:
            i = area2idx[tid]
        except KeyError:
            continue
        j = week2idx[prof]
        k = hour2idx[prof_h]
        zone_hour_volume[i,j,k] = val
    # Mean over periods, then z-score each area's profile over its own hours.
    avg_zone_hour_volume = np.mean(zone_hour_volume, axis=1)
    zsc_zone_hour_volume = (avg_zone_hour_volume -
                            avg_zone_hour_volume.mean(axis=-1,keepdims=True))
    # Zero or NaN standard deviations are replaced by 1 to avoid dividing by
    # zero for flat/empty area profiles.
    tmp_std = avg_zone_hour_volume.std(axis=-1,keepdims=True)
    tmp_std = np.where(tmp_std==0, np.ones_like(tmp_std), tmp_std)
    tmp_std = np.where(np.isnan(tmp_std), np.ones_like(tmp_std), tmp_std)
    zsc_zone_hour_volume /= tmp_std
    # Residual = area z-score minus the across-areas average z-score per hour.
    res_zone_hour_volume = zsc_zone_hour_volume - zsc_zone_hour_volume.mean(axis=0, keepdims=True)
    results = {
        "raw": zone_hour_volume,
        "mean": avg_zone_hour_volume,
        "zscore": zsc_zone_hour_volume,
        "residual": res_zone_hour_volume,
    }
    mappings = {
        "area2idx": area2idx,
        "hour2idx": hour2idx,
        "period2idx": week2idx,
        "idx2area": idx2area,
        "idx2hour": idx2hour,
        "idx2period": idx2week,
    }
    return results, mappings
def homeLocationWindow(df_hw,
                       initial_days_home=None,
                       home_days_window=3,
                       start_date=None,
                       stop_date=None):
    '''
    Given a dataframe returned by :attr:`mobilkit.stats.userHomeWork` computes,
    for each user, the home area for every window of ``home_days_window`` days
    after the initial date.
    Note that the points before 12pm will be assigned to the previous day's night
    and the one after 12pm to the same day's night.
    Parameters
    ----------
    df_hw : dask.dataframe
        A dataframe as returned by :attr:`mobilkit.stats.userHomeWork` with at
        least the `uid`, `tile_ID`, `datetime` and `isHome` and `isWork` columns.
    initial_days_home : int, optional
        The number of initial days to be used to compute the original home area.
        If ``None`` (default) it will just compute the home for every window
        since the beginning.
    home_days_window : int, optional
        The number of days to use to assess the home location of a user (default 3).
        For each day ``d`` in the ``start_date`` to ``stop_date - home_days_window``
        it computes the home location between the ``[d,d+home_days_window)`` period.
    start_date : datetime.datetime
        A python datetime object with no timezone telling the date (included) to
        start from. The default behavior is to keep all the events.
    stop_date : datetime.datetime, optional
        A python datetime object with no timezone telling the date (excluded) to
        stop at. Default is to keep all the events.
    Returns
    -------
    df_hwindow : pandas.dataframe
        The dataframe containing, for each user and active day of user the
        ``tile_ID`` of the user's home and the number of pings recorded there in
        the time window. The date is saved in ``window_date`` and refers to the
        start of the time window (whose index is saved in ``timeSlice``).
        For the initial home window the date corresponds to its end.
    Note
    ----
    When determining the home location of a user, please consider that some data providers, like _Cuebiq_, obfuscate/obscure/alter the coordinates of the points falling near the user's home location in order to preserve privacy.
    This means that you cannot locate the precise home of a user with a spatial resolution higher than the one used to obfuscate these data. If you are interested in the census area (or geohash) of the user's home alone and you are using a spatial tessellation with a spatial resolution wider than or equal to the one used to obfuscate the data, then this is of no concern.
    However, tasks such as stop-detection or POI visit rate computation may be affected by the noise added to data in the user's home location area. Please check if your data has such noise added and choose the spatial tessellation according to your use case.
    '''
    if initial_days_home is not None:
        assert initial_days_home > 0
    assert home_days_window > 0 and type(home_days_window) is int
    # Prepare the column with the day to which a row is assigned
    # and its distance to initial date
    filtered_df = df_hw[[uidColName,zidColName,dttColName,"isHome"]]
    # Keep only home pings (night-time events near the user's home).
    filtered_df = filtered_df[filtered_df["isHome"] == 1]
    filtered_df = filtered_df.assign(
                        hour=filtered_df[dttColName].dt.hour,
                        day=filtered_df[dttColName].dt.floor("D"))
    # Pings before 12pm belong to the previous day's night: shift their day
    # back by one via the `sday` (0/1) indicator.
    filtered_df = filtered_df.assign(sday=(filtered_df["hour"] < 12).astype(int))
    filtered_df["day"] = filtered_df["day"]\
                            - dd.to_timedelta(filtered_df["sday"], unit='d')
    filtered_df = filtered_df[[uidColName,zidColName,dttColName,"isHome","day"]]
    if start_date is not None:
        filtered_df = filtered_df[filtered_df["day"] >= start_date]
    else:
        # No explicit start: anchor the windows at the earliest observed night.
        start_date = filtered_df["day"].min().compute()
    if stop_date is not None:
        filtered_df = filtered_df[filtered_df["day"] < stop_date]
    # Compute once the number of pings per zone per day per user
    filtered_df = filtered_df.assign(level0=filtered_df[uidColName],
                                     level1=filtered_df[zidColName],
                                     level2 =filtered_df["day"]
                        ).groupby(["level0","level1","level2"])\
                            .agg({uidColName: "first",
                                  zidColName: "first",
                                  "day": "first",
                                  "isHome": "sum",
                            }).rename(columns={"isHome": "pings"})
    # `deltaDay` is the distance (in days) of each night from the start date.
    filtered_df = filtered_df.assign(day0=start_date)
    filtered_df = filtered_df.assign(deltaDay=(filtered_df["day"]
                                               - filtered_df["day0"]).dt.days)
    filtered_df = filtered_df.map_partitions(lambda p: p.reset_index(drop=True))
    # Persist so the describe() below and the repeated window slices reuse the
    # already-computed graph.
    filtered_df = filtered_df.persist()
    print("Got the delta days distributed as:",
              filtered_df["deltaDay"].compute().describe())
    # First slice and set initial date for windows
    if initial_days_home is not None:
        initial_df = filtered_df[filtered_df["deltaDay"] < initial_days_home]
        first_date_of_windows = initial_df["day"].max().compute()
        initial_df = initial_df.assign(timeSlice=0,
                                       level0=initial_df[uidColName],
                                      )
        # Explicit meta needed by dask's groupby-apply.
        return_meta = dict(**initial_df.dtypes)
        return_meta = {k: return_meta[k] for k in [uidColName,zidColName,
                                                       "timeSlice"]}
        return_meta["pings"] = np.int64
        initial_df = initial_df.groupby("level0")\
                                .apply(_computeUserSliceWindows, meta=return_meta)\
                                .compute().reset_index(drop=True)
    else:
        initial_df = None
        initial_days_home = 0
        first_date_of_windows = filtered_df["day"].max().compute()
    # Now for each window I do the same and concat to the original one
    # Windows are shifted by one slice index when an initial home window was
    # computed above (slice 0 is then the initial home).
    offset_windows = 1 if initial_days_home > 0 else 0
    for window_idx in range(home_days_window):
        tmp_day_0 = initial_days_home + window_idx
        slice_df = filtered_df[filtered_df["deltaDay"] >= tmp_day_0]
        slice_df = slice_df.assign(timeSlice=offset_windows + window_idx
                                             + home_days_window
                                                 * ( (slice_df["deltaDay"] - tmp_day_0)
                                                      //home_days_window ))
        print("Doing window %02d / %02d" % (window_idx+1, home_days_window))
        slice_df = slice_df.assign(level0=slice_df["timeSlice"],
                                   level1=slice_df[uidColName],
                                  )
        return_meta = dict(**slice_df.dtypes)
        return_meta = {k: return_meta[k] for k in [uidColName,zidColName,
                                                       "timeSlice"]}
        return_meta["pings"] = np.int64
        slice_df = slice_df.groupby(["level0","level1"])\
                                .apply(_computeUserSliceWindows, meta=return_meta)\
                                .compute().reset_index(drop=True)
        if initial_df is None:
            initial_df = slice_df
        else:
            initial_df = pd.concat([initial_df, slice_df], sort=True, ignore_index=True)
    # Add reference date
    initial_df = initial_df.assign(window_date=first_date_of_windows
                                                  +dd.to_timedelta(initial_df["timeSlice"],
                                                                   unit="D"))
    return initial_df
def _computeUserSliceWindows(g):
    """For a single (time slice, user) group return the row of the area where
    the user recorded the most pings, i.e. the night-time home of that window.

    Parameters
    ----------
    g : dask.dataframe.DataFrame or pandas.DataFrame
        The group's pings with ``uid``, ``tile_ID``, ``timeSlice`` and
        ``pings`` columns.

    Returns
    -------
    pandas.Series
        The top-ranked (by ``pings``) row restricted to the four columns above.
    """
    # Use isinstance (instead of an exact type comparison) to tell dask
    # frames apart from plain pandas groups.
    if isinstance(g, dd.core.DataFrame):
        g = g.assign(groupZID=g[zidColName]).groupby("groupZID").agg({
                uidColName: "first",
                zidColName: "first",
                "timeSlice": "first",
                "pings": "sum",
            }).sort_values("pings", ascending=False)\
              .reset_index(drop=True)
    else:
        g = g.groupby(zidColName).agg({
                uidColName: "first",
                "timeSlice": "first",
                "pings": "sum",
            }).sort_values("pings", ascending=False)\
              .reset_index()
    # Keep only the area with the most pings.
    return g[[uidColName,zidColName,"timeSlice","pings"]].iloc[0]
def computeDisplacementFigures(df_disp, minimum_pings_per_night=5):
    '''
    Given a dataframe returned by :attr:`mobilkit.temporal.homeLocationWindow` computes a pivoted
    dataframe with, for each user, the home area for every time window, plus the arrays of displaced
    and active people per area and the arrays with the (per user) cumulative number of areas where
    the user slept.
    Parameters
    ----------
    df_disp : pandas.dataframe
        A dataframe as returned by :attr:`mobilkit.temporal.homeLocationWindow`.
    minimum_pings_per_night : int, optional
        The number of pings recorded during a night for a user to be considered.
    Returns
    -------
    df_pivoted, first_user_area, heaps_arrays, count_users_per_area : pandas.dataframe, dict, array, dict
        - ``df_pivoted`` is a dataframe containing one row per user and with the column being the sorted
          time windows of the analysis period. Each cell contains the location where the user (row)
          has slept in night t (column), ``Nan`` if the user was not active that night.
        - ``first_user_area`` is a dict telling, for each user, the ``tile_ID`` where he has been sleeping
          for the first time.
        - ``heaps_arrays`` is a (n_users x n_windows) array telling the cumulative number of areas where
          a users slept up to window t.
        - ``counts_users_per_area`` is a dictionary ``{tile_ID: {"active": [...], "displaced": [...]}}``
          telling the number of active and displaced people per area in time.
    '''
    init_df_joined_pd = df_disp.sort_values([uidColName,"timeSlice"])
    # Keyword arguments required: positional `DataFrame.pivot` arguments were
    # removed in pandas 2.0.
    pivoted = init_df_joined_pd[
                    init_df_joined_pd["pings"]>=minimum_pings_per_night]\
                .pivot(index=uidColName, columns="window_date", values=zidColName)
    pivoted = pivoted[sorted(pivoted.columns)]
    # The areas where at least one user slept (NaN = inactive night).
    areas_displacement = set([d for d in pivoted.values.flatten() if not np.isnan(d)])
    n_time_windows = pivoted.shape[1]
    pivoted_arra = pivoted.values
    # `prima_zona` = first home area per user; `heaps` = cumulative number of
    # distinct sleeping areas per user up to each window.
    prima_zona = np.zeros(pivoted_arra.shape[0])
    heaps = np.zeros_like(pivoted_arra)
    count_users_per_area = {a: {
                                "active": np.zeros(n_time_windows),
                                "displaced": np.zeros(n_time_windows),
                            } for a in areas_displacement}
    for i in range(pivoted_arra.shape[0]):
        row = pivoted_arra[i,:]
        tmp_set = set()
        assigned = False
        for j in range(pivoted_arra.shape[1]):
            e = row[j]
            if not np.isnan(e):
                tmp_set.add(e)
            # The first area ever seen for the user is their "original" home.
            if not assigned and len(tmp_set) == 1:
                prima_zona[i] = int(e)
                assigned = True
            if assigned and not np.isnan(e):
                tmp_zona_original = prima_zona[i]
                # A user is "active" in their original area whenever seen, and
                # "displaced" when sleeping somewhere else.
                count_users_per_area[tmp_zona_original]["active"][j] += 1
                if e != tmp_zona_original:
                    count_users_per_area[tmp_zona_original]["displaced"][j] += 1
            heaps[i,j] = len(tmp_set)
    prima_zona = {u: prima_zona[i] for i, u in enumerate(pivoted.index)}
    return pivoted, prima_zona, heaps, count_users_per_area
def plotDisplacement(count_users_per_area, pivoted, gdf,
                     area_key="tile_ID",
                     epicenter=[18.584,98.399],
                     bins=5):
    '''
    Parameters
    ----------
    count_users_per_area : dict
        The dict returned with the pivot table, the original home location,
        and the Heaps law of visited areas by :attr:`mobilkit.temporal.homeLocationWindow`.
    pivoted : pandas.DataFrame
        The pivoted dataframe of the visited location during the night as returned with the
        the original home location, the Heaps law of visited areas and the count of users per
        area and date by :attr:`mobilkit.temporal.homeLocationWindow`.
    gdf : geopandas.GeoDataFrame
        The geodataframe used to tessellate data. Must contain the `area_key` column.
    area_key : str
        The column containing the ID of the tessellation areas used to join the displacement
        data and the GeoDataFrame.
    epicenter : tuple
        The `(lat,lon)` coordinates of the center to be used to split areas in `bins` bins
        based on their distance from this point.
    bins : int
        The number of linear distance bins to compute from the epicenter.
    Returns
    -------
    fig, gdf : matplotlib.figure.Figure, geopandas.GeoDataFrame
        The figure with one displacement-rate curve per distance bin and a copy
        of the input geodataframe augmented with the ``distance_epic`` and
        ``distance_bin`` columns.
    '''
    # Work on a copy so the caller's geodataframe is not mutated.
    gdf = gdf.copy()
    dates_sorted = np.array(pivoted.columns)
    # Compute the distance of each area's centroid from epicenter
    gdf["distance_epic"] = gdf.geometry.centroid.apply(lambda p:
                                                haversine(epicenter,
                                                        (p.xy[1][0], p.xy[0][0]) ))
    # Bin areas depending on their distance
    distance_bins = np.linspace(0, max(gdf["distance_epic"])+1, bins+1)
    gdf["distance_bin"] = gdf["distance_epic"].apply(lambda v:
                                                np.argmax(distance_bins>=v)-1)
    # For each bin's areas plot the displacement rate
    fig, ax = plt.subplots(1,1,figsize=(15,6))
    ymax = -1
    for dist_bin in range(len(distance_bins)):
        tmp_areas = set(gdf[gdf["distance_bin"]==dist_bin][area_key].values)
        # Keep only areas for which we actually have displacement counts.
        tmp_areas = tmp_areas.intersection(count_users_per_area.keys())
        if len(tmp_areas) == 0:
            continue
        tmp_arra_disp = np.vstack([count_users_per_area[a]["displaced"] for a in tmp_areas])
        tmp_arra_act = np.vstack([count_users_per_area[a]["active"] for a in tmp_areas])
        # Displacement rate = displaced / active (denominator clipped at 1 to
        # avoid dividing by zero on inactive windows).
        tmp_Ys = tmp_arra_disp.sum(axis=0) / np.clip(tmp_arra_act.sum(axis=0), a_min=1., a_max=None)
        plt.plot(dates_sorted, tmp_Ys, label="Dist. bin %d"%dist_bin, lw=3)
        ymax = max(ymax, max(tmp_Ys))
    plt.vlines(dates_sorted[1], 0, ymax*2, lw=4, linestyles="--", color="r",
               label="First window")
    plt.ylim(0, ymax*1.1)
    plt.xticks(rotation=40, ha="right")
    plt.ylabel("Fraction of active\n users displaced")
    plt.legend();
    return fig, gdf
| 44.994748 | 373 | 0.591806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21,770 | 0.508229 |
5286e720b93035006a31236bf9586f44cd981b61 | 5,591 | py | Python | tests/test_mat2.py | TovarnovM/easyvec | 25a67a37907140c07feace02dd85d68fc59824a6 | [
"MIT"
] | 1 | 2021-06-24T18:12:51.000Z | 2021-06-24T18:12:51.000Z | tests/test_mat2.py | TovarnovM/easyvec | 25a67a37907140c07feace02dd85d68fc59824a6 | [
"MIT"
] | null | null | null | tests/test_mat2.py | TovarnovM/easyvec | 25a67a37907140c07feace02dd85d68fc59824a6 | [
"MIT"
] | null | null | null | from easyvec import Mat2, Vec2
import numpy as np
from pytest import approx
def test_constructor1():
m = Mat2(1,2,3,4)
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor2():
m = Mat2([1,2,3,4])
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor3():
m = Mat2([[1,2],[3,4]])
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor4():
m = Mat2([1,2],[3,4])
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor5():
m = Mat2(Vec2(1,2),Vec2(3,4))
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor6():
m = Mat2([Vec2(1,2),Vec2(3,4)])
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor7():
m = Mat2.eye()
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(0)
assert m.m21 == approx(0)
assert m.m22 == approx(1)
def test_constructor8():
from math import sin, cos, pi
for angle in np.random.uniform(-720, 720, 1000):
angle *= pi/180
m = Mat2.from_angle(angle)
assert m is not None
assert m.m11 == approx(cos(angle))
assert m.m12 == approx(sin(angle))
assert m.m21 == approx(-sin(angle))
assert m.m22 == approx(cos(angle))
def test_constructor9():
m = Mat2.from_xaxis((1,1))
assert m is not None
assert m.m11 == approx(1/2**0.5)
assert m.m12 == approx(1/2**0.5)
assert m.m21 == approx(-1/2**0.5)
assert m.m22 == approx(1/2**0.5)
# --- Axis accessors, comparison, transpose, inverse and multiplication tests.
def test_xiyj_axis():
    m = Mat2(1,2,3,4)
    assert m.x_axis() == (1,2)
    assert m.i_axis() == (1,2)
    assert m.y_axis() == (3,4)
    assert m.j_axis() == (3,4)
def test_cmp():
    m = Mat2(-1,2,-3,4)
    assert m == [[-1,2],[-3,4]]
    assert m != [[-1,-2],[-3,4]]
def test_T():
    m = Mat2(-1,2,-3,4)
    assert m.T == [[-1,-3], [2,4]]
def test_inverse1():
    # For a pure rotation the inverse equals the transpose and det == 1.
    for angle in np.random.uniform(-720,720,1000):
        m = Mat2.from_angle(angle)
        assert m._1 == m.T
        assert m.det() == approx(1)
def test_inverse2():
    # For random (non-singular) matrices, m * m^-1 == identity.
    for ms in np.random.uniform(-720,720,(1000,4)):
        m = Mat2(ms)
        if abs(m.det()) < 1e-6:
            continue
        assert m * m._1 == Mat2.eye()
def test_mul1():
    # Scalar multiplication is elementwise and commutative.
    for ms in np.random.uniform(-720,720,(1000,5)):
        m = Mat2(ms[:-1])
        assert m * ms[-1] == (ms[:-1] * ms[-1]).reshape(2,2)
        assert ms[-1] * m == (ms[:-1] * ms[-1]).reshape(2,2)
def test_mul2():
    # Rotating a unit vector by angle (degrees flag = 1) changes the relative
    # angle accordingly; applying the inverse rotation restores the vector.
    for angle, x, y in np.random.uniform(-180,180,(1000,3)):
        m = Mat2.from_angle(angle, 1)
        v = Vec2(x, y).norm()
        v1 = m * v
        assert v.angle_to(v1, 1) == approx(-angle)
        v2 = m._1 * v1
        assert v2 == v
        v3 = m._1 * v
        assert v.angle_to(v3, 1) == approx(angle)
def test_imul():
    # In-place multiplication by the inverse yields the identity.
    for ms in np.random.uniform(-720,720,(1000,4)):
        m = Mat2(ms)
        if abs(m.det()) < 1e-6:
            continue
        assert m * m._1 == Mat2.eye()
        m *= m._1
        assert m == Mat2.eye()
# --- Elementwise addition, subtraction and division tests (scalar and
# --- matrix-matrix, plus their in-place variants), checked against numpy.
def test_add1():
    for ms in np.random.uniform(-720,720,(1000,5)):
        m = Mat2(ms[:-1])
        m1 = m + ms[-1]
        m1i = (ms[:-1] + ms[-1]).reshape(2,2)
        assert m1 == m1i
        assert ms[-1] + m == (ms[:-1] + ms[-1]).reshape(2,2)
def test_add2():
    for ms in np.random.uniform(-720,720,(1000,8)):
        m1 = Mat2(ms[:4])
        m2 = Mat2(ms[4:])
        assert m1 + m2 == m2 + m1
        assert m1 + m2 == (ms[:4] + ms[4:]).reshape(2,2)
def test_iadd():
    for ms in np.random.uniform(-720,720,(1000,8)):
        m1 = Mat2(ms[:4])
        m2 = Mat2(ms[4:])
        m12 = m1 + m2
        m1 += m2
        assert m12 == m1
        assert m1 == (ms[:4] + ms[4:]).reshape(2,2)
def test_sub1():
    for ms in np.random.uniform(-720,720,(1000,5)):
        m = Mat2(ms[:-1])
        m1 = m - ms[-1]
        m1i = (ms[:-1] - ms[-1]).reshape(2,2)
        assert m1 == m1i
        assert ms[-1] - m == -(ms[:-1] - ms[-1]).reshape(2,2)
def test_sub2():
    for ms in np.random.uniform(-720,720,(1000,8)):
        m1 = Mat2(ms[:4])
        m2 = Mat2(ms[4:])
        assert m1 - m2 == -(m2 - m1)
        assert m1 - m2 == (ms[:4] - ms[4:]).reshape(2,2)
def test_isub():
    for ms in np.random.uniform(-720,720,(1000,8)):
        m1 = Mat2(ms[:4])
        m2 = Mat2(ms[4:])
        m12 = m1 - m2
        m1 -= m2
        assert m12 == m1
        assert m1 == (ms[:4] - ms[4:]).reshape(2,2)
def test_div1():
    for ms in np.random.uniform(-720,720,(1000,5)):
        m = Mat2(ms[:-1])
        m1 = m / ms[-1]
        m1i = (ms[:-1] / ms[-1]).reshape(2,2)
        assert m1 == m1i
def test_div2():
    for ms in np.random.uniform(-720,720,(1000,8)):
        m1 = Mat2(ms[:4])
        m2 = Mat2(ms[4:])
        assert m1 / m2 == (ms[:4] / ms[4:]).reshape(2,2)
def test_idiv():
    for ms in np.random.uniform(-720,720,(1000,8)):
        m1 = Mat2(ms[:4])
        m2 = Mat2(ms[4:])
        m12 = m1 / m2
        m1 /= m2
        assert m12 == m1
        assert m1 == (ms[:4] / ms[4:]).reshape(2,2)
5287530feaff051b883de6c50b928b9265f12103 | 1,664 | py | Python | tests/test_static_maze.py | agarwalishika/two-d-nav-gym | 7b16d16d6944f1ac7b832137096ab56cc5228b2f | [
"MIT"
] | 2 | 2021-09-09T15:59:32.000Z | 2021-09-09T16:03:48.000Z | tests/test_static_maze.py | agarwalishika/two-d-nav-gym | 7b16d16d6944f1ac7b832137096ab56cc5228b2f | [
"MIT"
] | null | null | null | tests/test_static_maze.py | agarwalishika/two-d-nav-gym | 7b16d16d6944f1ac7b832137096ab56cc5228b2f | [
"MIT"
] | null | null | null | import numpy as np
from two_d_nav.envs.static_maze import StaticMazeNavigation
def test_goal():
env = StaticMazeNavigation()
for i in range(60):
obs, reward, done, _ = env.step(np.array([1.0, -0.1]))
env.render()
for i in range(30):
obs, reward, done, _ = env.step(np.array([-1.0, -0.5]))
env.render()
for i in range(5):
obs, reward, done, _ = env.step(np.array([0.0, -1.0]))
env.render()
for i in range(15):
obs, reward, done, _ = env.step(np.array([-1.0, 0.0]))
env.render()
for i in range(30):
obs, reward, done, _ = env.step(np.array([0.0, -1.0]))
env.render()
for i in range(18):
obs, reward, done, _ = env.step(np.array([-1.0, -0.6]))
env.render()
if done:
print(f"Reach goal: {obs}")
print(f"Reward: {reward}")
def test_obstacle():
env = StaticMazeNavigation()
for i in range(60):
obs, reward, done, _ = env.step(np.array([1.0, -0.1]))
env.render()
for i in range(5):
obs, reward, done, _ = env.step(np.array([0.0, -1.0]))
env.render()
for i in range(30):
obs, reward, done, _ = env.step(np.array([-1.0, 0.0]))
env.render()
if done:
print(f"Hit obstacle: {obs}")
print(f"Reward: {reward}")
def test_wall():
env = StaticMazeNavigation()
reward = 0.0
for i in range(20):
obs, reward, done, _ = env.step(np.array([-1.0, 0.0]))
env.render()
print(f"Hit wall reward {reward}")
if __name__ == '__main__':
test_goal()
test_obstacle()
test_wall()
| 22.794521 | 63 | 0.53125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.070313 |
528a9e7052216cb329d6d29d4440112a9d78b9fe | 146 | py | Python | starter_code/api_keys.py | bjouellette/python-api-challenge | 855c31769893596211ef072df8412cd47a557e19 | [
"ADSL"
] | 1 | 2022-01-27T00:04:14.000Z | 2022-01-27T00:04:14.000Z | starter_code/api_keys.py | bjouellette/python-api-challenge | 855c31769893596211ef072df8412cd47a557e19 | [
"ADSL"
] | null | null | null | starter_code/api_keys.py | bjouellette/python-api-challenge | 855c31769893596211ef072df8412cd47a557e19 | [
"ADSL"
] | null | null | null | # OpenWeatherMap API Key
weather_api_key = "e1067d92d6b631a16363bf4db3023b19"
# Google API Key
g_key = "AIzaSyA4RYdQ1nxoMTIW854C7wvVJMf0Qz5qjNk"
| 24.333333 | 52 | 0.842466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.787671 |
528bb07e2da917115605e5bd86611d0e0fcfeb76 | 13,099 | py | Python | dist-packages/dtk/ui/paned.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | [
"PSF-2.0"
] | null | null | null | dist-packages/dtk/ui/paned.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | [
"PSF-2.0"
] | null | null | null | dist-packages/dtk/ui/paned.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | [
"PSF-2.0"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Xia Bin
# 2011 ~ 2012 Wang Yong
#
# Author: Xia Bin <xiabin@linuxdeepin.com>
# Maintainer: Wang Yong <lazycat.manatee@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from draw import draw_pixbuf
from utils import is_in_rect, color_hex_to_cairo
from constant import PANED_HANDLE_SIZE
import gobject
import gtk
import math
from theme import ui_theme
# Load customize rc style before any other.
gtk.rc_parse_string("style 'my_style' {\n GtkPaned::handle-size = %s\n }\nwidget '*' style 'my_style'" % (PANED_HANDLE_SIZE))
class Paned(gtk.Paned):
'''
Paned.
@undocumented: do_enter_notify_event
@undocumented: do_button_press_event
@undocumented: do_size_allocate
@undocumented: do_enter_notify_event
@undocumented: is_in_button
@undocumented: draw_handle
@undocumented: do_expose_event
gtk.Paned with custom better apperance.
'''
def __init__(self,
shrink_first,
enable_animation=False,
always_show_button=False,
enable_drag=False,
handle_color=ui_theme.get_color("paned_line")
):
'''
Initialize Paned class.
'''
gtk.Paned.__init__(self)
self.shrink_first = shrink_first
self.enable_animation = enable_animation
self.always_show_button = always_show_button
self.enable_drag = enable_drag
self.handle_color = handle_color
self.bheight = ui_theme.get_pixbuf("paned/paned_up_normal.png").get_pixbuf().get_width()
self.saved_position = -1
self.handle_size = PANED_HANDLE_SIZE - 1
self.show_button = False
self.init_button("normal")
self.animation_delay = 20 # milliseconds
self.animation_times = 10
self.animation_position_frames = []
self.press_coordinate = None
def init_button(self, status):
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
if self.shrink_first:
self.button_pixbuf = ui_theme.get_pixbuf("paned/paned_left_%s.png" % status).get_pixbuf()
else:
self.button_pixbuf = ui_theme.get_pixbuf("paned/paned_right_%s.png" % status).get_pixbuf()
else:
if self.shrink_first:
self.button_pixbuf = ui_theme.get_pixbuf("paned/paned_up_%s.png" % status).get_pixbuf()
else:
self.button_pixbuf = ui_theme.get_pixbuf("paned/paned_down_%s.png" % status).get_pixbuf()
def do_expose_event(self, e):
'''
To intercept the default expose event and draw custom handle
after the **gtk.Container** expose evetn.
So the gtk.Paned's expose event callback is ignore.
'''
gtk.Container.do_expose_event(self, e)
self.draw_handle(e)
return False
def draw_handle(self, e):
'''
Draw the cusom handle apperance.
'''
handle = self.get_handle_window()
line_width = 1
cr = handle.cairo_create()
cr.set_source_rgb(*color_hex_to_cairo(self.handle_color.get_color()))
(width, height) = handle.get_size()
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
if self.shrink_first:
if self.get_position() != 0:
cr.rectangle(0, 0, line_width, height)
cr.fill()
if self.always_show_button or self.show_button:
if self.get_position() == 0:
pixbuf = ui_theme.get_pixbuf("paned/paned_right_normal.png").get_pixbuf()
else:
pixbuf = ui_theme.get_pixbuf("paned/paned_left_normal.png").get_pixbuf()
draw_pixbuf(cr,
pixbuf,
0,
(height - self.bheight) / 2)
else:
cr.rectangle(width - line_width, 0, line_width, height)
cr.fill()
if self.always_show_button or self.show_button:
if self.get_position() == 0:
pixbuf = ui_theme.get_pixbuf("paned/paned_left_normal.png").get_pixbuf()
else:
pixbuf = ui_theme.get_pixbuf("paned/paned_right_normal.png").get_pixbuf()
draw_pixbuf(cr,
pixbuf,
0,
(height - self.bheight) / 2)
else:
if self.shrink_first:
cr.rectangle(0, 0, width, line_width)
cr.fill()
if self.always_show_button or self.show_button:
if self.get_position() == 0:
pixbuf = ui_theme.get_pixbuf("paned/paned_down_normal.png").get_pixbuf()
else:
pixbuf = ui_theme.get_pixbuf("paned/paned_up_normal.png").get_pixbuf()
draw_pixbuf(cr,
pixbuf,
(width - self.bheight) / 2,
0)
else:
cr.rectangle(0, height - line_width, width, line_width)
cr.fill()
if self.always_show_button or self.show_button:
if self.get_position() == 0:
pixbuf = ui_theme.get_pixbuf("paned/paned_up_normal.png").get_pixbuf()
else:
pixbuf = ui_theme.get_pixbuf("paned/paned_down_normal.png").get_pixbuf()
draw_pixbuf(cr,
pixbuf,
(width - self.bheight) / 2,
0)
def is_in_button(self, x, y):
'''
Detection of wheter the mouse pointer is in the handler's button.
'''
handle = self.get_handle_window()
(width, height) = handle.get_size()
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
rect = (0, (height - self.bheight) / 2, width, self.bheight)
else:
rect = ((width - self.bheight) / 2, 0, self.bheight, height)
return is_in_rect((x, y), rect)
def do_enter_notify_event(self, e):
self.show_button = True
self.queue_draw()
def do_leave_notify_event(self, e):
self.show_button = False
self.init_button("normal")
self.queue_draw()
def do_motion_notify_event(self, e):
'''
change the cursor style when move in handler
'''
# Reset press coordinate if motion mouse after press event.
self.press_coordinate = None
handle = self.get_handle_window()
(width, height) = handle.get_size()
if self.is_in_button(e.x, e.y):
handle.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
self.init_button("hover")
else:
if self.enable_drag:
handle.set_cursor(self.cursor_type)
gtk.Paned.do_motion_notify_event(self, e)
else:
handle.set_cursor(None)
self.init_button("normal")
def do_button_press_event(self, e):
'''
when press the handler's button change the position.
'''
handle = self.get_handle_window()
if e.window == handle:
if self.is_in_button(e.x, e.y):
self.init_button("press")
self.do_press_actoin()
else:
(width, height) = handle.get_size()
if is_in_rect((e.x, e.y), (0, 0, width, height)):
self.press_coordinate = (e.x, e.y)
gtk.Paned.do_button_press_event(self, e)
else:
gtk.Paned.do_button_press_event(self, e)
return True
def do_button_release_event(self, e):
'''
docs
'''
gtk.Paned.do_button_release_event(self, e)
# Do press event if not in button and finish `click` event.
if (not self.is_in_button(e.x, e.y)) and self.press_coordinate == (e.x, e.y):
self.do_press_actoin()
return True
def do_press_actoin(self):
'''
docs
'''
if self.saved_position == -1:
self.saved_position = self.get_position()
if self.shrink_first:
self.change_position(0)
else:
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
self.change_position(self.allocation.width)
else:
self.change_position(self.allocation.height)
else:
self.change_position(self.saved_position)
self.saved_position = -1
def change_position(self, new_position):
current_position = self.get_position()
if self.enable_animation:
if new_position != current_position:
for i in range(0, self.animation_times + 1):
step = int(math.sin(math.pi * i / 2 / self.animation_times) * (new_position - current_position))
self.animation_position_frames.append(current_position + step)
if self.animation_position_frames[-1] != new_position:
self.animation_position_frames.append(new_position)
gtk.timeout_add(self.animation_delay, self.update_position)
else:
self.set_position(new_position)
def update_position(self):
self.set_position(self.animation_position_frames.pop(0))
if self.animation_position_frames == []:
return False
else:
return True
def do_size_allocate(self, e):
gtk.Paned.do_size_allocate(self, e)
if self.shrink_first:
child = self.get_child2()
else:
child = self.get_child1()
if child == None: return
rect = child.allocation
offset = self.handle_size
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
if self.shrink_first:
rect.x -= offset
rect.width += offset
else:
rect.width += offset
else:
if self.shrink_first:
rect.y -= offset
rect.height += offset
else:
rect.height += offset
child.size_allocate(rect)
class HPaned(Paned):
def __init__(self,
shrink_first=True,
enable_animation=False,
always_show_button=False,
enable_drag=False,
handle_color=ui_theme.get_color("paned_line")
):
Paned.__init__(self, shrink_first, enable_animation, always_show_button, enable_drag, handle_color)
self.set_orientation(gtk.ORIENTATION_HORIZONTAL)
self.cursor_type = gtk.gdk.Cursor(gtk.gdk.SB_H_DOUBLE_ARROW)
class VPaned(Paned):
def __init__(self,
shrink_first=True,
enable_animation=False,
always_show_button=False,
enable_drag=False,
handle_color=ui_theme.get_color("paned_line")
):
Paned.__init__(self, shrink_first, enable_animation, always_show_button, enable_drag, handle_color)
self.set_orientation(gtk.ORIENTATION_VERTICAL)
self.cursor_type = gtk.gdk.Cursor(gtk.gdk.SB_V_DOUBLE_ARROW)
gobject.type_register(Paned)
gobject.type_register(HPaned)
gobject.type_register(VPaned)
if __name__ == '__main__':
w = gtk.Window()
w.set_size_request(700, 400)
#w.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('yellow'))
box = gtk.VBox()
p = VPaned()
c1 = gtk.Button("11111111111111111111111")
c1.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('blue'))
c2 = gtk.Button("122222222222222222222222")
c1.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('red'))
p.add1(c1)
p.add2(c2)
box.pack_start(p)
p = HPaned()
c1 = gtk.Button("11111111111111111111111")
c1.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('blue'))
c2 = gtk.Button("122222222222222222222222")
c1.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('red'))
p.add1(c1)
p.add2(c2)
box.pack_start(p)
w.add(box)
w.connect('destroy', gtk.main_quit)
w.show_all()
gtk.main()
| 35.789617 | 128 | 0.578517 | 10,952 | 0.836094 | 0 | 0 | 0 | 0 | 0 | 0 | 2,661 | 0.203145 |
528ee35af89eb53ba2db896f868d4961ba685fff | 801 | py | Python | sghymnal/players/tests/test_drf_views.py | shortnd/sghymnal | c10d9a7e2fda803dcb5046b9f7bc099f32b6c603 | [
"MIT"
] | null | null | null | sghymnal/players/tests/test_drf_views.py | shortnd/sghymnal | c10d9a7e2fda803dcb5046b9f7bc099f32b6c603 | [
"MIT"
] | null | null | null | sghymnal/players/tests/test_drf_views.py | shortnd/sghymnal | c10d9a7e2fda803dcb5046b9f7bc099f32b6c603 | [
"MIT"
] | null | null | null | import pytest
from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory
from sghymnal.players.api.views import PlayersViewSet
from sghymnal.players.models import Player
pytestmark = pytest.mark.django_db
class TestPlayerViewSet:
def test_get_queryset(self, player: Player, rf: RequestFactory):
view = PlayersViewSet()
request = rf.get("/api/players/")
request.user = AnonymousUser()
view.request = request
assert player in view.get_queryset()
def test_get_detail(self, player: Player, rf: RequestFactory):
view = PlayersViewSet()
request = rf.get(f"/api/players/{player.uuid}/")
request.user = AnonymousUser()
view.request = request
assert player in view.get_queryset()
| 27.62069 | 68 | 0.70412 | 558 | 0.696629 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.05618 |
528f89ddb026f558a4cd118b62bb3b0e6ebc6dd4 | 2,351 | py | Python | arviz/plots/backends/matplotlib/separationplot.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | [
"Apache-2.0"
] | 1,159 | 2018-04-03T08:50:54.000Z | 2022-03-31T18:03:52.000Z | arviz/plots/backends/matplotlib/separationplot.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | [
"Apache-2.0"
] | 1,656 | 2018-03-23T14:15:05.000Z | 2022-03-31T14:00:28.000Z | arviz/plots/backends/matplotlib/separationplot.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | [
"Apache-2.0"
] | 316 | 2018-04-03T14:25:52.000Z | 2022-03-25T10:41:29.000Z | """Matplotlib separation plot."""
import matplotlib.pyplot as plt
import numpy as np
from ...plot_utils import _scale_fig_size
from . import backend_kwarg_defaults, backend_show, create_axes_grid
def plot_separation(
y,
y_hat,
y_hat_line,
label_y_hat,
expected_events,
figsize,
textsize,
color,
legend,
locs,
width,
ax,
plot_kwargs,
y_hat_line_kwargs,
exp_events_kwargs,
backend_kwargs,
show,
):
"""Matplotlib separation plot."""
if backend_kwargs is None:
backend_kwargs = {}
if plot_kwargs is None:
plot_kwargs = {}
# plot_kwargs.setdefault("color", "C0")
# if color:
plot_kwargs["color"] = color
if y_hat_line_kwargs is None:
y_hat_line_kwargs = {}
y_hat_line_kwargs.setdefault("color", "k")
if exp_events_kwargs is None:
exp_events_kwargs = {}
exp_events_kwargs.setdefault("color", "k")
exp_events_kwargs.setdefault("marker", "^")
exp_events_kwargs.setdefault("s", 100)
exp_events_kwargs.setdefault("zorder", 2)
backend_kwargs = {
**backend_kwarg_defaults(),
**backend_kwargs,
}
(figsize, *_) = _scale_fig_size(figsize, textsize, 1, 1)
backend_kwargs.setdefault("figsize", figsize)
backend_kwargs["squeeze"] = True
if ax is None:
_, ax = create_axes_grid(1, backend_kwargs=backend_kwargs)
idx = np.argsort(y_hat)
for i, loc in enumerate(locs):
positive = not y[idx][i] == 0
alpha = 1 if positive else 0.3
ax.bar(loc, 1, width=width, alpha=alpha, **plot_kwargs)
if y_hat_line:
ax.plot(np.linspace(0, 1, len(y_hat)), y_hat[idx], label=label_y_hat, **y_hat_line_kwargs)
if expected_events:
expected_events = int(np.round(np.sum(y_hat)))
ax.scatter(
y_hat[idx][len(y_hat) - expected_events - 1],
0,
label="Expected events",
**exp_events_kwargs
)
if legend and (expected_events or y_hat_line):
handles, labels = ax.get_legend_handles_labels()
labels_dict = dict(zip(labels, handles))
ax.legend(labels_dict.values(), labels_dict.keys())
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
if backend_show(show):
plt.show()
return ax
| 24.237113 | 98 | 0.630795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.08507 |
528fa27732c9c401781702d54b0c3389d8dc1cbd | 5,053 | py | Python | mesonwrap/tools/import_from_hosted.py | Calinou/wrapweb | 12ef53e9628aeb5a8db105f3a9f31d8ab4f30af3 | [
"Apache-2.0"
] | null | null | null | mesonwrap/tools/import_from_hosted.py | Calinou/wrapweb | 12ef53e9628aeb5a8db105f3a9f31d8ab4f30af3 | [
"Apache-2.0"
] | null | null | null | mesonwrap/tools/import_from_hosted.py | Calinou/wrapweb | 12ef53e9628aeb5a8db105f3a9f31d8ab4f30af3 | [
"Apache-2.0"
] | null | null | null | import argparse
import git
import github
import os.path
from mesonwrap import gitutils
from mesonwrap import tempfile
from mesonwrap import webapi
from mesonwrap import wrap
from mesonwrap.tools import environment
from retrying import retry
class Importer:
def __init__(self):
self._tmp = None
self._projects = None
@property
def _org(self):
return environment.Github().get_organization('mesonbuild')
def __enter__(self):
self._tmp = tempfile.TemporaryDirectory()
self._projects = dict()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._tmp.__exit__(exc_type, exc_value, traceback)
self._tmp = None
self._projects = None
def _clone(self, project):
if project not in self._projects:
repo = self._org.get_repo(project)
path = os.path.join(self._tmp.name, project)
self._projects[project] = git.Repo.clone_from(repo.clone_url,
to_path=path)
return self._projects[project]
def import_project(self, project):
for version in project.versions.values():
self.import_version(version)
def import_version(self, version):
for revision in version.revisions.values():
self.import_revision(revision)
@staticmethod
def _get_commit(repo, branch, revision):
cur = repo.refs['origin/' + branch].commit
todo = [cur]
while todo:
cur = todo.pop()
rev = gitutils.get_revision(repo, cur)
if rev > revision:
todo.extend(cur.parents)
elif rev == revision:
return cur
else:
raise ValueError('Impossible revision')
@staticmethod
def _is_github_error(exception):
return isinstance(exception, github.GithubException)
@retry(stop_max_attempt_number=3,
retry_on_exception=_is_github_error)
def import_wrap(self, wrap: wrap.Wrap):
wrappath = os.path.join(self._tmp.name, wrap.name + '.wrap')
zippath = os.path.join(self._tmp.name, wrap.name + '.zip')
repo = self._clone(wrap.name)
with open(wrappath, 'wb') as f:
f.write(wrap.wrap)
with open(zippath, 'wb') as f:
f.write(wrap.zip)
commit = self._get_commit(repo, wrap.version, wrap.revision)
ghrepo = self._org.get_repo(wrap.name)
tagname = '{}-{}'.format(wrap.version, wrap.revision)
try:
rel = ghrepo.get_release(tagname)
print('Release found')
except github.GithubException:
tag = ghrepo.create_git_tag(tag=tagname, message=tagname,
type='commit', object=commit.hexsha)
ghrepo.create_git_ref('refs/tags/{}'.format(tag.tag), tag.sha)
rel = ghrepo.create_git_release(tag=tagname, name=tagname,
message=tagname)
print('Release created')
patch_label = 'patch.zip'
wrap_label = 'upstream.wrap'
patch_found = False
wrap_found = False
for a in rel.get_assets():
if a.label == patch_label:
patch_found = True
elif a.label == wrap_label:
wrap_found = True
else:
print('Removing unknown asset {!r} / {!r}'.format(a.label,
a.name))
a.delete_asset()
if not wrap_found:
rel.upload_asset(wrappath, label=wrap_label,
content_type='text/plain')
if not patch_found:
rel.upload_asset(zippath, label=patch_label,
content_type='application/zip')
def import_revision(self, revision):
wrap = revision.combined_wrap
print(wrap.name,
wrap.version,
wrap.revision)
self.import_wrap(wrap)
print('Done')
def main(prog, args):
parser = argparse.ArgumentParser(prog)
parser.add_argument('--wrapdb_url', default='http://wrapdb.mesonbuild.com')
parser.add_argument('--project')
parser.add_argument('--version', help='Does not work without --project')
parser.add_argument('--revision', help='Does not work without --version')
args = parser.parse_args(args)
api = webapi.WebAPI(args.wrapdb_url)
projects = api.projects()
with Importer() as imp:
if args.project:
project = projects[args.project]
if args.version:
version = project.versions[args.version]
if args.revision:
imp.import_revision(version.revisions[args.revision])
else:
imp.import_version(version)
else:
imp.import_project(project)
else:
for project in projects:
imp.import_project(project)
| 35.090278 | 79 | 0.578864 | 3,843 | 0.760538 | 0 | 0 | 2,581 | 0.510786 | 0 | 0 | 365 | 0.072234 |
52905893b96180e3446a9e907db7c405fc9797c3 | 10,843 | py | Python | Cogs/GuideCog.py | BloomAutist47/bloom-bo | e3b298dc7ba27b8e526b18c2750b494b8a66ab3b | [
"CC0-1.0"
] | 1 | 2021-09-07T09:51:16.000Z | 2021-09-07T09:51:16.000Z | Cogs/GuideCog.py | BloomAutist47/bloom-bo | e3b298dc7ba27b8e526b18c2750b494b8a66ab3b | [
"CC0-1.0"
] | null | null | null | Cogs/GuideCog.py | BloomAutist47/bloom-bo | e3b298dc7ba27b8e526b18c2750b494b8a66ab3b | [
"CC0-1.0"
] | 3 | 2021-02-19T20:13:21.000Z | 2022-02-04T03:56:43.000Z | import os
from .Base import *
from discord.ext import commands
class GuideCog(commands.Cog, BaseTools):
def __init__(self, bot):
self.setup()
self.bot = bot
# self.bot.remove_command("help")
self.fotter = "Tip: Use \";g\" to summon a list of all guides.\nType `;bhelp` to summon a list of all commands."
@commands.command()
async def g(self, ctx, guide=""):
if os.name == "nt": # PC Mode
self.file_read("guides")
if guide == "":
embedVar = discord.Embed(title="🔹 List of Guide Commands 🔹", color=BaseProgram.block_color,
description="To summon this list, use `;g`. \n To know all Bloom Bot commands, use `;bhelp`.\n\n")
# embedVar.set_author()
desc = ""
guild_id = str(ctx.guild.id)
if guild_id in BaseProgram.settings["server_settings"]:
if BaseProgram.settings["server_settings"][guild_id]["server_privilage"] == "Homie":
for guide_name in BaseProgram.guides:
guide_data = BaseProgram.guides[guide_name]
if "type" in guide_data:
if guide_data["type"] == "header":
if "tag" not in guide_data:
desc += "\u200b"
embedVar.add_field(name=f"{guide_name}", inline=False, value=desc)
desc = ""
continue
if "title" in guide_data:
desc += "`;g {}` - {}.\n".format(guide_name, guide_data["title"])
if guild_id not in BaseProgram.settings["server_settings"]:
for guide_name in BaseProgram.guides:
if guide_name not in BaseProgram.settings["server_settings"]["Basic"]["banned_guides"]:
guide_data = BaseProgram.guides[guide_name]
if "type" in guide_data:
if guide_data["type"] == "header":
if "tag" not in guide_data:
desc += "\u200b"
embedVar.add_field(name=f"{guide_name}", inline=False, value=desc)
desc = ""
continue
if "title" in guide_data:
desc += "`;g {}` - {}.\n".format(guide_name, guide_data["title"])
await ctx.send(embed=embedVar)
return
g_name = guide.lower()
guide_mode = await self.check_guild_guide(ctx)
if not guide_mode:
if g_name in BaseProgram.settings["server_settings"]["Basic"]["banned_guides"]:
return
if g_name in BaseProgram.guides:
if "common_key" in BaseProgram.guides[g_name]:
key = BaseProgram.guides[g_name]["common_key"]
guide_data = BaseProgram.guides[key]
else:
guide_data = BaseProgram.guides[g_name]
if guide_data["type"] == "header":
return
au_title = BaseProgram.icons[guide_data["auth"]]["title"]
au_icon = BaseProgram.icons[guide_data["auth"]]["icon"]
if guide_data["type"] == "guide":
embedVar = discord.Embed(title="🔹 " + guide_data["title"] + " 🔹", color=BaseProgram.block_color,
description="The following is a short guide of %s. "\
"For the [Full Guide click this](%s)."%(guide_data["title"], guide_data["full_guide"]))
embedVar.set_image(url=guide_data["short_link"])
embedVar.set_thumbnail(url=guide_data["thumbnail"])
embedVar.set_footer(text=self.fotter)
embedVar.set_author(name=au_title, icon_url=au_icon)
await ctx.send(embed=embedVar)
return
if guide_data["type"] == "guide_links":
embedVar = discord.Embed(title="🔹 " + guide_data["title"] + " 🔹", color=BaseProgram.block_color)
desc = guide_data["description"]
for text in guide_data["content"]:
desc += "➣ [{}]({}).\n".format(text[0], text[1])
embedVar.description = desc
embedVar.set_thumbnail(url=guide_data["thumbnail"])
embedVar.set_footer(text=self.fotter)
embedVar.set_author(name=au_title, icon_url=au_icon)
await ctx.send(embed=embedVar)
return
if guide_data["type"] == "text":
embedVar = discord.Embed(title="🔹 " + guide_data["title"] + " 🔹", color=BaseProgram.block_color)
desc = guide_data["description"] + "\n\n"
bullet = ""
if "bullet" in guide_data:
bullet = "%s "%(guide_data["bullet"])
if type(guide_data["content"]) is list:
for sentence in guide_data["content"]:
desc += bullet + sentence + "\n"
else:
desc = guide_data["content"]
embedVar.description = desc
if "thumbnail" in guide_data:
embedVar.set_thumbnail(url=guide_data["thumbnail"])
if "image" in guide_data:
embedVar.set_image(url=guide_data["image"])
embedVar.set_footer(text=self.fotter)
embedVar.set_author(name=au_title, icon_url=au_icon)
await ctx.send(embed=embedVar)
return
if guide_data["type"] == "text_dict":
embedVar = discord.Embed(title="🔹 " + guide_data["title"] + " 🔹", color=BaseProgram.block_color)
desc = guide_data["description"] + "\n\n"
bullet = ""
if "bullet" in guide_data:
bullet = "%s "%(guide_data["bullet"])
for item in guide_data["content"]:
res = ""
for con in guide_data["content"][item]:
res += con + "\n"
embedVar.add_field(name=item, value=res, inline=False)
# if type(guide_data["content"]) is list:
# for sentence in guide_data["content"]:
# desc += bullet + sentence + "\n"
# else:
# desc = guide_data["content"]
embedVar.description = desc
if "thumbnail" in guide_data:
embedVar.set_thumbnail(url=guide_data["thumbnail"])
if "image" in guide_data:
embedVar.set_image(url=guide_data["image"])
embedVar.set_footer(text=self.fotter)
embedVar.set_author(name=au_title, icon_url=au_icon)
await ctx.send(embed=embedVar)
return
if guide_data["type"] == "text-field":
embedVar = discord.Embed(title="🔹 " + guide_data["title"] + " 🔹", color=BaseProgram.block_color,
description=guide_data["description"] + "\n\n")
count = 0
for item in guide_data["content"]:
if count == 1:
embedVar.add_field(name="\u200b", value="\u200b", inline=True)
if count == 2: count = 0
embedVar.add_field(name=item, value=guide_data["content"][item], inline=True)
count += 1
if "thumbnail" in guide_data:
embedVar.set_thumbnail(url=guide_data["thumbnail"])
if "image" in guide_data:
embedVar.set_image(url=guide_data["image"])
embedVar.set_footer(text=self.fotter)
embedVar.set_author(name=au_title, icon_url=au_icon)
await ctx.send(embed=embedVar)
return
if guide_data["type"] == "image":
embedVar = discord.Embed(title="🔹 " + guide_data["title"] + " 🔹", color=BaseProgram.block_color)
embedVar.description = guide_data["description"]
embedVar.set_image(url=guide_data["content"])
embedVar.set_footer(text=self.fotter)
embedVar.set_author(name=au_title, icon_url=au_icon)
await ctx.send(embed=embedVar)
return
if guide_data["type"] == "single_link":
embedVar = discord.Embed(title="🔹 " + guide_data["title"] + " 🔹", color=BaseProgram.block_color)
if type(guide_data["description"]) is list:
desc = ""
for sentence in guide_data["description"]:
desc += sentence + "\n"
else:
desc = guide_data["description"] + "\n"
desc += "➣ [Click this link]({}).".format(guide_data["content"])
embedVar.description = desc
if "thumbnail" in guide_data:
embedVar.set_thumbnail(url=guide_data["thumbnail"])
embedVar.set_footer(text=self.fotter)
embedVar.set_author(name=au_title, icon_url=au_icon)
await ctx.send(embed=embedVar)
return
else:
rec = ""
for guide in BaseProgram.guides:
if "type" in BaseProgram.guides[guide] and BaseProgram.guides[guide]["type"] == "header":
continue
if "common_key" in BaseProgram.guides[guide]:
continue
listed = []
print(f"➣ `;g {guide}` - {BaseProgram.guides[guide]['title']}")
if g_name in guide.lower() and guide not in listed:
rec += f"➣ `;g {guide}` - {BaseProgram.guides[guide]['title']}\n"
listed.append(guide)
if guide.lower() in g_name and guide not in listed:
rec += f"➣ `;g {guide}` - {BaseProgram.guides[guide]['title']}\n"
listed.append(guide)
if rec:
embedVar = discord.Embed(title="Guides", color=BaseProgram.block_color,
description="No specific guide name came up. Maybe one of these?")
embedVar.add_field(name="Suggestions:", value=rec, inline=False)
await ctx.send(embed=embedVar)
return
else:
embedVar = discord.Embed(title="Guides", color=BaseProgram.block_color,
description=f"No guide name came up with your search term `;g {g_name}`.")
await ctx.send(embed=embedVar)
return
| 48.40625 | 120 | 0.507793 | 10,833 | 0.993762 | 0 | 0 | 10,549 | 0.967709 | 10,525 | 0.965508 | 2,061 | 0.189065 |
529075584e7e7f5b22b46ff655d558470509c089 | 83,766 | py | Python | eden/modifier/rna/lib_forgi.py | zaidurrehman/EDeN | 1f29d4c9d458edb2bd62a98e57254d78a1f2093f | [
"MIT"
] | null | null | null | eden/modifier/rna/lib_forgi.py | zaidurrehman/EDeN | 1f29d4c9d458edb2bd62a98e57254d78a1f2093f | [
"MIT"
] | null | null | null | eden/modifier/rna/lib_forgi.py | zaidurrehman/EDeN | 1f29d4c9d458edb2bd62a98e57254d78a1f2093f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""bulge_graph.py: A graph representation of RNA secondary structure based
on its decomposition into primitive structure types: stems, hairpins,
interior loops, multiloops, etc...
For EDeN and GraphLearn, forgi was stripped down to this single file.
forgi: https://github.com/pkerpedjiev/forgi
"""
import sys
import collections as col
import itertools as it
import os
import operator as oper
import contextlib
import random
import shutil
import tempfile as tf
__author__ = "Peter Kerpedjiev"
__copyright__ = "Copyright 2012, 2013, 2014"
__version__ = "0.2"
__maintainer__ = "Peter Kerpedjiev"
__email__ = "pkerp@tbi.univie.ac.at"
bracket_left = "([{<ABCDEFGHIJKLMNOPQRSTUVWXYZ"
bracket_right = ")]}>abcdefghijklmnopqrstuvwxyz"
def gen_random_sequence(l):
    '''
    Generate a random RNA sequence of length l.

    :param l: The desired sequence length.
    :return: A string of l nucleotides drawn uniformly from {A, C, G, U}.
    '''
    nucleotides = ['A', 'C', 'G', 'U']
    return "".join(random.choice(nucleotides) for _ in range(l))
@contextlib.contextmanager
def make_temp_directory():
    '''
    Context manager yielding the path of a freshly created temporary
    directory.

    The directory is removed when the context exits, even if the body
    raises an exception (previously cleanup ran only after a normal
    return, leaking the directory on error).

    Yanked from:
    http://stackoverflow.com/questions/13379742/right-way-to-clean-up-a-temporary-folder-in-python-class

    :yield: The path of the temporary directory.
    '''
    temp_dir = tf.mkdtemp()
    try:
        yield temp_dir
    finally:
        # Always clean up, even when the with-body raises.
        shutil.rmtree(temp_dir)
def insert_into_stack(stack, i, j):
    '''
    Push the pairing partner j onto the shallowest stack level whose top
    element is not smaller than j, and report that level.

    Used to assign a bracket family when printing pseudoknotted
    structures.

    :param stack: A dict mapping a level index to a list used as a stack.
    :param i: The opening position (currently unused, kept for interface
              parity with delete_from_stack call sites).
    :param j: The closing position to record.
    :return: The level index that received j.
    '''
    level = 0
    while stack[level] and stack[level][-1] < j:
        level += 1
    stack[level].append(j)
    return level
def delete_from_stack(stack, j):
    '''
    Pop j from whichever stack level currently has it on top, and report
    that level.

    :param stack: A dict mapping a level index to a list used as a stack.
    :param j: The value to remove; it must be on top of some level.
    :return: The level index from which j was popped.
    '''
    level = 0
    while not stack[level] or stack[level][-1] != j:
        level += 1
    stack[level].pop()
    return level
def pairtable_to_dotbracket(pt):
    """
    Convert a pair table (ViennaRNA format: 1-based, with the sequence
    length stored in slot 0) into a dot-bracket string, using additional
    bracket families for pseudoknotted pairs.

    :param pt: The pair table; pt[i] is the partner of position i, or 0
               if position i is unpaired.
    :return: The dot-bracket string.
    :raises ValueError: If a partner index occurs more than once.
    """
    stack = col.defaultdict(list)
    seen = set()
    chars = []
    for i in range(1, pt[0] + 1):
        partner = pt[i]
        if partner != 0 and partner in seen:
            raise ValueError('Invalid pairtable contains duplicate entries')
        seen.add(partner)

        if partner == 0:
            chars.append('.')
        elif partner > i:
            # Opening side: choose a bracket family deep enough to nest.
            chars.append(bracket_left[insert_into_stack(stack, i, partner)])
        else:
            # Closing side: match the family this pair was opened with.
            chars.append(bracket_right[delete_from_stack(stack, i)])

    return "".join(chars)
def inverse_brackets(bracket):
    '''
    Map each character of a bracket alphabet to its index.

    :param bracket: An ordered string of bracket characters.
    :return: A defaultdict(int) mapping character -> position, so that
             characters not in the alphabet silently map to 0.
    '''
    return col.defaultdict(int, {a: i for i, a in enumerate(bracket)})
def dotbracket_to_pairtable(struct):
    """
    Converts arbitrary structure in dot bracket format to pair table (ViennaRNA format).

    :param struct: A dot-bracket string; pseudoknots may use the extra
                   bracket families listed in bracket_left/bracket_right.
    :return: A pair table pt where pt[0] is the sequence length and
             pt[i] is the partner of position i (1-based), or 0 if
             position i is unpaired.
    :raises ValueError: If the structure contains unmatched opening or
                        closing brackets.
    """
    pt = [0] * (len(struct) + 1)
    pt[0] = len(struct)

    stack = col.defaultdict(list)
    inverse_bracket_left = inverse_brackets(bracket_left)
    inverse_bracket_right = inverse_brackets(bracket_right)

    for i, a in enumerate(struct):
        i += 1
        if a == ".":
            pt[i] = 0
        elif a in inverse_bracket_left:
            stack[inverse_bracket_left[a]].append(i)
        else:
            if len(stack[inverse_bracket_right[a]]) == 0:
                raise ValueError('Too many closing brackets!')
            j = stack[inverse_bracket_right[a]].pop()
            pt[i] = j
            pt[j] = i

    # Previously only the bracket family of the *last* character was
    # checked for leftovers, so structures like "[()" passed silently
    # with an unmatched '['. Verify that every family's stack is empty.
    if any(len(s) > 0 for s in stack.values()):
        raise ValueError('Too many opening brackets!')

    return pt
def pairtable_to_tuples(pt):
    '''
    Convert a pairtable to a list of base pair tuples.
    i.e. [4,3,4,1,2] -> [(1,3),(2,4),(3,1),(4,2)]

    :param pt: A pairtable
    :return: A list of paired tuples
    '''
    pt = iter(pt)
    # skip the first element, which holds the sequence length;
    # the builtin next() works on Python 2 and 3, unlike pt.next()
    next(pt)
    return [(i + 1, p) for i, p in enumerate(pt)]
def tuples_to_pairtable(pair_tuples, seq_length=None):
    '''
    Convert a representation of an RNA consisting of a list of tuples
    to a pair table:
    i.e. [(1,3),(2,4),(3,1),(4,2)] -> [4,3,4,1,2]

    :param pair_tuples: A list of pair tuples
    :param seq_length: How long is the sequence? Only needs to be passed in when
                       the unpaired nucleotides aren't passed in as (x,0) tuples.
    :return: A pair table
    '''
    if seq_length is None:
        length = max(max(pair) for pair in pair_tuples)
    else:
        length = seq_length
    pt = [0] * (length + 1)
    pt[0] = length
    for left, right in pair_tuples:
        pt[left] = right
    return pt
def add_bulge(bulges, bulge, context, message):
    """
    Record *bulge* under *context* in the *bulges* dictionary.

    A wrapper for a simple dictionary addition, kept as a function so
    that debugging can be made easier.

    :param bulges: Dict mapping context -> list of bulges.
    :param bulge: The (start, end) bulge to record.
    :param context: The context key to file it under.
    :param message: A debugging tag (unused).
    :return: The updated dictionary.
    """
    existing = bulges.get(context, [])
    bulges[context] = existing + [bulge]
    return bulges
def any_difference_of_one(stem, bulge):
    """
    See if there's any difference of one between the two
    ends of the stem [(a,b),(c,d)] and a bulge (e,f).

    :param stem: A couple of couples (2 x 2-tuple) indicating the start and end
                 nucleotides of the stem in the form ((s1, e1), (s2, e2))
    :param bulge: A couple (2-tuple) indicating the first and last position
                  of the bulge.
    :return: True if some stem coordinate and some bulge coordinate differ
             by exactly one, False otherwise.
    """
    return any(abs(bulge_end - coord) == 1
               for strand in stem
               for coord in strand
               for bulge_end in bulge)
def print_bulges(bulges):
"""
Print the names and definitions of the bulges.
:param bulges: A list of tuples of the form [(s, e)] where s and e are the
numbers of the nucleotides at the start and end of the bulge.
"""
for i in range(len(bulges)):
# print "bulge:", bulge
bulge_str = "define b{} 1".format(i)
bulge = bulges[i]
bulge_str += " {} {}".format(bulge[0] + 1, bulge[1] + 1)
print bulge_str
def condense_stem_pairs(stem_pairs):
    """
    Given a list of stem pairs, condense them into stem definitions.

    I.e. the pairs (0,10),(1,9),(2,8),(3,7) can be condensed into
    just the ends of the stem: [(0,10),(3,7)].

    :param stem_pairs: A list of tuples containing paired base numbers.
    :returns: A list of tuples of tuples of the form [((s1, e1), (s2, e2))]
              where s1 and e1 are the nucleotides at one end of the stem
              and s2 and e2 are the nucleotides at the other.
    """
    stem_pairs.sort()
    stems = []
    first_pair = None
    last_pair = (-10, -10)
    for pair in stem_pairs:
        # There's a potential bug here since we don't check the direction
        # but hopefully it won't bite us in the ass later
        contiguous = (abs(pair[0] - last_pair[0]) == 1 and
                      abs(pair[1] - last_pair[1]) == 1)
        if not contiguous:
            if first_pair is not None:
                stems.append((first_pair, last_pair))
            first_pair = pair
        last_pair = pair
    if first_pair is not None:
        stems.append((first_pair, last_pair))
    return stems
def print_brackets(brackets):
"""
Print the brackets and a numbering, for debugging purposes
:param brackets: A string with the dotplot passed as input to this script.
"""
numbers = [chr(ord('0') + i % 10) for i in range(len(brackets))]
tens = [chr(ord('0') + i / 10) for i in range(len(brackets))]
print "brackets:\n", brackets, "\n", "".join(tens), "\n", "".join(numbers)
def find_bulges_and_stems(brackets):
    """
    Iterate through the structure and enumerate the bulges and the stems that are
    present.
    The returned stems are of the form [[(s1, s2), (e1,e2)], [(s1,s2),(e1,e2)],...]
    where (s1,s2) are the residue numbers of one end of the stem and (e1,e2) are the
    residue numbers at the other end of the stem
    (see condense_stem_pairs)
    The returned bulges are of the form [(s,e), (s,e),...] where s is the start of a bulge
    and e is the end of a bulge
    :param brackets: A string with the dotplot passed as input to this script.
    :returns: A tuple (finished_bulges, stems).
    :raises Exception: On unmatched open or close brackets.
    """
    # prev holds the previously seen character class; context is the
    # current nesting context opened by runs of '('
    prev = 'x'
    context = 0
    # bulges found per context; moved to finished_bulges once the context closes
    bulges = dict()
    finished_bulges = []
    # number of still-open '(' per context
    context_depths = dict()
    # positions of currently unmatched '('
    opens = []
    stem_pairs = []
    dots_start = 0
    context_depths[0] = 0
    i = 0
    for i in range(len(brackets)):
        if brackets[i] == '(':
            opens.append(i)
            if prev == '(':
                # consecutive '(' deepens the current context
                context_depths[context] = context_depths.get(context, 0) + 1
                continue
            else:
                # a '(' after '.' or ')' opens a new context
                context += 1
                context_depths[context] = 1
            if prev == '.':
                # the dot run that just ended is a bulge in the new context
                dots_end = i - 1
                bulges = add_bulge(
                    bulges, (dots_start, dots_end), context, "4")
        if brackets[i] == ')':
            if len(opens) == 0:
                raise Exception("Unmatched close bracket")
            # pair the most recent open bracket with this closing one
            stem_pairs.append((opens.pop(), i))
            context_depths[context] -= 1
            if context_depths[context] == 0:
                # this context is fully closed; archive its bulges
                if context in bulges:
                    finished_bulges += bulges[context]
                bulges[context] = []
                context -= 1
            if prev == '.':
                dots_end = i - 1
                bulges = add_bulge(
                    bulges, (dots_start, dots_end), context, "2")
        if brackets[i] == '.':
            if prev == '.':
                continue
            # remember where this run of dots began
            dots_start = i
        prev = brackets[i]
    if prev == '.':
        # trailing unpaired region becomes a final bulge
        dots_end = i
        bulges = add_bulge(bulges, (dots_start, dots_end), context, "7")
    elif prev == '(':
        print >> sys.stderr, "Unmatched bracket at the end"
        sys.exit(1)
    """
    elif prev == ')':
        bulges = add_bulge(bulges, (i+1, i+1), context, "8")
    """
    if context in bulges.keys():
        finished_bulges += bulges[context]
    if len(opens) > 0:
        raise Exception("Unmatched open bracket")
    stem_pairs.sort()
    stems = condense_stem_pairs(stem_pairs)
    return finished_bulges, stems
def print_name(filename):
    """Print the structure's name line: the filename minus its extension."""
    print "name", os.path.splitext(filename)[0]
class BulgeGraph(object):
    def __init__(self, bg_file=None, dotbracket_str='', seq=''):
        """
        A graph-like representation of an RNA secondary structure.

        :param bg_file: Unused here (the from_bg_file call below is
                        commented out) — presumably a saved graph file.
        :param dotbracket_str: If non-empty, build the graph from this
                               dot-bracket string.
        :param seq: The nucleotide sequence matching the structure.
        """
        self.seq_length = 0  # number of nucleotides in the structure
        self.ang_types = None
        self.mst = None
        self.build_order = None
        self.name = "untitled"
        self.defines = dict()  # element name -> flat list of residue ranges
        self.edges = col.defaultdict(set)  # element name -> connected elements
        self.longrange = col.defaultdict(set)
        self.weights = dict()
        # sort the coordinate basis for each stem
        self.bases = dict()
        self.stem_invs = dict()
        self.seq_ids = []
        self.name_counter = 0  # backs get_vertex()'s generated names
        if dotbracket_str != '':
            self.from_dotbracket(dotbracket_str)
        self.seq = seq
        for i, s in enumerate(seq):
            # NOTE(review): the residue number is stored as a *string*
            # here, but seq_ids_from_seq() stores it as an int —
            # confirm which form downstream code expects.
            self.seq_ids += [(' ', str(i + 1), ' ')]
# if bg_file is not None:
# self.from_bg_file(bg_file)
# get an internal index for a named vertex
# this applies to both stems and edges
def get_vertex(self, name=None):
"""
Return a new unique vertex name.
"""
if name is None:
name = "x{}".format(self.name_counter)
self.name_counter += 1
return name
def element_length(self, key):
"""
Get the number of residues that are contained within this element.
:param key: The name of the element.
"""
d = self.defines[key]
length = 0
for i in range(0, len(d), 2):
length += d[i + 1] - d[i] + 1
return length
def stem_length(self, key):
"""
Get the length of a particular element. If it's a stem, it's equal to
the number of paired bases. If it's an interior loop, it's equal to the
number of unpaired bases on the strand with less unpaired bases. If
it's a multiloop, then it's the number of unpaired bases.
"""
d = self.defines[key]
if key[0] == 's' or key[0] == 'y':
return (d[1] - d[0]) + 1
elif key[0] == 'f':
return self.get_bulge_dimensions(key)[0]
elif key[0] == 't':
return self.get_bulge_dimensions(key)[1]
elif key[0] == 'h':
return self.get_bulge_dimensions(key)[0]
else:
return min(self.get_bulge_dimensions(key))
def get_single_define_str(self, key):
"""
Get a define string for a single key.
"""
return "define {} {}".format(key, " ".join([str(d) for d in self.defines[key]]))
def get_define_str(self):
"""
Convert the defines into a string.
Format:
define [name] [start_res1] [end_res1] [start_res2] [end_res2]
"""
defines_str = ''
# a method for sorting the defines
def define_sorter(k):
drni = self.define_residue_num_iterator(k, adjacent=True)
return drni.next()
for key in sorted(self.defines.keys(), key=define_sorter):
defines_str += self.get_single_define_str(key)
# defines_str += "define %s %s" % ( key, " ".join([str(d) for d in
# self.defines[key]]))
defines_str += '\n'
return defines_str
def get_length_str(self):
return "length " + str(self.seq_length) + '\n'
def get_connect_str(self):
"""
Get the connections of the bulges in the graph.
Format:
connect [from] [to1] [to2] [to3]
"""
whole_str = ''
for key in self.edges:
if len(self.edges[key]) == 0:
continue
# Our graph will be defined by the stems and the bulges they
# connect to
name = key
if name[0] == 's':
out_str = "connect {}".format(name)
for dest in self.edges[key]:
out_str += " {}".format(dest)
whole_str += out_str
whole_str += '\n'
return whole_str
def get_sequence_str(self):
"""
Return the sequence along with its keyword. I.e.
seq ACGGGCC
"""
if len(self.seq) > 0:
return "seq {}\n".format(self.seq)
else:
return ""
def get_name_str(self):
"""
Return the name of this structure along with its keyword:
name 1y26
"""
return "name {}\n".format(self.name)
def to_bg_string(self):
"""
Output a string representation that can be stored and reloaded.
"""
out_str = ''
out_str += self.get_name_str()
out_str += self.get_length_str()
out_str += self.get_sequence_str()
out_str += self.get_define_str()
out_str += self.get_connect_str()
return out_str
def to_file(self, filename):
with open(filename, 'w') as f:
out_str = self.to_bg_string()
f.write(out_str)
def to_element_string(self):
"""
Create a string similar to dotbracket notation that identifies what
type of element is present at each location.
For example the following dotbracket:
..((..))..
Should yield the following element string:
ffsshhsstt
Indicating that it begins with a fiveprime region, continues with a
stem, has a hairpin after the stem, the stem continues and it is terminated
by a threeprime region.
"""
output_str = [' '] * (self.seq_length + 1)
for d in self.defines.keys():
for resi in self.define_residue_num_iterator(d, adjacent=False):
output_str[resi] = d[0]
return "".join(output_str).strip()
def define_range_iterator(self, node, adjacent=False, seq_ids=False):
"""
Return the ranges of the nucleotides in the define.
In other words, if a define contains the following: [1,2,7,8]
The ranges will be [1,2] and [7,8].
:param adjacent: Use the nucleotides in the neighboring element which
connect to this element as the range starts and ends.
:return: A list of two-element lists
"""
a = iter(self.defines[node])
ranges = it.izip(a, a)
if node[0] == 'i':
# interior loops have to be treated specially because
# they might have a bulge that has no unpaired nucleotides on one
# strand
if adjacent:
conns = self.connections(node)
s1 = self.defines[conns[0]]
s2 = self.defines[conns[1]]
# offset by one, which will be reversed in the yield step
# below
ranges = [[s1[1] + 1, s2[0] - 1], [s2[3] + 1, s1[2] - 1]]
if node[0] == 'm':
if adjacent:
conns = self.connections(node)
s1 = self.get_sides_plus(conns[0], node)[0]
s2 = self.get_sides_plus(conns[1], node)[0]
rnge = sorted([self.defines[conns[0]][s1],
self.defines[conns[1]][s2]])
ranges = [[rnge[0] + 1, rnge[1] - 1]]
for (ds1, ds2) in ranges:
if adjacent:
if ds1 > 1:
ds1 -= 1
if ds2 < self.seq_length:
ds2 += 1
if seq_ids:
# this will cause problems if the nucleotide has insertion
# codes
yield [self.seq_ids[ds1 - 1], self.seq_ids[ds2 - 1]]
else:
yield [ds1, ds2]
def define_residue_num_iterator(self, node, adjacent=False, seq_ids=False):
"""
Iterate over the residue numbers that belong to this node.
:param node: The name of the node
"""
visited = set()
for r in self.define_range_iterator(node, adjacent, seq_ids=False):
for i in range(r[0], r[1] + 1):
if seq_ids:
if self.seq_ids[i - 1] not in visited:
visited.add(self.seq_ids[i - 1])
yield self.seq_ids[i - 1]
else:
if i not in visited:
visited.add(i)
yield i
def iterate_over_seqid_range(self, start_id, end_id):
"""
Iterate over the seq_ids between the start_id and end_id.
"""
i1 = self.seq_ids.index(start_id)
i2 = self.seq_ids.index(end_id)
for i in range(i1, i2 + 1):
yield self.seq_ids[i]
def create_bulge_graph(self, stems, bulges):
"""
Find out which stems connect to which bulges
Stems and bulges which share a nucleotide are considered connected.
:param stems: A list of tuples of tuples of the form [((s1, e1), (s2, e2))]
where s1 and e1 are the nucleotides at one end of the stem
and s2 and e2 are the nucleotides at the other.
:param bulges: A list of tuples of the form [(s, e)] where s and e are the
numbers of the nucleotides at the start and end of the bulge.
"""
for i in range(len(stems)):
stem = stems[i]
for j in range(len(bulges)):
bulge = bulges[j]
if any_difference_of_one(stem, bulge):
self.edges['y{}'.format(i)].add('b{}'.format(j))
self.edges['b{}'.format(j)].add('y{}'.format(i))
    def create_stem_graph(self, stems, bulge_counter):
        """
        Determine which stems are connected to each other. A stem can be connected to
        another stem when there is an interior loop with an unpaired nucleotide on
        one side. In this case, a bulge will be created on the other side, but it
        will only consist of the two paired bases around where the unpaired base
        would be if it existed.
        The defines for these bulges will be printed as well as the connection strings
        for the stems they are connected to.
        :param stems: A list of tuples of tuples of the form [((s1, e1), (s2, e2))]
                      where s1 and e1 are the nucleotides at one end of the stem
                      and s2 and e2 are the nucleotides at the other.
        :param bulge_counter: The number of bulges that have been encountered so far.
        :returns: A dictionary indexed by the number of a stem, containing a set of the
                 other stems that the index is connected to.
        """
        # print "stems:", stems
        stem_stems = dict()
        for i in range(len(stems)):
            for j in range(i + 1, len(stems)):
                # compare every corner of stem i against every corner of
                # stem j; adjacent corners mean the stems touch
                for k1 in range(2):
                    # don't fear the for loop
                    for k2 in range(2):
                        for l1 in range(2):
                            for l2 in range(2):
                                s1 = stems[i][k1][l1]
                                s2 = stems[j][k2][l2]
                                if abs(s1 - s2) == 1:
                                    # join the two stems with a zero-length
                                    # bulge node (empty define)
                                    stem_stems_set = stem_stems.get(i, set())
                                    if j not in stem_stems_set:
                                        bn = 'b{}'.format(bulge_counter)
                                        # self.defines[bn] = [min(s1, s2)+1,
                                        # max(s1, s2)+1]
                                        self.defines[bn] = []
                                        self.weights[bn] = 1
                                        self.edges['y{}'.format(i)].add(bn)
                                        self.edges[bn].add('y{}'.format(i))
                                        self.edges['y{}'.format(j)].add(bn)
                                        self.edges[bn].add('y{}'.format(j))
                                        bulge_counter += 1
                                        stem_stems_set.add(j)
                                    stem_stems[i] = stem_stems_set
        for d in self.defines.keys():
            if d[0] != 'y':
                continue
            # a stem whose two strands are directly adjacent gets a
            # zero-length bulge linking its own ends
            (s1, e1, s2, e2) = self.defines[d]
            if abs(s2 - e1) == 1:
                bn = 'b{}'.format(bulge_counter)
                self.defines[bn] = []
                self.weights[bn] = 1
                self.edges[bn].add(d)
                self.edges[d].add(bn)
                bulge_counter += 1
        return stem_stems
def remove_vertex(self, v):
"""
Delete a node after merging it with another
:param v: The name of the node
"""
# delete all edges to this node
for key in self.edges[v]:
self.edges[key].remove(v)
for edge in self.edges:
if v in self.edges[edge]:
self.edges[edge].remove(v)
# delete all edges from this node
del self.edges[v]
del self.defines[v]
    def reduce_defines(self):
        """
        Make defines like this:
        define x0 2 124 124 3 4 125 127 5 5
        Into this:
        define x0 2 3 5 124 127
        That is, consolidate contiguous bulge region defines.
        """
        for key in self.defines.keys():
            if key[0] != 's':
                assert (len(self.defines[key]) % 2 == 0)
                new_j = 0
                # scan the (start, end) pairs; the scan restarts from 0
                # whenever the list is modified, since deletions shift
                # the indices of the remaining pairs
                while new_j < len(self.defines[key]):
                    j = new_j
                    new_j += j + 2
                    (f1, t1) = (
                        int(self.defines[key][j]), int(self.defines[key][j + 1]))
                    # remove bulges of length 0
                    if f1 == -1 and t1 == -2:
                        del self.defines[key][j]
                        del self.defines[key][j]
                        new_j = 0
                        continue
                    # merge contiguous bulge regions
                    for k in range(j + 2, len(self.defines[key]), 2):
                        if key[0] == 'y':
                            # we can have stems with defines like: [1,2,3,4]
                            # which would imply a non-existant loop at its end
                            continue
                        (f2, t2) = (
                            int(self.defines[key][k]), int(self.defines[key][k + 1]))
                        # skip unless [f2,t2] directly abuts [f1,t1]
                        if t2 + 1 != f1 and t1 + 1 != f2:
                            continue
                        if t2 + 1 == f1:
                            # [f2,t2] immediately precedes [f1,t1]
                            self.defines[key][j] = str(f2)
                            self.defines[key][j + 1] = str(t1)
                        elif t1 + 1 == f2:
                            # [f2,t2] immediately follows [f1,t1]
                            self.defines[key][j] = str(f1)
                            self.defines[key][j + 1] = str(t2)
                        # drop the merged pair and rescan from the start
                        del self.defines[key][k]
                        del self.defines[key][k]
                        new_j = 0
                        break
    def merge_vertices(self, vertices):
        """
        This is done when two of the outgoing strands of a stem
        go to different bulges
        It is assumed that the two ends are on the same sides because
        at least one vertex has a weight of 2, implying that it accounts
        for all of the edges going out of one side of the stem
        :param vertices: A list of vertex names to combine into one.
        :returns: The name of the newly created merged vertex.
        """
        merge_str = ""
        new_vertex = self.get_vertex()
        self.weights[new_vertex] = 0
        # assert(len(vertices) == 2)
        connections = set()
        for v in vertices:
            merge_str += " {}".format(v)
            # what are we gonna merge?
            # collect all neighbors; they will be re-attached to the
            # merged vertex after the originals are removed
            for item in self.edges[v]:
                connections.add(item)
            # Add the definition of this vertex to the new vertex
            # self.merge_defs[new_vertex] = self.merge_defs.get(new_vertex, [])
            # + [v]
            if v[0] == 's':
                # for a stem, take one coordinate from each strand so the
                # merged define still brackets the stem's region
                self.defines[new_vertex] = self.defines.get(
                    new_vertex, []) + [self.defines[v][0],
                                       self.defines[v][2]] + [
                    self.defines[v][1], self.defines[v][3]]
            else:
                self.defines[new_vertex] = self.defines.get(
                    new_vertex, []) + self.defines[v]
            self.weights[new_vertex] += 1
            # remove the old vertex, since it's been replaced by new_vertex
            self.remove_vertex(v)
            self.reduce_defines()
        # self.weights[new_vertex] = 2
        for connection in connections:
            self.edges[new_vertex].add(connection)
            self.edges[connection].add(new_vertex)
        return new_vertex
def nucleotides_to_elements(self, nucleotides):
"""
Convert a list of nucleotides to element names.
Remove redundant entries and return a set.
"""
return set([self.get_node_from_residue_num(n) for n in nucleotides])
    def find_bulge_loop(self, vertex, max_length=4):
        """
        Find a set of nodes that form a loop containing the
        given vertex and being no greater than 4 nodes long.
        :param vertex: The vertex to start the search from.
        :param max_length: The longest loop (in nodes) that is reported.
        :returns: A list of the nodes in the loop.
        """
        visited = set()
        # depth-first search that remembers the path back to the start
        to_visit = [(key, 1) for key in self.edges[vertex]]
        visited.add(vertex)
        in_path = [vertex]
        while len(to_visit) > 0:
            (current, depth) = to_visit.pop()
            visited.add(current)
            # truncate the recorded path to the current search depth
            in_path = in_path[:depth]
            in_path.append(current)
            for key in self.edges[current]:
                if key == vertex and depth > 1:
                    # we returned to the start vertex: a loop was found,
                    # report it only if it is short enough
                    if len(in_path[:depth + 1]) > max_length:
                        continue
                    else:
                        return in_path[:depth + 1]
                if key not in visited:
                    to_visit.append((key, depth + 1))
        return []
def add_node(self, name, edges, define, weight=1):
self.defines[name] = define
self.edges[name] = edges
self.weights[name] = weight
for edge in self.edges[name]:
self.edges[edge].add(name)
    def dissolve_stem(self, key):
        """
        Remove a stem. This means that we need
        to reconfigure all of the adjacent elements in such a manner
        that they now include the nucleotides that were formerly
        in this stem.
        :param key: The name of the stem to dissolve.
        """
        # unpair every base pair of this stem; remove_base_pairs rebuilds
        # the whole graph from the remaining pairs
        st = list(self.stem_bp_iterator(key))
        self.remove_base_pairs(st)
def remove_base_pairs(self, to_remove):
"""
Remove all of the base pairs which are in pair_list.
:param to_remove: A list of tuples containing the names of the base pairs.
:return: nothing
"""
pt = self.to_pair_tuples()
nt = []
for p in pt:
to_add = p
for s in to_remove:
if sorted(p) == sorted(s):
to_add = (p[0], 0)
break
nt += [to_add]
self.defines = dict()
# self.edges = dict()
self.from_tuples(nt)
    def collapse(self):
        """
        If any vertices form a loop, then they are either a bulge region or
        a fork region. The bulge (interior loop) regions will be condensed
        into one node.
        """
        # repeat until no merge happens, since merging changes the edges
        new_vertex = True
        while new_vertex:
            new_vertex = False
            bulges = [k for k in self.defines if k[0] != 'y']
            for (b1, b2) in it.combinations(bulges, r=2):
                if self.edges[b1] == self.edges[b2] and len(self.edges[b1]) > 1:
                    # two bulges bridge the same pair of stems; find which
                    # sides of those stems each bulge attaches to
                    connections = self.connections(b1)
                    all_connections = [sorted(
                        (self.get_sides_plus(connections[0], b1)[0],
                         self.get_sides_plus(
                             connections[0], b2)[0])),
                        sorted(
                        (self.get_sides_plus(connections[
                            1], b1)[0],
                            self.get_sides_plus(connections[1], b2)[0]))]
                    if all_connections == [[1, 2], [0, 3]]:
                        # interior loop
                        self.merge_vertices([b1, b2])
                        new_vertex = True
                        break
def interior_loop_iterator(self):
"""
Iterate over all of the interior loops.
An interior loop can only have two connections: to the two stems which it links.
"""
for key in self.defines.keys():
if key[0] == 'i':
yield key
def relabel_node(self, old_name, new_name):
"""
Change the name of a node.
param old_name: The previous name of the node
param new_name: The new name of the node
"""
# replace the define name
define = self.defines[old_name]
del self.defines[old_name]
self.defines[new_name] = define
# replace the index into the edges array
edge = self.edges[old_name]
del self.edges[old_name]
self.edges[new_name] = edge
# replace the name of any edge that pointed to old_name
for k in self.edges.keys():
new_edges = set()
for e in self.edges[k]:
if e == old_name:
new_edges.add(new_name)
else:
new_edges.add(e)
self.edges[k] = new_edges
def compare_stems(self, b):
"""
A function that can be passed in as the key to a sort.
"""
return (self.defines[b][0], 0)
def compare_bulges(self, b):
connections = self.connections(b)
return (self.defines[connections[0]][0],
self.defines[connections[1]][0])
def compare_hairpins(self, b):
connections = self.connections(b)
return (self.defines[connections[0]][1], sys.maxint)
    def relabel_nodes(self):
        """
        Change the labels of the nodes to be more indicative of their nature.
        s: stem
        h: hairpin
        i: interior loop
        m: multiloop
        f: five-prime unpaired
        t: three-prime unpaired
        """
        stems = []
        hairpins = []
        interior_loops = []
        multiloops = []
        fiveprimes = []
        threeprimes = []
        # classify every element; each branch ends in `continue`, so the
        # first matching rule wins
        for d in self.defines.keys():
            if d[0] == 'y' or d[0] == 's':
                stems += [d]
                stems.sort(key=self.compare_stems)
                continue
            # an empty define with a single neighbor: zero-length hairpin
            if len(self.defines[d]) == 0 and len(self.edges[d]) == 1:
                hairpins += [d]
                continue
            # an empty define bridging two stems: zero-length multiloop
            if len(self.defines[d]) == 0 and len(self.edges[d]) == 2:
                multiloops += [d]
                continue
            # starts at residue 1: five-prime unpaired region
            if len(self.edges[d]) <= 1 and self.defines[d][0] == 1:
                fiveprimes += [d]
                continue
            # ends at the last residue: three-prime unpaired region
            if len(self.edges[d]) == 1 and self.defines[d][1] == self.seq_length:
                threeprimes += [d]
                continue
            if (len(self.edges[d]) == 1 and
                self.defines[d][0] != 1 and
                self.defines[d][1] != self.seq_length):
                hairpins += [d]
                hairpins.sort(key=self.compare_hairpins)
                continue
            if d[0] == 'm' or (d[0] != 'i' and len(self.edges[d]) == 2 and
                               self.weights[d] == 1 and
                               self.defines[d][0] != 1 and
                               self.defines[d][1] != self.seq_length):
                multiloops += [d]
                multiloops.sort(key=self.compare_bulges)
                continue
            if d[0] == 'i' or self.weights[d] == 2:
                interior_loops += [d]
                interior_loops.sort(key=self.compare_stems)
        # rename every element according to its classification
        for d in fiveprimes:
            self.relabel_node(d, 'f1')
        for d in threeprimes:
            self.relabel_node(d, 't1')
        for i, d in enumerate(stems):
            self.relabel_node(d, 's%d' % (i))
        for i, d in enumerate(interior_loops):
            self.relabel_node(d, 'i%d' % (i))
        for i, d in enumerate(multiloops):
            self.relabel_node(d, 'm%d' % (i))
        for i, d in enumerate(hairpins):
            self.relabel_node(d, 'h%d' % (i))
def has_connection(self, v1, v2):
""" Is there an edge between these two nodes """
if v2 in self.edges[v1]:
return True
else:
# two multiloops can be connected at the end of a stem
for e in self.edges[v1]:
if e[0] != 's':
continue
if v2 in self.edges[e]:
(s1b, s1e) = self.get_sides(e, v1)
(s2b, s2e) = self.get_sides(e, v2)
if s1b == s2b:
return True
return False
def connection_type(self, define, connections):
"""
Classify the way that two stems are connected according to the type
of bulge that separates them.
Potential angle types for single stranded segments, and the ends of
the stems they connect:
1 2 (1, 1) #pseudoknot
1 0 (1, 0)
3 2 (0, 1)
3 0 (0, 0)
:param define: The name of the bulge separating the two stems
:param connections: The two stems and their separation
"""
if define[0] == 'i':
# interior loop, we just have to check if
# connections[0] < connections[1]
if self.defines[connections[0]][0] < self.defines[connections[1]][0]:
return 1
else:
return -1
elif define[0] == 'm':
(s1c, b1c) = self.get_sides_plus(connections[0], define)
(s2c, b2c) = self.get_sides_plus(connections[1], define)
if (s1c, s2c) == (1, 0):
return 2
elif (s1c, s2c) == (0, 1):
return -2
elif (s1c, s2c) == (3, 0):
return 3
elif (s1c, s2c) == (0, 3):
return -3
elif (s1c, s2c) == (2, 3):
return 4
elif (s1c, s2c) == (3, 2):
return -4
# the next two refer to pseudoknots
elif (s1c, s2c) == (2, 1):
return 5
elif (s1c, s2c) == (1, 2):
return -5
else:
raise Exception("Weird angle type: (s1c, s2c) = (%d, %d)" %
(s1c, s2c))
else:
raise Exception(
"connection_type called on non-interior loop/multiloop")
def connection_ends(self, connection_type):
"""
Find out which ends of the stems are connected by a particular angle
type.
:param connection_type: The angle type, as determined by which corners
of a stem are connected
:return: (s1e, s2b)
"""
ends = ()
if abs(connection_type) == 1:
ends = (1, 0)
elif abs(connection_type) == 2:
ends = (1, 0)
elif abs(connection_type) == 3:
ends = (0, 0)
elif abs(connection_type) == 4:
ends = (1, 0)
elif abs(connection_type) == 5:
ends = (1, 1)
else:
raise Exception('Unknown connection type: %d' % (connection_type))
if connection_type < 0:
return ends[::-1]
else:
return ends
    def get_multiloop_nucleotides(self, multiloop_loop):
        """
        Return a list of nucleotides which make up a particular
        multiloop.
        :param multiloop_loop: The elements which make up this multiloop
        :return: A list of nucleotides
        """
        stems = [d for d in multiloop_loop if d[0] == 's']
        multis = [d for d in multiloop_loop if d[0] == 'm']
        residues = []
        for s in stems:
            # which sides of this stem face into the multiloop?
            relevant_edges = [c for c in self.edges[s] if c in multiloop_loop]
            sides = [self.get_sides_plus(s, c)[0] for c in relevant_edges]
            sides.sort()
            # the whole stem is part of this multiloop
            if sides == [2, 3] or sides == [0, 1]:
                residues += range(
                    self.defines[s][sides[0]], self.defines[s][sides[1]] + 1)
            else:
                # only the two corner nucleotides touch the loop
                residues += [
                    self.defines[s][sides[0]], self.defines[s][sides[1]]]
        for m in multis:
            # add all unpaired residues of the multiloop segments
            residues += self.define_residue_num_iterator(m, adjacent=False)
        return residues
def find_external_loops(self):
'''
Return all of the elements which are part of
an external loop.
:return: A list containing the external loops in this molecule
(i.e. ['f0, m3, m5, t0'])
'''
ext_loop = []
for d in it.chain(self.floop_iterator(),
self.tloop_iterator(),
self.mloop_iterator()):
loop_nts = self.shortest_bg_loop(d)
if len(loop_nts) == 0:
ext_loop += [d]
return ext_loop
    def find_multiloop_loops(self):
        """
        Find out which defines are connected in a multiloop.
        :return: Two lists: one containing sets of elements forming each
                 multiloop, and the other containing the tuples of
                 nucleotides comprising the corresponding shortest loops.
        """
        loops = set()
        for d in self.mloop_iterator():
            loop_nts = self.shortest_bg_loop(d)
            if len(loop_nts) > 0:
                # store as a sorted tuple so identical loops deduplicate
                loops.add(tuple(sorted(loop_nts)))
        loops = list(loops)
        loop_elems = []
        for loop in loops:
            all_loops = set([self.get_node_from_residue_num(n) for n in loop])
            # some multiloops might not contain any nucleotides, so we
            # have to explicitly add these
            for a, b in it.combinations(all_loops, r=2):
                common_edges = set.intersection(self.edges[a], self.edges[b])
                for e in common_edges:
                    all_loops.add(e)
            loop_elems += [all_loops]
        return loop_elems, loops
def seq_ids_from_seq(self):
"""
Get the sequence ids of the string.
"""
self.seq_ids = []
# when provided with just a sequence, we presume that the
# residue ids are numbered from 1-up
for i, s in enumerate(self.seq):
self.seq_ids += [(' ', i + 1, ' ')]
def remove_degenerate_nodes(self):
"""
For now just remove all hairpins that have no length.
"""
to_remove = []
for d in self.defines:
if d[0] == 'h' and len(self.defines[d]) == 0:
to_remove += [d]
for r in to_remove:
self.remove_vertex(r)
    def from_stems_and_bulges(self, stems, bulges):
        """
        Create the graph from the list of stems and bulges.
        :param stems: A list of tuples of two two-tuples, each containing the start
                      and end nucleotides of each strand of the stem.
        :param bulges: A list of tuples containing the starts and ends of the
                       of the bulge regions.
        :return: Nothing, just make the bulgegraph
        """
        for i in range(len(stems)):
            # one is added to each coordinate to make up for the fact that
            # residues are 1-based
            ss1 = stems[i][0][0] + 1
            ss2 = stems[i][0][1] + 1
            se1 = stems[i][1][0] + 1
            se2 = stems[i][1][1] + 1
            self.defines['y%d' % (i)] = [min(ss1, se1), max(ss1, se1),
                                         min(ss2, se2), max(ss2, se2)]
            self.weights['y%d' % (i)] = 1
        for i in range(len(bulges)):
            bulge = bulges[i]
            self.defines['b%d' % (i)] = sorted([bulge[0] + 1, bulge[1] + 1])
            self.weights['b%d' % (i)] = 1
        # wire up the graph, then simplify and canonicalize it
        self.create_bulge_graph(stems, bulges)
        self.create_stem_graph(stems, len(bulges))
        self.collapse()
        self.relabel_nodes()
        self.remove_degenerate_nodes()
        self.sort_defines()
    def dissolve_length_one_stems(self):
        """
        Dissolve all stems which have a length of one.

        The scan restarts after each dissolution because dissolving a
        stem rebuilds the graph and invalidates the iteration.
        """
        repeat = True
        while repeat:
            repeat = False
            for k in self.defines:
                if k[0] == 's' and self.stem_length(k) == 1:
                    self.dissolve_stem(k)
                    repeat = True
                    break
    def from_dotbracket(self, dotbracket_str, dissolve_length_one_stems=False):
        """
        Populate the BulgeGraph structure from a dotbracket representation.
        ie: ..((..))..
        :param dotbracket_str: A string containing the dotbracket representation
                               of the structure
        :param dissolve_length_one_stems: If True, remove stems consisting of
                                          a single base pair afterwards.
        """
        # re-run __init__ to reset all state before rebuilding
        self.__init__()
        self.dotbracket_str = dotbracket_str
        self.seq_length = len(dotbracket_str)
        if len(dotbracket_str) == 0:
            return
        # dot-bracket -> pair table -> pair tuples -> graph
        pt = dotbracket_to_pairtable(dotbracket_str)
        tuples = pairtable_to_tuples(pt)
        self.from_tuples(tuples)
        if dissolve_length_one_stems:
            self.dissolve_length_one_stems()
def to_pair_table(self):
"""
Create a pair table from the list of elements.
The first element in the returned list indicates the number of
nucleotides in the structure.
i.e. [5,5,4,0,2,1]
"""
pair_tuples = self.to_pair_tuples()
return tuples_to_pairtable(pair_tuples)
def to_pair_tuples(self):
"""
Create a list of tuples corresponding to all of the base pairs in the
structure. Unpaired bases will be shown as being paired with a
nucleotide numbered 0.
i.e. [(1,5),(2,4),(3,0),(4,2),(5,1)]
"""
# iterate over each element
table = []
for d in self.defines:
# iterate over each nucleotide in each element
for b in self.define_residue_num_iterator(d):
p = self.pairing_partner(b)
if p is None:
p = 0
table += [(b, p)]
return table
def to_bpseq_string(self):
"""
Create a bpseq string from this structure.
"""
out_str = ''
for i in range(1, self.seq_length + 1):
pp = self.pairing_partner(i)
if pp is None:
pp = 0
out_str += "{} {} {}\n".format(i, self.seq[i - 1], pp)
return out_str
def bpseq_to_tuples_and_seq(self, bpseq_str):
"""
Convert a bpseq string to a list of pair tuples and a sequence
dictionary. The return value is a tuple of the list of pair tuples
and a sequence string.
:param bpseq_str: The bpseq string
:return: ([(1,5),(2,4),(3,0),(4,2),(5,1)], 'ACCAA')
"""
lines = bpseq_str.split('\n')
seq = []
tuples = []
for line in lines:
parts = line.split()
if len(parts) == 0:
continue
(t1, s, t2) = (int(parts[0]), parts[1], int(parts[2]))
tuples += [(t1, t2)]
seq += [s]
seq = "".join(seq).upper().replace('T', 'U')
return (tuples, seq)
def from_tuples(self, tuples):
"""
Create a bulge_graph from a list of pair tuples. Unpaired
nucleotides have a pairing partner of 0.
"""
stems = []
bulges = []
tuples.sort()
tuples = iter(tuples)
(t1, t2) = tuples.next()
prev_from = t1
prev_to = t2
start_from = prev_from
start_to = prev_to
last_paired = prev_from
for t1, t2 in tuples:
(from_bp, to_bp) = (t1, t2)
if abs(to_bp - prev_to) == 1 and prev_to != 0:
# stem
if (((prev_to - prev_from > 0 and to_bp - from_bp > 0) or
(prev_to - prev_from < 0 and to_bp - from_bp < 0)) and
(to_bp - prev_to) == -(from_bp - prev_from)):
(prev_from, prev_to) = (from_bp, to_bp)
last_paired = from_bp
continue
if to_bp == 0 and prev_to == 0:
# bulge
(prev_from, prev_to) = (from_bp, to_bp)
continue
else:
if prev_to != 0:
new_stem = tuple(
sorted([tuple(sorted([start_from - 1, start_to - 1])),
tuple(sorted([prev_from - 1, prev_to - 1]))]))
if new_stem not in stems:
stems += [new_stem]
last_paired = from_bp
start_from = from_bp
start_to = to_bp
else:
new_bulge = ((last_paired - 1, prev_from - 1))
bulges += [new_bulge]
start_from = from_bp
start_to = to_bp
prev_from = from_bp
prev_to = to_bp
# Take care of the last element
if prev_to != 0:
new_stem = tuple(
sorted([tuple(sorted([start_from - 1, start_to - 1])),
tuple(sorted([prev_from - 1, prev_to - 1]))]))
if new_stem not in stems:
stems += [new_stem]
if prev_to == 0:
new_bulge = ((last_paired - 1, prev_from - 1))
bulges += [new_bulge]
self.from_stems_and_bulges(stems, bulges)
def sort_defines(self):
    """
    Sort the defines of interior loops and stems so that the 5' region
    is always first.
    """
    for name in list(self.defines.keys()):
        coords = self.defines[name]
        # only four-coordinate defines (stems / interior loops) have two
        # strands that can be out of order
        if len(coords) == 4 and coords[0] > coords[2]:
            self.defines[name] = [coords[2], coords[3], coords[0], coords[1]]
def to_dotbracket_string(self):
    """
    Convert the BulgeGraph representation to a dot-bracket string
    and return it.

    :return: A dot-bracket representation of this BulgeGraph
    """
    pair_table = self.to_pair_table()
    return pairtable_to_dotbracket(pair_table)
def sorted_stem_iterator(self):
    """
    Iterate over the stems, ordered by the lowest numbered nucleotide
    in each stem.
    """
    ordered = sorted((name for name in self.defines if name[0] == 's'),
                     key=lambda name: self.defines[name][0])
    for name in ordered:
        yield name
def is_single_stranded(self, node):
    """
    Does this node represent a single-stranded region?

    Single stranded regions are five-prime and three-prime unpaired
    regions, multiloops, and hairpins.

    :param node: The name of the node
    :return: True if yes, False if no
    """
    return node[0] in ('f', 't', 'm', 'h')
def get_node_dimensions(self, node):
    """
    Return the dimensions of a node.

    If the node is a stem, then the dimensions will be (l, l) where l is
    the length of the stem. Otherwise, see get_bulge_dimensions(node).

    :param node: The name of the node
    :return: A pair containing its dimensions
    """
    if node[0] == 's':
        return (self.stem_length(node), self.stem_length(node))
    else:
        # an unreachable triple-quoted string (dead code that sat after the
        # return above) was removed
        return self.get_bulge_dimensions(node)
def adjacent_stem_pairs_iterator(self):
    """
    Iterate over all pairs of stems which are separated by some element.

    This will always yield triples of the form (s1, e1, s2) where s1 and
    s2 are the stem identifiers and e1 denotes the element that separates
    them.
    """
    for elem in list(self.defines.keys()):
        neighbors = list(self.edges[elem])
        if len(neighbors) != 2:
            continue
        first, second = neighbors
        if first[0] == 's' and second[0] == 's':
            yield (first, elem, second)
def stem_bp_iterator(self, stem):
    """
    Iterate over all the base pairs in the stem, yielding
    (5' residue, 3' residue) tuples from the outside in.
    """
    coords = self.defines[stem]
    for offset in range(self.stem_length(stem)):
        yield (coords[0] + offset, coords[3] - offset)
def get_connected_residues(self, s1, s2):
    """
    Get the nucleotides which are connected by the element separating
    s1 and s2. They should be adjacent stems.

    The connected nucleotides are those which are spanned by a single
    interior loop or multiloop. In the case of an interior loop, this
    function will return a list of two tuples and in the case of
    multiloops it will be a list of one tuple.

    If the two stems are not separated by a single element, then return
    an empty list.
    """
    # sort the stems according to the number of their first nucleotide
    # NOTE(review): this sorted list is never used below -- s1 and s2 are
    # accessed directly
    stems = [s1, s2]
    stems.sort(key=lambda x: self.defines[x][0])

    c1 = self.edges[s1]
    c2 = self.edges[s2]

    # find out which edges they share
    common_edges = c1.intersection(c2)

    if len(common_edges) == 0:
        # not connected
        return []

    if len(common_edges) > 1:
        raise Exception("Too many connections between the stems")

    # the element linking the two stems
    conn = list(common_edges)[0]

    # find out the sides of the stems that face the bulge
    (s1b, s1e) = self.get_sides(s1, conn)
    (s2b, s2e) = self.get_sides(s2, conn)

    # get the nucleotides on the side facing the stem
    s1_nucleotides = self.get_side_nucleotides(s1, s1b)
    s2_nucleotides = self.get_side_nucleotides(s2, s2b)

    # find out the distances between all the nucleotides flanking
    # the bulge
    dists = []
    for n1 in s1_nucleotides:
        for n2 in s2_nucleotides:
            dists += [(abs(n2 - n1), n1, n2)]
    dists.sort()

    # return the ones which are closest to each other
    if conn[0] == 'i':
        # interior loop: one connected pair per strand
        return sorted([sorted(dists[0][1:]), sorted(dists[1][1:])])
    else:
        # multiloop: a single connected pair
        return sorted([sorted(dists[0][1:])])
def get_side_nucleotides(self, stem, side):
    """
    Get the nucleotide numbers on the given side of the stem. Side 0
    corresponds to the 5' end of the stem whereas side 1 corresponds
    to the 3' side of the stem.

    :param stem: The name of the stem
    :param side: Either 0 or 1, indicating the 5' or 3' end of the stem
    :return: A tuple of the nucleotide numbers on the given side of
             the stem.
    :raises Exception: If side is neither 0 nor 1.
    """
    if side == 0:
        return (self.defines[stem][0], self.defines[stem][3])
    elif side == 1:
        return (self.defines[stem][1], self.defines[stem][2])

    # The original interpolated (stem, side) into ("%d", "%s"), which
    # raised a TypeError instead of this intended Exception.
    raise Exception("Invalid side (%d) for the stem (%s)." % (side, stem))
def get_any_sides(self, e1, e2):
    """
    Get the side of e1 that e2 is on. The only difference from the
    get_sides method is the fact that e1 does not have to be a stem.

    0 indicates that e2 is on the side with lower numbered nucleotides
    and 1 indicates that e2 is on the side with greater nucleotide
    numbers.

    :param e1: The name of the first element.
    :param e2: The name of the second element.
    :return: A tuple indicating the side of e1 adjacent to e2 and the
             side of e2 adjacent to e1, or None when neither is a stem.
    """
    if e1[0] == 's':
        return self.get_sides(e1, e2)
    if e2[0] == 's':
        sides = self.get_sides(e2, e1)
        return sides[::-1]
    return None
def get_sides(self, s1, b):
    """
    Get the side of s1 that is next to b.

    s1e -> s1b -> b

    :param s1: The stem.
    :param b: The bulge.
    :return: A tuple indicating which side is the one next to the bulge
             and which is away from the bulge.
    """
    s1d = self.defines[s1]
    bd = self.defines[b]

    # if the bulge is a length 0, multiloop then use the adjacent
    # stem to determine its side
    if len(bd) == 0:
        edges = self.edges[b]

        for e in edges:
            if e != s1:
                bd = self.defines[e]
                break

    # range() instead of the Python-2-only xrange()
    for i in range(4):
        for k in range(len(bd)):
            if s1d[i] - bd[k] == 1:
                # bulge residue immediately before this stem coordinate
                if i == 0:
                    s1b = 0
                    break
                if i == 2:
                    s1b = 1
                    break
            elif s1d[i] - bd[k] == -1:
                # bulge residue immediately after this stem coordinate
                if i == 1:
                    s1b = 1
                    break
                if i == 3:
                    s1b = 0
                    break

    # NOTE(review): if the stem and bulge are not adjacent at all, s1b is
    # never assigned and this raises UnboundLocalError -- presumably the
    # callers guarantee adjacency; verify before relying on it.
    if s1b == 0:
        s1e = 1
    else:
        s1e = 0

    return (s1b, s1e)
def get_sides_plus(self, s1, b):
    """
    Get the side of s1 that is next to b.

    s1e -> s1b -> b

    :param s1: The stem.
    :param b: The bulge.
    :return: A tuple indicating the corner of the stem that connects
             to the bulge as well as the corner of the bulge that
             connects to the stem.
    :raises Exception: If the stem and bulge are not adjacent.
    """
    s1d = self.defines[s1]
    bd = self.defines[b]

    # a zero-length bulge has no coordinates of its own; borrow them
    # from the element on its other side
    if len(bd) == 0:
        edges = self.edges[b]
        for e in edges:
            if e != s1:
                bd = self.defines[e]
                break

    # range() instead of the Python-2-only xrange()
    for k in range(len(bd)):
        # before the stem on the 5' strand
        if s1d[0] - bd[k] == 1:
            return (0, k)
        # after the stem on the 5' strand
        elif bd[k] - s1d[1] == 1:
            return (1, k)
        # before the stem on the 3' strand
        elif s1d[2] - bd[k] == 1:
            return (2, k)
        # after the stem on the 3' strand
        elif bd[k] - s1d[3] == 1:
            return (3, k)

    raise Exception("Faulty multiloop %s connecting %s"
                    % (" ".join(map(str, bd)),
                       " ".join(map(str, s1d))))
def stem_side_vres_to_resn(self, stem, side, vres):
    """
    Return the residue number given the stem name, the strand (side)
    it's on and the virtual residue number.
    """
    coords = self.defines[stem]
    return coords[0] + vres if side == 0 else coords[3] - vres
def stem_iterator(self):
    """Iterate over all of the stems in the structure."""
    for name in list(self.defines.keys()):
        if name.startswith('s'):
            yield name
def hloop_iterator(self):
    """Iterate over all of the hairpin loops in the structure."""
    for name in list(self.defines.keys()):
        if name.startswith('h'):
            yield name
def mloop_iterator(self):
    """Iterate over all of the multiloops in the structure."""
    for name in list(self.defines.keys()):
        if name.startswith('m'):
            yield name
def iloop_iterator(self):
    """Iterate over all of the interior loops in the structure."""
    for name in list(self.defines.keys()):
        if name.startswith('i'):
            yield name
def floop_iterator(self):
    """
    Yield the name of the 5' unpaired region if it is present in
    the structure.
    """
    if 'f1' in self.defines:
        yield 'f1'
def tloop_iterator(self):
    """
    Yield the name of the 3' unpaired region if it is present in
    the structure.
    """
    if 't1' in self.defines:
        yield 't1'
def pairing_partner(self, nucleotide_number):
    """
    Return the base pairing partner of the nucleotide at position
    nucleotide_number. If this nucleotide is unpaired, return None.

    :param nucleotide_number: The position of the query nucleotide in
                              the sequence.
    :return: The number of the nucleotide base paired with the one at
             position nucleotide_number, or None.
    """
    for stem in self.stem_iterator():
        for res_a, res_b in self.stem_bp_iterator(stem):
            if res_a == nucleotide_number:
                return res_b
            if res_b == nucleotide_number:
                return res_a
    return None
def connections(self, bulge):
    """
    Return the edges that connect to a bulge in a list form,
    sorted by lowest res number of the connection.
    """
    def sort_key(x):
        # NOTE(review): when self.defines[x] is empty this falls through
        # and returns None; sorting None against ints works on Python 2
        # but raises TypeError on Python 3 -- verify callers never hit it.
        if len(self.defines[x]) > 0:
            if self.defines[x][0] == 1:
                # special case for stems at the beginning since there is no
                # adjacent nucleotide 0
                return 0
            # lowest adjacent residue number of this connection
            return list(self.define_residue_num_iterator(x, adjacent=True))[0]

    connections = list(self.edges[bulge])
    connections.sort(key=sort_key)

    return connections
def get_define_seq_str(self, d, adjacent=False):
    """
    Get an array containing the sequences for the given define.

    Non-stem sequences will contain the sequence without the overlapping
    stem residues that are part of the define.

    :param d: The define for which to get the sequences
    :param adjacent: When True, include the flanking stem residues on
        either side of the unpaired region.
    :return: An array containing the sequences corresponding to the defines
    """
    define = self.defines[d]
    # pair up consecutive coordinates: [a, b, c, d] -> [(a, b), (c, d)]
    ranges = zip(*[iter(define)] * 2)
    c = self.connections(d)

    if d[0] == 'i':
        # interior loop: one sequence per strand, delimited by the two
        # flanking stems (defines are 1-based, slices 0-based)
        s1 = self.defines[c[0]]
        s2 = self.defines[c[1]]
        if adjacent:
            return [self.seq[s1[1] - 1:s2[0]],
                    self.seq[s2[3] - 1:s1[2]]]
        else:
            return [self.seq[s1[1]:s2[0] - 1],
                    self.seq[s2[3]:s1[2] - 1]]
    if d[0] == 'm':
        # multiloop segment: a single strand between two stem corners
        s1 = self.defines[c[0]]
        s2 = self.defines[c[1]]

        i1 = s1[self.get_sides_plus(c[0], d)[0]]
        i2 = s2[self.get_sides_plus(c[1], d)[0]]

        (i1, i2) = (min(i1, i2), max(i1, i2))

        if adjacent:
            return [self.seq[i1 - 1:i2]]
        else:
            return [self.seq[i1:i2 - 1]]
    else:
        # stems, hairpins and 5'/3' regions: slice each (start, end)
        # range directly
        seqs = []
        for r in ranges:
            if d[0] == 's':
                seqs += [self.seq[r[0] - 1:r[1]]]
            else:
                if adjacent:
                    if r[0] > 1:
                        seqs += [self.seq[r[0] - 2:r[1] + 1]]
                    else:
                        # no nucleotide exists before position 1
                        seqs += [self.seq[r[0] - 1:r[1] + 1]]
                else:
                    seqs += [self.seq[r[0] - 1:r[1]]]

        return seqs
def get_stem_direction(self, s1, s2):
    """
    Return 0 if the lowest numbered residue in s1 is lower than the
    lowest numbered residue in s2, otherwise 1.
    """
    first = self.defines[s1][0]
    second = self.defines[s2][0]
    return 0 if first < second else 1
def get_multiloop_side(self, m):
    """
    Find out which strand a multiloop is on. An example of a situation
    in which the loop can be on both sides can be seen in the
    three-stemmed structure below::

        (.().().)

    In this case, the first multiloop section comes off of the 5' strand
    of the first stem (the prior stem is always the one with a lower
    numbered first residue). The second multiloop section comes off the
    3' strand of the second stem and the third loop comes off the 3'
    strand of the third stem.
    """
    stems = self.connections(m)
    first_corner = self.get_sides_plus(stems[0], m)
    second_corner = self.get_sides_plus(stems[1], m)
    return (first_corner[0], second_corner[0])
def get_strand(self, multiloop):
    """
    Get the strand on which this multiloop is located.

    :param multiloop: The name of the multiloop
    :return: 0 for being on the lower numbered strand, 1 for being on
             the higher numbered strand, 2 otherwise.
    """
    conn = self.connections(multiloop)
    t = self.connection_type(multiloop, conn)

    # an unreachable trailing `pass` after this if/elif/else was removed
    if abs(t) == 2:
        return 1
    elif abs(t) == 3:
        return 0
    else:
        return 2
def get_bulge_dimensions(self, bulge):
    """
    Return the dimensions of the bulge.

    If it is single stranded it will be (0, x). Otherwise it will be (x, y).

    :param bulge: The name of the bulge.
    :return: A pair containing its dimensions
    """
    bd = self.defines[bulge]
    c = self.connections(bulge)

    if bulge[0] == 'i':
        # if this interior loop only has one unpaired region
        # then we have to find out if it's on the 5' strand or
        # the 3' strand
        # Example:
        # s1 1 3
        # 23 25
        # s2 5 10
        # 15 20
        s1 = self.defines[c[0]]
        s2 = self.defines[c[1]]
        # gaps between the flanking stems on each strand
        dims = (s2[0] - s1[1] - 1, s1[2] - s2[3] - 1)

    if bulge[0] == 'm':
        # Multiloops are also pretty easy
        # 1000 appears to act as a "very large" placeholder for the
        # second dimension
        if len(bd) == 2:
            dims = (bd[1] - bd[0] + 1, 1000)
        else:
            dims = (0, 1000)
    if bulge[0] == 'f' or bulge[0] == 't':
        dims = (bd[1] - bd[0] + 1, -1)

    if bulge[0] == 'h':
        dims = (bd[1] - bd[0] + 1, -1)

    # NOTE(review): if this is ever called with a stem name ('s...'),
    # dims is never assigned and the return raises UnboundLocalError --
    # presumably callers only pass bulge elements; verify.
    return dims
def get_node_from_residue_num(self, base_num, seq_id=False):
    """
    Iterate over the defines and see which one encompasses this base.

    :param base_num: The 1-based residue number (or a seq id when
                     seq_id is True).
    :param seq_id: When True, compare against self.seq_ids instead of
                   plain residue numbers.
    :return: The name of the element containing the residue.
    :raises Exception: If no define contains the residue.
    """
    for key in self.defines.keys():
        define = self.defines[key]

        for i in range(0, len(define), 2):
            a = [int(define[i]), int(define[i + 1])]
            a.sort()

            if seq_id:
                # use a dedicated loop variable; the original reused `i`,
                # shadowing the outer loop index
                for res in range(a[0], a[1] + 1):
                    if self.seq_ids[res - 1][1] == base_num:
                        return key
            else:
                if base_num >= a[0] and base_num <= a[1]:
                    return key

    raise Exception(
        "Base number %d not found in the defines." % (base_num))
def get_length(self, vertex):
    """
    Get the minimum length of a vertex.

    If it's a stem, then the result is its length (in base pairs).
    If it's a bulge, then the length is the smaller of its dimensions.

    :param vertex: The name of the vertex.
    """
    if vertex[0] == 's':
        coords = self.defines[vertex]
        return abs(coords[1] - coords[0]) + 1

    if len(self.edges[vertex]) == 1:
        # dangling element: just the span of its single region
        coords = self.defines[vertex]
        return coords[1] - coords[0] + 1

    dims = sorted(self.get_bulge_dimensions(vertex))
    if vertex[0] == 'i':
        # interior loops use the average of both strand lengths
        return sum(dims) / float(len(dims))
    return min(dims)
def get_flanking_region(self, bulge_name, side=0):
    """
    If a bulge is flanked by stems, return the lowest residue number
    of the previous stem and the highest residue number of the next
    stem.

    :param bulge_name: The name of the bulge
    :param side: The side of the bulge (indicating the strand)
    """
    c = self.connections(bulge_name)

    if bulge_name[0] == 'h':
        # hairpin: bounded by its single stem on both sides
        s1 = self.defines[c[0]]
        return (s1[0], s1[3])

    s1 = self.defines[c[0]]
    s2 = self.defines[c[1]]

    if bulge_name[0] == 'i':
        # interior loop
        if side == 0:
            return (s1[0], s2[1])
        else:
            return (s2[2], s1[3])
    elif bulge_name[0] == 'm':
        ss = self.get_multiloop_side(bulge_name)
        st = [s1, s2]

        ends = []

        # go through the two sides and stems and pick
        # the other end of the same strand
        for i, s in enumerate(ss):
            if s == 0:
                ends += [st[i][1]]
            elif s == 1:
                ends += [st[i][0]]
            elif s == 2:
                ends += [st[i][3]]
            elif s == 3:
                ends += [st[i][2]]
            else:
                raise Exception("Weird multiloop sides: %s" %
                                bulge_name)

        ends.sort()

        return tuple(ends)
        # multiloop

    # 5'/3' unpaired regions are not handled
    return (None, None)
def get_flanking_sequence(self, bulge_name, side=0):
    """
    Return the sequence of the region flanking the given bulge.

    :param bulge_name: The name of the bulge
    :param side: The side of the bulge (indicating the strand)
    :raises Exception: If the graph carries no sequence.
    """
    if len(self.seq) == 0:
        raise Exception(
            "No sequence present in the bulge_graph: %s" % (self.name))

    start, end = self.get_flanking_region(bulge_name, side)
    # defines are 1-based and inclusive; slices are 0-based
    return self.seq[start - 1:end]
def get_flanking_handles(self, bulge_name, side=0):
    """
    Get the indices of the residues for fitting bulge regions.

    So if there is a loop like so (between residues 7 and 16)::

        (((...))))
        7890123456
          ^   ^

    Then residues 9 and 13 will be used as the handles against which
    to align the fitted region.

    In the fitted region, the residues (2,6) will be the ones that will
    be aligned to the handles.

    :return: (orig_chain_res1, orig_chain_res1, flanking_res1, flanking_res2)
    """
    f1 = self.get_flanking_region(bulge_name, side)
    c = self.connections(bulge_name)

    if bulge_name[0] == 'h':
        s1 = self.defines[c[0]]
        ab = [s1[1], s1[2]]
        # absolute handle residues plus their offsets into the
        # flanking region
        return (ab[0], ab[1], ab[0] - f1[0], ab[1] - f1[0])

    s1 = self.defines[c[0]]
    s2 = self.defines[c[1]]

    if bulge_name[0] == 'm':
        sides = self.get_multiloop_side(bulge_name)
        ab = [s1[sides[0]], s2[sides[1]]]
        ab.sort()

        return (ab[0], ab[1], ab[0] - f1[0], ab[1] - f1[0])

    if bulge_name[0] == 'i':
        if side == 0:
            ab = [s1[1], s2[0]]
        else:
            ab = [s2[3], s1[2]]
        return (ab[0], ab[1], ab[0] - f1[0], ab[1] - f1[0])

    # probably still have to include the 5' and 3' regions, but that
    # will come a little later
    return None
def are_adjacent_stems(self, s1, s2, multiloops_count=True):
    """
    Are two stems separated by only one element? If multiloops should
    not count as edges, then the appropriate parameter should be set.

    :param s1: The name of the first stem
    :param s2: The name of the second stem
    :param multiloops_count: Whether to count multiloops as an edge
                             linking two stems
    """
    return any(
        s2 in self.edges[elem]
        for elem in self.edges[s1]
        if multiloops_count or elem[0] != 'm')
def random_subgraph(self, subgraph_length=None):
    """
    Return a random subgraph of this graph.

    :param subgraph_length: The desired minimum number of elements;
                            chosen at random when None.
    :return: A list containing the nodes comprising a random subgraph
    """
    if subgraph_length is None:
        subgraph_length = random.randint(1, len(self.defines))

    # list(...) so random.choice also works on Python 3, where dict
    # views are not indexable
    start_node = random.choice(list(self.defines.keys()))
    curr_length = 0
    visited = set()
    next_nodes = [start_node]
    new_graph = []

    while curr_length < subgraph_length:
        curr_node = random.choice(next_nodes)
        if curr_node[0] == 'i' or curr_node[0] == 'm':
            # if it's an interior loop or a multiloop, then we have to
            # add the adjacent stems
            for e in self.edges[curr_node]:
                if e in new_graph:
                    continue
                visited.add(e)
                new_graph += [e]
                next_nodes += list(self.edges[e])
                curr_length += 1

        visited.add(curr_node)
        next_nodes += list(self.edges[curr_node])
        next_nodes = [n for n in next_nodes if n not in visited]
        new_graph += [curr_node]
        curr_length += 1  # self.element_length(curr_node)

    return new_graph
def same_stem_end(self, sd):
    """
    Return the index of the define that is on the same end of the
    stem as the index sd.

    :param sd: An index into a define.
    :return: The index pointing to the nucleotide on the other strand
             on the same side as the stem.
    """
    # 0 <-> 3 and 1 <-> 2 sit on the same stem end
    mapping = {0: 3, 1: 2, 2: 1}
    return mapping.get(sd, 0)
def get_resseqs(self, define, seq_ids=True):
    """
    Return the pdb ids of the nucleotides in this define.

    :param define: The name of this element.
    :param seq_ids: When True, translate residue numbers through
                    self.seq_ids.
    :return: A list of arrays containing the residue ids on each strand.
    """
    resnames = []
    coords = self.defines[define]

    # consecutive (start, end) pairs, one per strand
    for start, end in zip(coords[::2], coords[1::2]):
        residues = list(range(start, end + 1))
        if seq_ids:
            resnames.append([self.seq_ids[r - 1] for r in residues])
        else:
            resnames.append(residues)

    return resnames
def connected_stem_iterator(self):
    """
    Iterate over all pairs of connected stems, yielding
    (stem1, connecting element, stem2) triples.
    """
    for link in it.chain(self.mloop_iterator(), self.iloop_iterator()):
        neighbors = list(self.edges[link])
        yield (neighbors[0], link, neighbors[1])
def get_mst(self):
    """
    Create a minimum spanning tree from this BulgeGraph. This is useful
    for constructing a structure where each section of a multiloop is
    sampled independently and we want to introduce a break at the largest
    multiloop section.
    """
    # element-type preference used to order candidate connectors
    priority = {'s': 1, 'i': 2, 'm': 3, 'f': 4, 't': 5}
    # keep track of all linked nodes
    edges = sorted(it.chain(self.mloop_iterator(),
                            self.iloop_iterator()),
                   key=lambda x: (priority[x[0]], min(self.get_node_dimensions(x))))

    # every stem and the 5'/3' regions start out in the tree
    mst = set(it.chain(self.stem_iterator(),
                       self.floop_iterator(),
                       self.tloop_iterator()))

    # store all of the disconnected trees
    forest = [set([m]) for m in mst]

    # get the tree containing a particular element
    def get_tree(elem):
        for t in forest:
            if elem in t:
                return t

    # Kruskal-style union: take the cheapest remaining connector and
    # merge the two trees it joins
    while len(edges) > 0:
        conn = edges.pop(0)
        neighbors = list(self.edges[conn])

        # get the trees containing the neighbors of this node
        # the node should be an interior loop or multiloop so
        # the neighbors should necessarily be stems, 5' or 3'
        t1 = get_tree(neighbors[0])
        t2 = get_tree(neighbors[1])

        if len(set.intersection(t1, t2)) == 0:
            # if this node connects two disparate trees, then add it to the
            # mst
            new_tree = t1.union(t2)
            forest.remove(t1)
            forest.remove(t2)
            forest.append(new_tree)

            mst.add(conn)

    return mst
def traverse_graph(self):
    """
    Traverse the graph to get the angle types. The angle type depends on
    which corners of the stem are connected by the multiloop or internal
    loop.

    Also populates self.build_paths and self.build_order.
    """
    if self.mst is None:
        self.mst = self.get_mst()

    build_order = []
    to_visit = [('s0', 'start')]
    visited = set(['s0'])
    build_paths = col.defaultdict(list)

    while len(to_visit) > 0:
        # always expand the node with the smallest minimum dimension next
        to_visit.sort(key=lambda x: min(self.get_node_dimensions(x[0])))
        (current, prev) = to_visit.pop(0)

        for e in self.edges[current]:
            if e not in visited and e in self.mst:
                # make sure the node hasn't been visited
                # and is in the minimum spanning tree
                to_visit.append((e, current))

                build_paths[e] += [e]
                build_paths[e] += build_paths[current]

                visited.add(e)

        if current[0] != 's' and len(self.edges[current]) == 2:
            # multiloop or interior loop

            # overkill method of getting the stem that isn't
            # equal to prev
            next_stem = set.difference(self.edges[current],
                                       set([prev]))
            build_order += [(prev, current, list(next_stem)[0])]

    self.build_paths = build_paths
    self.build_order = build_order
    return build_order
def set_angle_types(self):
    """
    Fill in the angle types based on the build order.
    """
    if self.build_order is None:
        self.traverse_graph()

    self.ang_types = {}
    for stem1, bulge, stem2 in self.build_order:
        self.ang_types[bulge] = self.connection_type(bulge, [stem1, stem2])
def get_angle_type(self, bulge):
    """
    Return what type of angle this bulge is, based on the way this
    would be built using a breadth-first traversal along the minimum
    spanning tree. Returns None for bulges without an angle type.
    """
    if self.ang_types is None:
        self.set_angle_types()

    return self.ang_types.get(bulge)
def is_node_pseudoknot(self, d):
    """
    Is a particular multiloop part of a pseudoknot?
    """
    conn = self.connections(d)
    return abs(self.connection_type(d, conn)) == 5
def is_loop_pseudoknot(self, loop):
    """
    Is a particular loop a pseudoknot?

    :param loop: A list of elements that are part of the loop.
    :return: Either True or False
    """
    allowed = (2, 3, 4)
    seen_types = set()

    for elem in loop:
        if not elem.startswith('m'):
            continue

        ctype = self.connection_type(elem, self.connections(elem))
        if ctype not in allowed:
            return True
        seen_types.add(ctype)

    # a regular multiloop contains exactly the three allowed angle types
    return len(seen_types) != 3
def is_pseudoknot(self):
    """
    Is any multiloop of this bulge graph part of a pseudoknot?
    """
    return any(self.is_node_pseudoknot(d) for d in self.mloop_iterator())
'''
def to_networkx(self):
"""
Convert this graph to a networkx representation. This representation
will contain all of the nucleotides as nodes and all of the base pairs
as edges as well as the adjacent nucleotides.
"""
import networkx as nx
G = nx.Graph()
residues = []
for d in self.defines:
prev = None
for r in self.define_residue_num_iterator(d):
G.add_node(r)
residues += [r]
residues.sort()
prev = None
for r in residues:
if prev is not None:
G.add_edge(prev, r)
prev = r
for s in self.stem_iterator():
for (f, t) in self.stem_bp_iterator(s):
G.add_edge(f, t)
return G
'''
def ss_distance(self, e1, e2):
    '''
    Calculate the distance between two elements (e1, e2)
    along the secondary structure. The distance only starts
    at the edge of each element, and is the closest distance
    between the two elements.

    :param e1: The name of the first element
    :param e2: The name of the second element
    :return: The integer distance between the two along the secondary
             structure.
    '''
    # get the edge nucleotides
    # thanks to:
    # http://stackoverflow.com/questions/2154249/identify-groups-of-continuous-numbers-in-a-list
    # we get the edges, except that they might be one too close because we use adjacent
    # nucleotides, nevertheless we'll take care of that later
    d1_corners = []
    d2_corners = []

    # single-argument lambdas replace the Python-2-only tuple-unpacking
    # lambda(index, item) syntax, which is a SyntaxError on Python 3
    for key, group in it.groupby(
            enumerate(self.define_residue_num_iterator(e1, adjacent=True)),
            lambda pair: pair[0] - pair[1]):
        group = map(oper.itemgetter(1), group)
        d1_corners += group

    for key, group in it.groupby(
            enumerate(self.define_residue_num_iterator(e2, adjacent=True)),
            lambda pair: pair[0] - pair[1]):
        group = map(oper.itemgetter(1), group)
        d2_corners += group

    import networkx as nx
    G = self.to_networkx()

    path_lengths = []
    for c1, c2 in it.product(d1_corners, d2_corners):
        path_lengths += [nx.shortest_path_length(G, c1, c2)]

    if e1 == e2:
        return 0
    if e1 in self.edges[e2]:
        return min(path_lengths) + 1

    # make some exceptions for edges which have length 0
    common_edges = set.intersection(self.edges[e1], self.edges[e2])
    for e in common_edges:
        if e[0] == 'i' and len(self.defines[e]) < 4:
            return min(path_lengths) + 1
        elif e[0] == 'm' and len(self.defines[e]) < 2:
            return min(path_lengths) + 1

    return min(path_lengths) + 2
def get_position_in_element(self, resnum):
    """
    Return (position within the containing element, element length
    measure) for the given residue number; the exact semantics of the
    pair differ per element type.
    """
    node = self.get_node_from_residue_num(resnum)

    if node[0] == 's':
        if self.defines[node][0] <= resnum <= self.defines[node][1]:
            # residue on the 5' strand of the stem
            return resnum - self.defines[node][0], self.defines[node][1] - self.defines[node][0]
        else:
            # residue on the 3' strand: measure from the strand end
            return abs(resnum - self.defines[node][3]), self.defines[node][1] - self.defines[node][0]
    elif node[0] == 'i':
        s0, s1 = self.connections(node)
        if self.defines[s0][1] <= resnum <= self.defines[s1][0]:
            # residue on the 5' strand of the interior loop
            return resnum - self.defines[s0][1], self.defines[s1][0] - self.defines[s0][1]
        else:
            return abs(resnum - self.defines[s0][2]) - 1, self.defines[s0][2] - self.defines[s1][3]
    elif node[0] == 'h':
        pos1 = resnum - self.defines[node][0]
        pos2 = abs(resnum - self.defines[node][1])
        # NOTE(review): "/ 2" floor-divides on Python 2 but yields a float
        # on Python 3 -- confirm which is intended before porting.
        return min(pos1, pos2) + 1, (self.defines[node][1] - self.defines[node][0] + 2) / 2

    # remaining element types: scan each (start, end) range of the define
    i = 0
    while i < len(self.defines[node]):
        s = self.defines[node][i]
        e = self.defines[node][i + 1]
        if s <= resnum <= e:
            return resnum - s + 1, e - s + 2
        i += 2

    return None
def connected(self, n1, n2):
    '''
    Are the nucleotides n1 and n2 connected?

    @param n1: A node in the BulgeGraph
    @param n2: Another node in the BulgeGraph
    @return: True or False indicating whether they are connected.
    '''
    if n1 in self.edges[n2] or n2 in self.edges[n1]:
        return True

    # two multiloops can be considered connected if they both
    # link to the same side of the same stem
    if n1.startswith('m') and n2.startswith('m'):
        shared = set.intersection(self.edges[n1], self.edges[n2])
        if shared:
            stem = list(shared)[0]
            side1 = self.get_sides_plus(stem, n1)[0]
            side2 = self.get_sides_plus(stem, n2)[0]
            if sorted([side1, side2]) in ([0, 3], [1, 2]):
                return True

    return False
def bg_from_subgraph(bg, sg):
    """
    Create a BulgeGraph from a list containing the nodes
    to take from the original.

    WARNING: The sequence information is not copied

    :param bg: The source BulgeGraph.
    :param sg: An iterable of node names to keep.
    """
    nbg = BulgeGraph()
    nbg.seq_length = 0

    for d in sg:
        # copy the define
        nbg.defines[d] = bg.defines[d][::]

    # copy edges only if they connect elements which
    # are also in the new structure
    for e in bg.edges.keys():
        if e not in sg:
            # the original also copied edge sets for nodes outside the
            # subgraph, contradicting the comment above
            continue
        for conn in bg.edges[e]:
            if conn in sg:
                nbg.edges[e].add(conn)

    return nbg
| 31.69353 | 105 | 0.514899 | 72,941 | 0.870771 | 6,019 | 0.071855 | 267 | 0.003187 | 0 | 0 | 29,838 | 0.356207 |
52935524fdd04ab3aad5b99a96020fa25d8790c3 | 3,215 | py | Python | tools/test_net.py | Willy0919/Evovling_Boxes | a8543227f9e715d67dde1ffe62ee6d0400cca517 | [
"BSD-3-Clause"
] | 74 | 2017-04-07T13:18:39.000Z | 2021-05-20T01:35:31.000Z | tools/test_net.py | Willy0919/Evovling_Boxes | a8543227f9e715d67dde1ffe62ee6d0400cca517 | [
"BSD-3-Clause"
] | 10 | 2017-06-22T12:54:17.000Z | 2021-06-28T11:28:11.000Z | tools/test_net.py | Willy0919/Evovling_Boxes | a8543227f9e715d67dde1ffe62ee6d0400cca517 | [
"BSD-3-Clause"
] | 26 | 2017-06-02T05:54:44.000Z | 2021-03-12T07:53:54.000Z | #!/usr/bin/env python
import _init_paths
from evb.test import test_net
from evb.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
def parse_args():
    """
    Parse input arguments.

    :return: The argparse namespace with the testing configuration.
    """
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
                        default=0, type=int)
    parser.add_argument('--def', dest='prototxt',
                        help='prototxt file defining the network',
                        default=None, type=str)
    parser.add_argument('--net', dest='caffemodel',
                        help='model to test',
                        default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file', default=None, type=str)
    parser.add_argument('--wait', dest='wait',
                        help='wait until net file exists',
                        default=True, type=bool)
    parser.add_argument('--imdb', dest='imdb_name',
                        help='dataset to test',
                        default='detrac', type=str)
    parser.add_argument('--comp', dest='comp_mode', help='competition mode',
                        action='store_true')
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)
    parser.add_argument('--vis', dest='vis', help='visualize detections',
                        action='store_true')
    parser.add_argument('--num_dets', dest='max_per_image',
                        help='max number of detections per image',
                        default=100, type=int)
    parser.add_argument('--test', dest='test_order',
                        help='test file',
                        # `01` was a Python 2 octal literal (value 1) and is a
                        # SyntaxError on Python 3; plain 1 is the same value
                        default=1, type=int)
    parser.add_argument('--data', dest='data_path',
                        help='set training and testing data path', default=None, type=str)

    # with no arguments at all, print usage instead of running with defaults
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    # layer optional config-file and CLI overrides on top of the defaults
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    cfg.GPU_ID = args.gpu_id
    cfg.DATASET_DIR = args.data_path

    print('Using config:')
    pprint.pprint(cfg)

    # poll until the snapshot file appears (training may still be running)
    while not os.path.exists(args.caffemodel) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel))
        time.sleep(10)

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]

    imdb = get_imdb(args.imdb_name)
    imdb.competition_mode(args.comp_mode)

    # zero-padded two-digit id of the test split, e.g. 1 -> "01"
    # NOTE(review): the extra chained name `s` looks accidental and is
    # never used again
    test_file = s = "%02d" % (args.test_order)
    print 'test_file:', test_file
    print 'max per image:',args.max_per_image
    test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis,test=test_file)
| 36.954023 | 90 | 0.597823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 711 | 0.221151 |
52964f8e82619896f05ff838aec7c2f18d6b3605 | 2,030 | py | Python | GameStore/product/migrations/0001_initial.py | hossein9090/gamestore | 62d20b1d32c52c68dfe587ae8b6de5c36c122a37 | [
"MIT"
] | null | null | null | GameStore/product/migrations/0001_initial.py | hossein9090/gamestore | 62d20b1d32c52c68dfe587ae8b6de5c36c122a37 | [
"MIT"
] | null | null | null | GameStore/product/migrations/0001_initial.py | hossein9090/gamestore | 62d20b1d32c52c68dfe587ae8b6de5c36c122a37 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-24 09:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Category,
    Productbase and ImageProduct tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                # NOTE(review): non-nullable self-referencing FK with CASCADE
                # delete -- creating a first (root) category appears
                # impossible without null=True; verify.
                ('cat_parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.category')),
            ],
        ),
        migrations.CreateModel(
            name='Productbase',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('stock', models.BooleanField(default=False)),
                ('device', models.CharField(choices=[('ps4', 'ps4'), ('ps5', 'ps5'), ('all', 'all'), ('xbox', 'xbox'), ('nintendo', 'nintendo switch')], max_length=20)),
                ('description', models.TextField(blank=True, null=True)),
                ('price', models.FloatField(default=0.0)),
                ('added_time', models.DateTimeField(auto_now_add=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='product.category')),
            ],
        ),
        migrations.CreateModel(
            name='ImageProduct',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('image', models.ImageField(upload_to='images/')),
                ('default', models.BooleanField(default=False)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.productbase')),
            ],
        ),
    ]
| 43.191489 | 169 | 0.578818 | 1,904 | 0.937931 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.174877 |
529658a483417869521cdbb5cf8a291986f401d8 | 85 | py | Python | personal_assistant/constants.py | avryhof/personal-assistant | 115b384a405ee6b2c5099619beff433b113e24ff | [
"MIT"
] | null | null | null | personal_assistant/constants.py | avryhof/personal-assistant | 115b384a405ee6b2c5099619beff433b113e24ff | [
"MIT"
] | 2 | 2021-04-06T17:59:11.000Z | 2021-06-01T23:40:39.000Z | personal_assistant/constants.py | avryhof/personal-assistant | 115b384a405ee6b2c5099619beff433b113e24ff | [
"MIT"
] | null | null | null | RECOGNIZER_SNOWBOY = 'snowboy'
RECOGNIZER_SPHINX = 'sphinx'
RECOGNIZER_WIT = 'wit.ai' | 28.333333 | 30 | 0.788235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.294118 |
5297b606a1bd8dcbc70078cca628b648831434d4 | 1,023 | py | Python | src/content-processor.py | rionaldichandraseta/anti-clickbait | 77dbaab8a64c81957d2d98b7d0b7476b814f6f49 | [
"MIT"
] | null | null | null | src/content-processor.py | rionaldichandraseta/anti-clickbait | 77dbaab8a64c81957d2d98b7d0b7476b814f6f49 | [
"MIT"
] | null | null | null | src/content-processor.py | rionaldichandraseta/anti-clickbait | 77dbaab8a64c81957d2d98b7d0b7476b814f6f49 | [
"MIT"
] | null | null | null | # Processor for kompas-data-contents.txt
MIN_IDX = 100
contentfile = open('../data/kompas-data-contents.txt', 'r')
content_data = []
for line in contentfile:
content_data.append(line)
processed_contentfile = open('../data/kompas-data-contents-processed.txt', 'w+')
for id, content in enumerate(content_data):
# idxs = [0, 0, 0, 0]
idx = content.find('KOMPAS.com')
# idxs[1] = content.find('–')
# idxs[2] = content.find('—')
# idxs[3] = content.find('--')
# If string is not found, add 100 to make the idx big
# for i, idx in enumerate(idxs):
# if idx < 0:
# idxs[i] += MIN_IDX
# separator_idx = min(idxs)
# d_dash = separator_idx == idxs[3]
# start_index = separator_idx + 1
# if d_dash:
# start_index += 1
# if start_index <= MIN_IDX:
# content = content[start_index:]
content = content[idx + 12:]
content = content.strip('-')
content = content.strip()
processed_contentfile.write(content + '\n') | 26.921053 | 80 | 0.602151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 573 | 0.557936 |
52984fb6886845bba544a9e79802fb55bac1da12 | 10,296 | py | Python | app/home/routes.py | ajietrid/network-monitoring-using-pysnmp | 095ad60b43c1e0f165a834d167e5679b886907d6 | [
"MIT"
] | null | null | null | app/home/routes.py | ajietrid/network-monitoring-using-pysnmp | 095ad60b43c1e0f165a834d167e5679b886907d6 | [
"MIT"
] | null | null | null | app/home/routes.py | ajietrid/network-monitoring-using-pysnmp | 095ad60b43c1e0f165a834d167e5679b886907d6 | [
"MIT"
] | 1 | 2021-03-08T02:52:22.000Z | 2021-03-08T02:52:22.000Z | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from app.home import blueprint
from flask import render_template, redirect, url_for, request, flash, send_file
from flask_login import login_required, current_user
from app import login_manager
from jinja2 import TemplateNotFound
import json
from app.base.forms import AddNewIPphaseone, AddNewIPphasetwo, AddNewInterface
from backend.dbhelper import DBHelper
from backend.snmphelper import SNMPhelper
from backend.selfmonitoringhelper import SelfMonitoring
# Shared helper singletons used by every view in this blueprint.
dbhelp = DBHelper()
snmphelp = SNMPhelper()
smhelp = SelfMonitoring()

# NOTE(review): these four values are computed once at *import time*, so the
# alarm badge, its counter, the client list and the sensor count never refresh
# until the process restarts -- confirm whether that is intended.
alarm_notification = dbhelp.show_notification()
counter = len(alarm_notification)
client = dbhelp.show_client_data()
all_sensor = dbhelp.counter_sensor()
@blueprint.route('/index', methods=['GET', 'POST'])
@login_required
def index():
    """Dashboard page; also handles step one of the "add new IP" wizard.

    POST with ``addnewipphaseone``: when the submitted IP is not yet in the
    database, redirect to the phase-two form; otherwise re-render the page
    with an error message.  Plain GET just renders the dashboard.
    """
    selfmon = smhelp.get_pc_stats()
    #if not current_user.is_authenticated:
    #return redirect(url_for('base_blueprint.login'))
    phaseoneform = AddNewIPphaseone(request.form)
    if 'addnewipphaseone' in request.form:
        # read form data
        ip = request.form['ip']
        # NOTE(review): check_ip appears to return the *string* "False"/"True",
        # not a bool -- confirm against DBHelper.
        if dbhelp.check_ip(ip) == "False":
            return redirect(url_for('home_blueprint.addnewipphasetwo', ip=ip, alarm = alarm_notification, counter= counter))
        else:
            client = dbhelp.show_client_data()
            return render_template('index.html', msg='IP is already on the database.', form=phaseoneform, client= client, alarm = alarm_notification, counter= counter, all_sensor= all_sensor, selfmon = selfmon)
    client = dbhelp.show_client_data()
    # Something (user or pass) is not ok
    return render_template('index.html', form =phaseoneform, client = client, alarm = alarm_notification, counter= counter, all_sensor= all_sensor, selfmon = selfmon)
@blueprint.route('/addnewipnext/<ip>', methods=['GET', 'POST'])
@login_required
def addnewipphasetwo(ip):
    """Step two of the "add new IP" wizard.

    GET: pre-fill the form with sysName/sysDescr/sysLocation queried from the
    device over SNMP (fields left blank when the query errors).
    POST with ``addnewipphasetwo``: persist the device as a parent and as an
    SQF client, then redirect to the dashboard.
    """
    phasetwoform = AddNewIPphasetwo(request.form)
    if 'addnewipphasetwo' in request.form:
        # read form data
        ip = request.form['ip']
        # NOTE(review): check_ip returns the string "True"/"False"; this
        # ``while`` behaves like an ``if`` because of the ``return`` inside.
        while dbhelp.check_ip(ip) == "True":
            return render_template('addnewparentnext.html', msg='IP is already on the database.', form=phasetwoform, alarm = alarm_notification, counter= counter)
        name = request.form['name']
        sysdescr = request.form['sysdescr']
        syslocation = request.form['syslocation']
        snmp_ver = request.form['snmp_ver']
        community_string = request.form['community_string']
        dbhelp.add_parent(name, ip, snmp_ver, community_string, sysdescr, syslocation)
        dbhelp.add_sqf_client( name, ip)
        flash('IP has successfully been added.')
        return redirect(url_for('home_blueprint.index'))
    # SNMP helpers return a tuple whose first element is 'error' on failure;
    # element [1] carries the value on success.
    name = snmphelp.get_sysname(ip)
    if name[0] == 'error':
        sysname = ""
    else:
        sysname = name[1]
    descr = snmphelp.get_sysdescr(ip)
    if descr[0] == 'error':
        sysdescr = ""
    else:
        sysdescr = descr[1]
    location = snmphelp.get_syslocation(ip)
    if location[0] == 'error':
        syslocation = ""
    else:
        syslocation = location[1]
    return render_template('addnewparentnext.html', form=phasetwoform, ipv=ip, sysnamev=sysname, sysdescrv=sysdescr, syslocationv=syslocation, alarm = alarm_notification, counter= counter)
@blueprint.route('/device/<device_id>', methods=['GET', 'POST'])
@login_required
def device_template(device_id):
    """Device detail page: stats sparklines plus the "add interface" form.

    POST with ``addnewinterface`` registers a new interface for the device
    unless it already exists, in which case the page is re-rendered with an
    error message.
    """
    selfmon = smhelp.get_pc_stats()
    uptime_sparkline = dbhelp.show_uptime_sparkline(device_id)
    traffic_sparkline_23 = dbhelp.show_traffic_sparkline(device_id, 23)
    traffic_sparkline_24 = dbhelp.show_traffic_sparkline(device_id, 24)
    ping_sparkline = dbhelp.show_ping_sparkline(device_id)
    sqf_sparkline = dbhelp.show_sqf_sparkline(device_id)
    try:
        # Fetched once here; the original duplicated both queries before the
        # try block as well, doubling the DB round-trips.
        data_by_id = dbhelp.filter_client_data_by_id(device_id)
        lastrow_log = dbhelp.get_lastrow_logs(device_id)
        interfaceform = AddNewInterface(request.form)
        if 'addnewinterface' in request.form:
            interface = request.form['interface']
            # NOTE(review): check_int returns the string "True"/"False"; the
            # ``while`` only ever runs once because of the ``return``.
            while dbhelp.check_int(data_by_id[2], interface) == "True":
                return render_template('device.html', msg='Interface is already on the database.', form=interfaceform, logs=lastrow_log, by_id=data_by_id, uptime_sparkline=uptime_sparkline, traffic_sparkline_23=traffic_sparkline_23, traffic_sparkline_24=traffic_sparkline_24, ping_sparkline=ping_sparkline, sqf_sparkline=sqf_sparkline, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon)
            name = data_by_id[1]
            host = data_by_id[2]
            dbhelp.add_int(name, host, interface)
            flash('Interface has successfully been added.')
            return redirect(url_for('home_blueprint.index'))
        return render_template('device.html', form=interfaceform, by_id=data_by_id, logs=lastrow_log, uptime_sparkline=uptime_sparkline, traffic_sparkline_23=traffic_sparkline_23, traffic_sparkline_24=traffic_sparkline_24, ping_sparkline=ping_sparkline, sqf_sparkline=sqf_sparkline, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit/KeyboardInterrupt.
        return render_template('page-500.html'), 500
@blueprint.route('/device/<device_id>/uptime')
@login_required
def uptime_template(device_id):
    """Uptime history page for one device (table, graph, CSV-download context)."""
    selfmon = smhelp.get_pc_stats()
    data_by_id = dbhelp.filter_client_data_by_id(device_id)
    uptime = dbhelp.show_uptime(device_id)
    uptime_graph = dbhelp.show_uptime_graph(device_id)
    host = data_by_id[2]
    ts = dbhelp.get_currenttimestamp()
    try:
        return render_template('uptime.html', uptime=uptime, uptime_graph=uptime_graph, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon, host=host, ts=ts, by_id=data_by_id)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit/KeyboardInterrupt.
        return render_template('page-500.html'), 500
@blueprint.route('/device/<device_id>/traffic/<interface>')
@login_required
def traffic_template(device_id, interface):
    """Traffic history page for one device interface (total, in and out graphs)."""
    selfmon = smhelp.get_pc_stats()
    data_by_id = dbhelp.filter_client_data_by_id(device_id)
    traffic = dbhelp.show_traffic(device_id, interface)
    traffic_tot = dbhelp.show_traffictot_graph(device_id, interface)
    traffic_in = dbhelp.show_trafficin_graph(device_id, interface)
    traffic_out = dbhelp.show_trafficout_graph(device_id, interface)
    host = data_by_id[2]
    ts = dbhelp.get_currenttimestamp()
    # (Removed the no-op ``interface = interface`` self-assignment.)
    try:
        return render_template('traffic.html', traffic=traffic, traffic_tot=traffic_tot, traffic_in=traffic_in, traffic_out=traffic_out, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon, host=host, ts=ts, interface=interface, by_id=data_by_id)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit/KeyboardInterrupt.
        return render_template('page-500.html'), 500
@blueprint.route('/device/<device_id>/ping')
@login_required
def ping_template(device_id):
    """Ping history page for one device (time/max/min graphs)."""
    selfmon = smhelp.get_pc_stats()
    data_by_id = dbhelp.filter_client_data_by_id(device_id)
    ping = dbhelp.show_ping(device_id)
    pingtime = dbhelp.show_pingtime_graph(device_id)
    pingmax = dbhelp.show_pingmax_graph(device_id)
    pingmin = dbhelp.show_pingmin_graph(device_id)
    host = data_by_id[2]
    ts = dbhelp.get_currenttimestamp()
    try:
        return render_template('ping.html', ping=ping, pingtime=pingtime, pingmax=pingmax, pingmin=pingmin, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon, host=host, ts=ts, by_id=data_by_id)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit/KeyboardInterrupt.
        return render_template('page-500.html'), 500
@blueprint.route('/device/<device_id>/sqf')
@login_required
def sqf_template(device_id):
    """SQF history page for one device (table + graph)."""
    selfmon = smhelp.get_pc_stats()
    data_by_id = dbhelp.filter_client_data_by_id(device_id)
    sqf = dbhelp.show_sqf(device_id)
    sqf_graph = dbhelp.show_sqf_graph(device_id)
    host = data_by_id[2]
    ts = dbhelp.get_currenttimestamp()
    try:
        return render_template('sqf.html', sqf=sqf, sqf_graph=sqf_graph, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon, host=host, ts=ts, by_id=data_by_id)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit/KeyboardInterrupt.
        return render_template('page-500.html'), 500
@blueprint.route('/download/<table>/<host>/<ts>/<interface>')
@login_required
def download_template(table, host, ts, interface):
    """Export *table* rows for *host* to a CSV under /tmp and send it.

    ``ts`` and ``interface`` are embedded in the file name so concurrent
    downloads do not clobber each other.
    """
    # NOTE(review): ``table``/``host`` come straight from the URL and are
    # concatenated into a filesystem path -- consider sanitizing them to
    # prevent path traversal.
    filename = table + "_" + host + "_" + str(ts) + "_" + str(interface)
    path = "/tmp/" + filename + ".csv"
    # Bug fix: the requested interface was ignored (hard-coded ``interface=0``),
    # so per-interface exports always contained interface 0's data even though
    # the file name claimed otherwise.
    dbhelp.export(table, host, filename, interface=interface)
    return send_file(path, as_attachment=True)
@blueprint.route('/<template>')
@login_required
def route_template(template):
    """Catch-all route: render ``<template>.html`` with the shared context.

    Unknown template names fall through to the 404 page.
    """
    selfmon = smhelp.get_pc_stats()
    logs = dbhelp.show_log()
    try:
        return render_template(template + '.html', client=client, logs=logs, counter=counter, alarm=alarm_notification, selfmon=selfmon)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit/KeyboardInterrupt.
        return render_template('page-500.html'), 500
| 40.535433 | 423 | 0.716783 | 0 | 0 | 0 | 0 | 9,496 | 0.9223 | 0 | 0 | 1,916 | 0.186092 |
52998add29338eeb164775a55420f1427cccf228 | 2,730 | py | Python | src/aws_cloudformation_power_switch/rds_cluster.py | binxio/aws-cloudformation-stack-power-off | 9faebd77e76aedb2c2cb84db47417d5a4d77bfc4 | [
"Apache-2.0"
] | 1 | 2020-01-22T21:52:13.000Z | 2020-01-22T21:52:13.000Z | src/aws_cloudformation_power_switch/rds_cluster.py | binxio/aws-cloudformation-stack-power-off | 9faebd77e76aedb2c2cb84db47417d5a4d77bfc4 | [
"Apache-2.0"
] | 3 | 2020-03-31T10:37:26.000Z | 2021-04-07T08:26:50.000Z | src/aws_cloudformation_power_switch/rds_cluster.py | binxio/aws-cloudformation-stack-power-off | 9faebd77e76aedb2c2cb84db47417d5a4d77bfc4 | [
"Apache-2.0"
] | null | null | null | import logging
from botocore.exceptions import ClientError
from aws_cloudformation_power_switch.power_switch import PowerSwitch
from aws_cloudformation_power_switch.tag import logical_id
class RDSClusterPowerSwitch(PowerSwitch):
    """Power switch that starts/stops CloudFormation-managed RDS DB clusters."""

    def __init__(self):
        super(RDSClusterPowerSwitch, self).__init__()
        self.resource_type = "AWS::RDS::DBCluster"

    def startup(self, instance: dict):
        """Start the DB cluster described by *instance*; no-op in dry-run mode."""
        name = logical_id(instance)
        cluster_id = self.instance_id(instance)
        logging.info("startup rds cluster %s", cluster_id)
        if not self.dry_run:
            try:
                self.rds.start_db_cluster(DBClusterIdentifier=cluster_id)
            except ClientError as e:
                # Bug fix: the message said "failed to stop" in the start path.
                logging.error("failed to start %s (%s), %s", name, cluster_id, e)

    def shutdown(self, instance: dict):
        """Stop the DB cluster described by *instance*; no-op in dry-run mode."""
        name = logical_id(instance)
        cluster_id = self.instance_id(instance)
        logging.info("shutdown rds cluster %s (%s)", name, cluster_id)
        if not self.dry_run:
            try:
                self.rds.stop_db_cluster(DBClusterIdentifier=cluster_id)
            except ClientError as e:
                logging.error("failed to stop %s (%s), %s", name, cluster_id, e)

    def instance_id(self, instance) -> str:
        return instance["DBClusterIdentifier"]

    def instance_state(self, instance) -> str:
        return instance["Status"]

    def instance_needs_shutdown(self, instance) -> bool:
        return self.instance_state(instance) == "available"

    def instance_needs_startup(self, instance) -> bool:
        return self.instance_state(instance) == "stopped"

    @property
    def rds(self):
        # RDS client bound to the switch's session, created on each access.
        return self.session.client("rds")

    def select_instances(self):
        """Return DescribeDBClusters records for the stack's DB clusters.

        Each record gets a synthesized ``TagList`` carrying the owning
        CloudFormation stack name and logical id so ``logical_id`` works
        on it like on tagged resources.
        """
        result = []
        if self.rds.describe_db_clusters().get("DBClusters"):
            for r in self.stack_resources:
                instance = self.rds.describe_db_clusters(
                    DBClusterIdentifier=r["PhysicalResourceId"]
                )["DBClusters"][0]
                instance["TagList"] = [
                    {"Key": "aws:cloudformation:stack-name", "Value": r["StackName"]},
                    {
                        "Key": "aws:cloudformation:logical-id",
                        "Value": r["LogicalResourceId"],
                    },
                ]
                result.append(instance)
        # Clearer than the original ``filter(lambda i: self.verbose, result)``.
        if self.verbose:
            for i in result:
                logging.info(
                    "rds cluster %s (%s) in state %s",
                    logical_id(i),
                    i["DBClusterIdentifier"],
                    i["Status"],
                )
        if not result and self.verbose:
            logging.info("No RDS clusters found")
        return result
| 35 | 86 | 0.588645 | 2,538 | 0.92967 | 0 | 0 | 70 | 0.025641 | 0 | 0 | 439 | 0.160806 |
bfdb1ec3bcf1251777d5b7ee5b6352813b38dd21 | 435 | py | Python | src/organ_match_app/matchapp/migrations/0004_auto_20190509_1708.py | ajmengistu/organmatch | 0b1549bde715eb2e44cbb6dcd34fc5e0ce315e4e | [
"bzip2-1.0.6"
] | 1 | 2019-05-07T21:47:54.000Z | 2019-05-07T21:47:54.000Z | src/organ_match_app/matchapp/migrations/0004_auto_20190509_1708.py | ajmengistu/organmatch | 0b1549bde715eb2e44cbb6dcd34fc5e0ce315e4e | [
"bzip2-1.0.6"
] | 7 | 2019-12-04T22:51:59.000Z | 2022-02-10T08:28:35.000Z | src/organ_match_app/matchapp/migrations/0004_auto_20190509_1708.py | ajmengistu/organmatch | 0b1549bde715eb2e44cbb6dcd34fc5e0ce315e4e | [
"bzip2-1.0.6"
] | 1 | 2020-07-23T03:43:46.000Z | 2020-07-23T03:43:46.000Z | # Generated by Django 2.2.1 on 2019-05-09 21:08
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``UserProfile`` model to ``Person`` in the matchapp app."""

    # Runs after the swappable user model is available and after matchapp's
    # 0003 migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('matchapp', '0003_available_doctors'),
    ]

    operations = [
        # Pure model rename; the underlying data is preserved.
        migrations.RenameModel(
            old_name='UserProfile',
            new_name='Person',
        ),
    ]
| 21.75 | 66 | 0.648276 | 317 | 0.728736 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.234483 |
bfdb50593c6e1e9d0effbbd8845a4184d945a3b0 | 547 | py | Python | plugins/minfraud/komand_minfraud/actions/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/minfraud/komand_minfraud/actions/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/minfraud/komand_minfraud/actions/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .account_lookup.action import AccountLookup
from .all_lookup.action import AllLookup
from .billing_lookup.action import BillingLookup
from .card_lookup.action import CardLookup
from .cart_lookup.action import CartLookup
from .device_lookup.action import DeviceLookup
from .email_lookup.action import EmailLookup
from .event_lookup.action import EventLookup
from .order_lookup.action import OrderLookup
from .payment_lookup.action import PaymentLookup
from .shipping_lookup.action import ShippingLookup
| 42.076923 | 50 | 0.859232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.071298 |
bfdbb2b1ddde5a49cf9aef5ea8e6fde1ea4245c1 | 3,630 | py | Python | catkin_ws/src/deep_learning/img_recognition/src/inference_to_reaction.py | CIRCUSPi/ROSKY | d0328e19cb65416b9353a1faabc725c1ac01c9f6 | [
"MIT"
] | 3 | 2021-08-03T07:08:24.000Z | 2021-09-12T14:33:41.000Z | catkin_ws/src/deep_learning/img_recognition/src/inference_to_reaction.py | kjoelovelife/ROSKY | d0328e19cb65416b9353a1faabc725c1ac01c9f6 | [
"MIT"
] | 1 | 2021-09-03T13:27:33.000Z | 2021-09-06T03:28:16.000Z | catkin_ws/src/deep_learning/img_recognition/src/inference_to_reaction.py | kjoelovelife/ROSKY | d0328e19cb65416b9353a1faabc725c1ac01c9f6 | [
"MIT"
] | 4 | 2020-12-28T08:20:07.000Z | 2021-08-10T06:55:30.000Z | #!/usr/bin/env python
import os, sys, argparse, errno, yaml, time, datetime
import rospy, rospkg
import torch, torchvision, cv2
import numpy as np
from rosky_msgs.msg import WheelsCmdStamped, Twist2DStamped
from img_recognition.msg import Inference
from cv_bridge import CvBridge, CvBridgeError
from jetcam_ros.utils import bgr8_to_jpeg
class Inference_To_Reaction(object):
    """ROS node that gates velocity commands with image-classification output.

    Subscribes to inference results and raw car commands; when the model is
    confident (above ``~confidence_threshold``) that the view is "blocked",
    all velocity gains drop to 0 (robot stops); any other confident label
    restores gains of 1 (commands pass through unchanged).
    """

    def __init__(self):
        self.package = "img_recognition"
        self.node_name = rospy.get_name()
        self.veh_name = self.node_name.split("/")[1]
        rospy.loginfo("{} Initializing inference_model.py......".format(self.node_name))
        # Block until the inference node has published at least one message.
        self.start = rospy.wait_for_message("/" + self.veh_name + "/inference_model/inference", Inference)

        # local parameter
        self.confidence = {}
        # Per-axis multipliers applied to incoming car commands.
        self.inference_gain = {
            "linear_velocity": [1, 1, 1],   # Vx, Vy, Vz
            "angular_velocity": [1, 1, 1],  # Ax, Ay, Az
        }

        # ros parameter
        self.confidence_threshold = self.setup_parameter("~confidence_threshold", 0.75)

        # setup the subscriber
        self.sub_msg_inference = rospy.Subscriber("~inference", Inference, self.inference_analyze, queue_size=1)
        self.sub_car_cmd = rospy.Subscriber("~sub_car_cmd", Twist2DStamped, self.cb_car_cmd, queue_size=1)

        # setup the publisher
        self.pub_car_cmd = rospy.Publisher("~pub_car_cmd", Twist2DStamped, queue_size=1)

    def inference_analyze(self, data):
        """Store the latest label->confidence map and react to the top label."""
        if data is None:  # idiom fix: was ``data == None``
            return
        self.confidence = dict(zip(data.labels, data.confidence))
        recognition = max(self.confidence, key=self.confidence.get)
        if self.confidence[recognition] > self.confidence_threshold:
            # Original bound the (None) result to an unused ``_reaction`` local.
            self.reaction(recognition)

    def reaction(self, recognition):
        """Set every gain to 0 when *recognition* is "blocked", otherwise to 1.

        Collapses the original three near-identical branches ("free",
        "blocked", everything else) into a single rule with the same effect.
        """
        gain = 0 if recognition == "blocked" else 1
        for key in self.inference_gain:
            for index in range(len(self.inference_gain[key])):
                self.inference_gain[key][index] = gain

    def cb_car_cmd(self, car_cmd_msg):
        """Scale the incoming command by the current gains and republish it."""
        car_cmd_msg.v = car_cmd_msg.v * self.inference_gain["linear_velocity"][0]
        car_cmd_msg.omega = car_cmd_msg.omega * self.inference_gain["angular_velocity"][2]
        self.pub_msg(car_cmd_msg)

    def pub_msg(self, car_cmd_msg):
        self.pub_car_cmd.publish(car_cmd_msg)

    def on_shutdown(self):
        rospy.loginfo("{} Close.".format(self.node_name))
        rospy.loginfo("{} shutdown.".format(self.node_name))
        rospy.sleep(1)
        rospy.is_shutdown = True

    def setup_parameter(self, param_name, value):
        """Write *value* to the ROS parameter server (for transparency) and return it."""
        rospy.set_param(param_name, value)
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value
if __name__ == "__main__" :
    # Standard ROS node bootstrap: init, register shutdown hook, spin forever.
    rospy.init_node("inference_to_reaction", anonymous=False)
    inference_to_reaction_node = Inference_To_Reaction()
    rospy.on_shutdown(inference_to_reaction_node.on_shutdown)
    rospy.spin()
| 40.333333 | 112 | 0.649311 | 3,059 | 0.8427 | 0 | 0 | 0 | 0 | 0 | 0 | 587 | 0.161708 |
bfdc8c682e1d83d06a3dc94d8bf229024b62cde2 | 2,395 | py | Python | examples/quickstart.py | farbod1277/imitation | eba376771963ce3f13b17fe23457d7235b9afc99 | [
"MIT"
] | null | null | null | examples/quickstart.py | farbod1277/imitation | eba376771963ce3f13b17fe23457d7235b9afc99 | [
"MIT"
] | null | null | null | examples/quickstart.py | farbod1277/imitation | eba376771963ce3f13b17fe23457d7235b9afc99 | [
"MIT"
] | null | null | null | """Loads CartPole-v1 demonstrations and trains BC, GAIL, and AIRL models on that data.
"""
import os
import pathlib
import pickle
import tempfile
import seals # noqa: F401
import stable_baselines3 as sb3
from imitation.algorithms import adversarial, bc
from imitation.data import rollout
from imitation.util import logger, util
dirname = os.path.dirname(__file__)

# Load pickled test demonstrations.
# SECURITY NOTE: pickle.load executes arbitrary code on untrusted input; this
# is acceptable only because the file ships with the repository's own test data.
with open(os.path.join(dirname, "../tests/testdata/expert_models/cartpole_0/rollouts/final.pkl"), "rb") as f:
    # This is a list of `imitation.data.types.Trajectory`, where
    # every instance contains observations and actions for a single expert
    # demonstration.
    trajectories = pickle.load(f)

# Convert List[types.Trajectory] to an instance of `imitation.data.types.Transitions`.
# This is a more general dataclass containing unordered
# (observation, actions, next_observation) transitions.
transitions = rollout.flatten_trajectories(trajectories)

# Two parallel CartPole environments (n_envs=2) for rollout collection.
venv = util.make_vec_env("seals/CartPole-v0", n_envs=2)

# All logs/tensorboards go into a throwaway temp directory.
tempdir = tempfile.TemporaryDirectory(prefix="quickstart")
tempdir_path = pathlib.Path(tempdir.name)
print(f"All Tensorboards and logging are being written inside {tempdir_path}/.")

# Train BC on expert data.
# BC also accepts as `expert_data` any PyTorch-style DataLoader that iterates over
# dictionaries containing observations and actions.
bc_logger = logger.configure(tempdir_path / "BC/")
bc_trainer = bc.BC(
    venv.observation_space,
    venv.action_space,
    expert_data=transitions,
    custom_logger=bc_logger,
)
bc_trainer.train(n_epochs=1)

# Train GAIL on expert data.
# GAIL, and AIRL also accept as `expert_data` any Pytorch-style DataLoader that
# iterates over dictionaries containing observations, actions, and next_observations.
gail_logger = logger.configure(tempdir_path / "GAIL/")
gail_trainer = adversarial.GAIL(
    venv,
    expert_data=transitions,
    expert_batch_size=32,
    gen_algo=sb3.PPO("MlpPolicy", venv, verbose=1, n_steps=1024),
    custom_logger=gail_logger,
)
gail_trainer.train(total_timesteps=2048)

# Train AIRL on expert data (same configuration as GAIL above).
airl_logger = logger.configure(tempdir_path / "AIRL/")
airl_trainer = adversarial.AIRL(
    venv,
    expert_data=transitions,
    expert_batch_size=32,
    gen_algo=sb3.PPO("MlpPolicy", venv, verbose=1, n_steps=1024),
    custom_logger=airl_logger,
)
airl_trainer.train(total_timesteps=2048)
| 33.263889 | 109 | 0.772443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,070 | 0.446764 |
bfde0530e9195e24f4af021d544ddf9f69b16e9f | 38,456 | py | Python | tests/integration/resources_permissions/test_epics_resources.py | aavcc/taiga-openshift | 7c33284573ceed38f755b8159ad83f3f68d2f7cb | [
"MIT"
] | null | null | null | tests/integration/resources_permissions/test_epics_resources.py | aavcc/taiga-openshift | 7c33284573ceed38f755b8159ad83f3f68d2f7cb | [
"MIT"
] | 12 | 2019-11-25T14:08:32.000Z | 2021-06-24T10:35:51.000Z | tests/integration/resources_permissions/test_epics_resources.py | threefoldtech/Threefold-Circles | cbc433796b25cf7af9a295af65d665a4a279e2d6 | [
"Apache-2.0"
] | 1 | 2018-06-07T10:58:15.000Z | 2018-06-07T10:58:15.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2017 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from taiga.projects import choices as project_choices
from taiga.projects.models import Project
from taiga.projects.epics.serializers import EpicSerializer
from taiga.projects.epics.models import Epic
from taiga.projects.epics.utils import attach_extra_info as attach_epic_extra_info
from taiga.projects.utils import attach_extra_info as attach_project_extra_info
from taiga.permissions.choices import MEMBERS_PERMISSIONS, ANON_PERMISSIONS
from taiga.projects.occ import OCCResourceMixin
from tests import factories as f
from tests.utils import helper_test_http_method, reconnect_signals
from taiga.projects.votes.services import add_vote
from taiga.projects.notifications.services import add_watcher
from unittest import mock
import pytest
pytestmark = pytest.mark.django_db
def setup_function(function):
    # pytest per-test hook: re-attach model signal handlers (they may have
    # been disconnected elsewhere in the suite) so the factories fire them.
    reconnect_signals()
@pytest.fixture
def data():
    """Permission-matrix fixture shared by every test in this module.

    Provides, on a throwaway namespace object ``m``:

    * five users: an unrelated registered user, members with/without role
      permissions, the projects' owner and an unrelated "other" user;
    * four projects: public, private-but-anon-visible, fully private and
      blocked (``BLOCKED_BY_STAFF``);
    * one epic per project, each linked to a user story, with each project's
      ``default_epic_status`` pointing at its epic's status.
    """
    m = type("Models", (object,), {})

    # --- users ------------------------------------------------------------
    m.registered_user = f.UserFactory.create()
    m.project_member_with_perms = f.UserFactory.create()
    m.project_member_without_perms = f.UserFactory.create()
    m.project_owner = f.UserFactory.create()
    m.other_user = f.UserFactory.create()

    # --- projects (re-fetched through attach_project_extra_info so they
    # --- carry the annotated fields the serializers expect) ----------------
    m.public_project = f.ProjectFactory(is_private=False,
                                        anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                        public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)) + ["comment_epic"],
                                        owner=m.project_owner,
                                        epics_csv_uuid=uuid.uuid4().hex)
    m.public_project = attach_project_extra_info(Project.objects.all()).get(id=m.public_project.id)

    m.private_project1 = f.ProjectFactory(is_private=True,
                                          anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                          public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                          owner=m.project_owner,
                                          epics_csv_uuid=uuid.uuid4().hex)
    m.private_project1 = attach_project_extra_info(Project.objects.all()).get(id=m.private_project1.id)

    m.private_project2 = f.ProjectFactory(is_private=True,
                                          anon_permissions=[],
                                          public_permissions=[],
                                          owner=m.project_owner,
                                          epics_csv_uuid=uuid.uuid4().hex)
    m.private_project2 = attach_project_extra_info(Project.objects.all()).get(id=m.private_project2.id)

    m.blocked_project = f.ProjectFactory(is_private=True,
                                         anon_permissions=[],
                                         public_permissions=[],
                                         owner=m.project_owner,
                                         epics_csv_uuid=uuid.uuid4().hex,
                                         blocked_code=project_choices.BLOCKED_BY_STAFF)
    m.blocked_project = attach_project_extra_info(Project.objects.all()).get(id=m.blocked_project.id)

    # --- memberships: one full-permission member and one zero-permission
    # --- member per private/blocked project --------------------------------
    m.public_membership = f.MembershipFactory(
        project=m.public_project,
        user=m.project_member_with_perms,
        role__project=m.public_project,
        role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    m.private_membership1 = f.MembershipFactory(
        project=m.private_project1,
        user=m.project_member_with_perms,
        role__project=m.private_project1,
        role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    f.MembershipFactory(
        project=m.private_project1,
        user=m.project_member_without_perms,
        role__project=m.private_project1,
        role__permissions=[])
    m.private_membership2 = f.MembershipFactory(
        project=m.private_project2,
        user=m.project_member_with_perms,
        role__project=m.private_project2,
        role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    f.MembershipFactory(
        project=m.private_project2,
        user=m.project_member_without_perms,
        role__project=m.private_project2,
        role__permissions=[])
    m.blocked_membership = f.MembershipFactory(
        project=m.blocked_project,
        user=m.project_member_with_perms,
        role__project=m.blocked_project,
        role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    f.MembershipFactory(project=m.blocked_project,
                        user=m.project_member_without_perms,
                        role__project=m.blocked_project,
                        role__permissions=[])

    # The owner is an admin member of every project.
    f.MembershipFactory(project=m.public_project,
                        user=m.project_owner,
                        is_admin=True)
    f.MembershipFactory(project=m.private_project1,
                        user=m.project_owner,
                        is_admin=True)
    f.MembershipFactory(project=m.private_project2,
                        user=m.project_owner,
                        is_admin=True)
    f.MembershipFactory(project=m.blocked_project,
                        user=m.project_owner,
                        is_admin=True)

    # --- one epic per project, re-fetched with the extra-info annotations --
    m.public_epic = f.EpicFactory(project=m.public_project,
                                  status__project=m.public_project)
    m.public_epic = attach_epic_extra_info(Epic.objects.all()).get(id=m.public_epic.id)
    m.private_epic1 = f.EpicFactory(project=m.private_project1,
                                    status__project=m.private_project1)
    m.private_epic1 = attach_epic_extra_info(Epic.objects.all()).get(id=m.private_epic1.id)
    m.private_epic2 = f.EpicFactory(project=m.private_project2,
                                    status__project=m.private_project2)
    m.private_epic2 = attach_epic_extra_info(Epic.objects.all()).get(id=m.private_epic2.id)
    m.blocked_epic = f.EpicFactory(project=m.blocked_project,
                                   status__project=m.blocked_project)
    m.blocked_epic = attach_epic_extra_info(Epic.objects.all()).get(id=m.blocked_epic.id)

    # --- one user story per project, linked to the project's epic ----------
    m.public_us = f.UserStoryFactory(project=m.public_project)
    m.private_us1 = f.UserStoryFactory(project=m.private_project1)
    m.private_us2 = f.UserStoryFactory(project=m.private_project2)
    m.blocked_us = f.UserStoryFactory(project=m.blocked_project)

    m.public_related_us = f.RelatedUserStory(epic=m.public_epic, user_story=m.public_us)
    m.private_related_us1 = f.RelatedUserStory(epic=m.private_epic1, user_story=m.private_us1)
    m.private_related_us2 = f.RelatedUserStory(epic=m.private_epic2, user_story=m.private_us2)
    m.blocked_related_us = f.RelatedUserStory(epic=m.blocked_epic, user_story=m.blocked_us)

    # Each project's default epic status is the one its epic was created with.
    m.public_project.default_epic_status = m.public_epic.status
    m.public_project.save()
    m.private_project1.default_epic_status = m.private_epic1.status
    m.private_project1.save()
    m.private_project2.default_epic_status = m.private_epic2.status
    m.private_project2.save()
    m.blocked_project.default_epic_status = m.blocked_epic.status
    m.blocked_project.save()

    return m
def test_epic_list(client, data):
    """Anonymous and unrelated users see only the two anon-visible epics;
    a member with permissions and the owner see all four."""
    url = reverse('epics-list')

    expectations = [
        (None, 2),                            # anonymous request
        (data.registered_user, 2),            # logged in, not a member
        (data.project_member_with_perms, 4),
        (data.project_owner, 4),
    ]
    for user, expected_count in expectations:
        if user is not None:
            client.login(user)
        response = client.get(url)
        epics_data = json.loads(response.content.decode('utf-8'))
        assert len(epics_data) == expected_count
        assert response.status_code == 200
def test_epic_retrieve(client, data):
    """GET on an epic detail endpoint honours each project's visibility."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    for epic, expected in cases:
        url = reverse('epics-detail', kwargs={"pk": epic.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_epic_create(client, data):
    """Only members with permissions and the owner may POST new epics;
    blocked projects answer 451 even for them."""
    url = reverse('epics-list')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    cases = [
        (1, data.public_project, [401, 403, 403, 201, 201]),
        (2, data.private_project1, [401, 403, 403, 201, 201]),
        (3, data.private_project2, [401, 403, 403, 201, 201]),
        (3, data.blocked_project, [401, 403, 403, 451, 451]),
    ]
    for ref, project, expected in cases:
        create_data = json.dumps({
            "subject": "test",
            "ref": ref,
            "project": project.pk,
            "status": project.epic_statuses.all()[0].pk,
        })
        results = helper_test_http_method(client, 'post', url, create_data, users)
        assert results == expected
def test_epic_put_update(client, data):
    """Full PUT (subject change) on epics of each project kind.

    Only members with perms and the owner may update (200); blocked
    projects answer 451.  OCC version validation is mocked out so the
    serialized payload can be replayed as-is.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            url = reverse('epics-detail', kwargs={"pk": epic.pk})
            payload = EpicSerializer(epic).data
            payload["subject"] = "test"
            results = helper_test_http_method(client, 'put', url, json.dumps(payload), users)
            assert results == expected
def test_epic_put_comment(client, data):
    """PUT that only adds a comment to epics of each project kind.

    Same permission matrix as a regular update: members with perms and the
    owner get 200, blocked projects answer 451.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            url = reverse('epics-detail', kwargs={"pk": epic.pk})
            payload = EpicSerializer(epic).data
            payload["comment"] = "test comment"
            results = helper_test_http_method(client, 'put', url, json.dumps(payload), users)
            assert results == expected
def test_epic_put_update_and_comment(client, data):
    """PUT that changes the subject AND adds a comment in one request.

    Permission matrix matches the plain update case; blocked projects
    answer 451.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            url = reverse('epics-detail', kwargs={"pk": epic.pk})
            payload = EpicSerializer(epic).data
            payload["subject"] = "test"
            payload["comment"] = "test comment"
            results = helper_test_http_method(client, 'put', url, json.dumps(payload), users)
            assert results == expected
def test_epic_put_update_with_project_change(client):
    """Moving an epic to another project requires member-perms in BOTH projects.

    Builds two projects with default epic statuses, four users with
    different membership combinations, then attempts the project change as
    each user, resetting the epic back to the origin project after every
    attempt.
    """
    member_perms = list(map(lambda x: x[0], MEMBERS_PERMISSIONS))
    both_user = f.UserFactory.create()
    origin_only_user = f.UserFactory.create()
    destination_only_user = f.UserFactory.create()
    outsider_user = f.UserFactory.create()
    project1 = f.ProjectFactory()
    project2 = f.ProjectFactory()
    epic_status1 = f.EpicStatusFactory.create(project=project1)
    epic_status2 = f.EpicStatusFactory.create(project=project2)
    project1.default_epic_status = epic_status1
    project2.default_epic_status = epic_status2
    project1.save()
    project2.save()
    project1 = attach_project_extra_info(Project.objects.all()).get(id=project1.id)
    project2 = attach_project_extra_info(Project.objects.all()).get(id=project2.id)
    # Memberships: both_user in both projects, the others in only one.
    for project, user in [(project1, both_user),
                          (project2, both_user),
                          (project1, origin_only_user),
                          (project2, destination_only_user)]:
        f.MembershipFactory(project=project,
                            user=user,
                            role__project=project,
                            role__permissions=member_perms)
    epic = f.EpicFactory.create(project=project1)
    epic = attach_epic_extra_info(Epic.objects.all()).get(id=epic.id)
    url = reverse('epics-detail', kwargs={"pk": epic.pk})
    scenarios = [
        (both_user, 200),              # perms in both projects -> allowed
        (origin_only_user, 403),       # perms only in the origin project
        (destination_only_user, 403),  # perms only in the destination project
        (outsider_user, 403),          # no perms in either project
    ]
    for user, expected_status in scenarios:
        client.login(user)
        payload = EpicSerializer(epic).data
        payload["project"] = project2.id
        response = client.put(url, data=json.dumps(payload), content_type="application/json")
        assert response.status_code == expected_status
        # Reset the epic so every scenario starts from the origin project.
        epic.project = project1
        epic.save()
def test_epic_patch_update(client, data):
    """PATCH the subject of epics of each project kind.

    Members with perms and the owner may patch (200); blocked projects
    answer 451.  OCC version validation is mocked out.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            url = reverse('epics-detail', kwargs={"pk": epic.pk})
            patch_data = json.dumps({"subject": "test", "version": epic.version})
            results = helper_test_http_method(client, 'patch', url, patch_data, users)
            assert results == expected
def test_epic_patch_comment(client, data):
    """PATCH that only adds a comment.

    Note the public-project row: any authenticated user may comment there
    ([401, 200, 200, 200, 200]), unlike the subject-changing PATCH.
    Blocked projects answer 451.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            url = reverse('epics-detail', kwargs={"pk": epic.pk})
            patch_data = json.dumps({"comment": "test comment", "version": epic.version})
            results = helper_test_http_method(client, 'patch', url, patch_data, users)
            assert results == expected
def test_epic_patch_update_and_comment(client, data):
    """PATCH that changes the subject AND adds a comment in one request.

    Because the subject changes, the stricter update permissions apply
    everywhere; blocked projects answer 451.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            url = reverse('epics-detail', kwargs={"pk": epic.pk})
            patch_data = json.dumps({
                "subject": "test",
                "comment": "test comment",
                "version": epic.version
            })
            results = helper_test_http_method(client, 'patch', url, patch_data, users)
            assert results == expected
def test_epic_delete(client, data):
    """DELETE epics of each project kind.

    Only a member with perms may delete (204); blocked projects answer 451.
    The owner is intentionally excluded from the user list because each
    epic can be deleted only once.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 204]),
        (data.private_epic1, [401, 403, 403, 204]),
        (data.private_epic2, [401, 403, 403, 204]),
        (data.blocked_epic, [401, 403, 403, 451]),
    ]
    for epic, expected in cases:
        url = reverse('epics-detail', kwargs={"pk": epic.pk})
        results = helper_test_http_method(client, 'delete', url, None, users)
        assert results == expected
def test_epic_action_bulk_create(client, data):
    """POST to the bulk-create endpoint for each project kind.

    Members with perms and the owner may bulk-create (200); blocked
    projects answer 451.
    """
    url = reverse('epics-bulk-create')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    for epic, expected in cases:
        bulk_data = json.dumps({
            "bulk_epics": "test1\ntest2",
            "project_id": epic.project.pk,
        })
        results = helper_test_http_method(client, 'post', url, bulk_data, users)
        assert results == expected
def test_epic_action_upvote(client, data):
    """POST the upvote action for epics of each project kind.

    Any authenticated user may vote on visible epics (200); epics the user
    cannot see answer 404; blocked projects answer 451 for members.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 200, 200, 200, 200]),
        (data.private_epic2, [404, 404, 404, 200, 200]),
        (data.blocked_epic, [404, 404, 404, 451, 451]),
    ]
    for epic, expected in cases:
        url = reverse('epics-upvote', kwargs={"pk": epic.pk})
        results = helper_test_http_method(client, 'post', url, "", users)
        assert results == expected
def test_epic_action_downvote(client, data):
    """POST the downvote action for epics of each project kind.

    Mirrors the upvote permission matrix: hidden epics answer 404, blocked
    projects 451 for members.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 200, 200, 200, 200]),
        (data.private_epic2, [404, 404, 404, 200, 200]),
        (data.blocked_epic, [404, 404, 404, 451, 451]),
    ]
    for epic, expected in cases:
        url = reverse('epics-downvote', kwargs={"pk": epic.pk})
        results = helper_test_http_method(client, 'post', url, "", users)
        assert results == expected
def test_epic_voters_list(client, data):
    """GET the voters list for epics of each project kind.

    Public/private1 voters are visible to everyone (200); private2 and
    blocked require membership with perms.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    for epic, expected in cases:
        url = reverse('epic-voters-list', kwargs={"resource_id": epic.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_epic_voters_retrieve(client, data):
    """GET a single voter detail for epics of each project kind.

    The owner's vote is registered on every epic first so the detail URL
    resolves; visibility then follows the voters-list matrix.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    # Register all votes before issuing any request (matching the original
    # setup-then-assert ordering).
    for epic, _ in cases:
        add_vote(epic, data.project_owner)
    for epic, expected in cases:
        url = reverse('epic-voters-detail', kwargs={"resource_id": epic.pk,
                                                    "pk": data.project_owner.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_epic_action_watch(client, data):
    """POST the watch action for epics of each project kind.

    Any authenticated user may watch visible epics (200); hidden epics
    answer 404; blocked projects answer 451 for members.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 200, 200, 200, 200]),
        (data.private_epic2, [404, 404, 404, 200, 200]),
        (data.blocked_epic, [404, 404, 404, 451, 451]),
    ]
    for epic, expected in cases:
        url = reverse('epics-watch', kwargs={"pk": epic.pk})
        results = helper_test_http_method(client, 'post', url, "", users)
        assert results == expected
def test_epic_action_unwatch(client, data):
    """POST the unwatch action for epics of each project kind.

    Mirrors the watch permission matrix: hidden epics answer 404, blocked
    projects 451 for members.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 200, 200, 200, 200]),
        (data.private_epic2, [404, 404, 404, 200, 200]),
        (data.blocked_epic, [404, 404, 404, 451, 451]),
    ]
    for epic, expected in cases:
        url = reverse('epics-unwatch', kwargs={"pk": epic.pk})
        results = helper_test_http_method(client, 'post', url, "", users)
        assert results == expected
def test_epic_watchers_list(client, data):
    """GET the watchers list for epics of each project kind.

    Public/private1 watchers are visible to everyone (200); private2 and
    blocked require membership with perms.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    for epic, expected in cases:
        url = reverse('epic-watchers-list', kwargs={"resource_id": epic.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_epic_watchers_retrieve(client, data):
    """GET a single watcher detail for epics of each project kind.

    The owner is added as a watcher on every epic first so the detail URL
    resolves; visibility then follows the watchers-list matrix.
    """
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    # Register all watchers before issuing any request (matching the
    # original setup-then-assert ordering).
    for epic, _ in cases:
        add_watcher(epic, data.project_owner)
    for epic, expected in cases:
        url = reverse('epic-watchers-detail', kwargs={"resource_id": epic.pk,
                                                      "pk": data.project_owner.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_epics_csv(client, data):
    """GET the CSV export for each project's epics.

    The export is keyed only by a per-project UUID, so every user class
    (including anonymous) receives 200 for any project whose UUID they
    hold.
    """
    url = reverse('epics-csv')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # Bug fix: csv_private2_uuid previously read private_project1's UUID
    # (copy-paste), so private_project2's export was never exercised.
    uuids = [
        data.public_project.epics_csv_uuid,
        data.private_project1.epics_csv_uuid,
        data.private_project2.epics_csv_uuid,
        data.blocked_project.epics_csv_uuid,
    ]
    for csv_uuid in uuids:
        results = helper_test_http_method(client, 'get', "{}?uuid={}".format(url, csv_uuid), None, users)
        assert results == [200, 200, 200, 200, 200]
| 42.120482 | 122 | 0.661145 | 0 | 0 | 0 | 0 | 6,313 | 0.164149 | 0 | 0 | 4,555 | 0.118438 |
bfe27fe9229cd07b93680b62a614ccd6ac91ab8a | 208 | py | Python | wmt-etl/config.py | ministryofjustice/hmpps-wmt | 66a85b029e2fc2b525b299f9e2ac1803b9cf8516 | [
"MIT"
] | 3 | 2017-02-27T17:09:20.000Z | 2017-03-27T08:23:50.000Z | wmt-etl/config.py | ministryofjustice/hmpps-wmt | 66a85b029e2fc2b525b299f9e2ac1803b9cf8516 | [
"MIT"
] | 3 | 2017-03-03T16:08:20.000Z | 2017-03-16T17:19:34.000Z | wmt-etl/config.py | ministryofjustice/noms-wmt-alpha | 66a85b029e2fc2b525b299f9e2ac1803b9cf8516 | [
"MIT"
] | 1 | 2021-04-11T06:54:44.000Z | 2021-04-11T06:54:44.000Z | import os
# Database connection settings, read from environment variables with
# development-friendly defaults (localhost / wmt credentials).
DB_SERVER = os.getenv('WMT_DB_SERVER', 'localhost')
DB_NAME = os.getenv('WMT_DB_NAME', 'wmt_db')
DB_USERNAME = os.getenv('WMT_DB_USERNAME', 'wmt')
DB_PASSWORD = os.getenv('WMT_DB_PASSWORD', 'wmt')
| 29.714286 | 51 | 0.735577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.4375 |
bfe2baa9a9cbbb83b453c87c86d2b93ecfcf0cc5 | 484 | py | Python | apps/teacheres/adminx.py | ECNU-Studio/emoc | b11d1ebe91e2d9a4bc5b74ca7be3a13137f1c53c | [
"MIT"
] | 1 | 2018-03-10T08:50:18.000Z | 2018-03-10T08:50:18.000Z | apps/teacheres/adminx.py | ECNU-Studio/emoc | b11d1ebe91e2d9a4bc5b74ca7be3a13137f1c53c | [
"MIT"
] | 13 | 2018-04-28T02:33:21.000Z | 2018-05-04T09:05:38.000Z | apps/teacheres/adminx.py | ECNU-Studio/emoc | b11d1ebe91e2d9a4bc5b74ca7be3a13137f1c53c | [
"MIT"
] | null | null | null | # _*_ coding:utf-8 _*_
import xadmin
from .models import Teacheres
from courses.models import Courses
class AddCourses(object):
    """Inline admin block: lets courses be edited from the trainer admin page."""
    model = Courses
    extra = 0  # no extra blank inline forms by default
# Trainer (originally "培训师") admin configuration.
class TeacheresAdmin(object):
    """xadmin options for the Teacheres model change-list page."""
    list_display = ['name', 'username', 'email', 'phone', 'weixin', 'password']
    search_fields = ['name']
    list_filter = ['name']
    # Fields editable directly on the change-list page (originally "列表页直接编辑").
    list_editable = ['name']
    model_icon = 'fa fa-user'
    inlines = [AddCourses]
xadmin.site.register(Teacheres, TeacheresAdmin)
bfe336aabdbb7507441477f437b0274a9584341b | 1,710 | py | Python | app/jitsi/main.py | RibhiEl-Zaru/jitsi-party | d51b827304f97010dcf82673d124c8f68f98cb02 | [
"MIT"
] | null | null | null | app/jitsi/main.py | RibhiEl-Zaru/jitsi-party | d51b827304f97010dcf82673d124c8f68f98cb02 | [
"MIT"
] | null | null | null | app/jitsi/main.py | RibhiEl-Zaru/jitsi-party | d51b827304f97010dcf82673d124c8f68f98cb02 | [
"MIT"
] | null | null | null | import os
import json
from .models import User
from flask import Blueprint, send_from_directory, redirect, url_for, current_app, request, jsonify
main = Blueprint('main', __name__)
@main.route('/join', methods=['GET', 'POST'])
def join():
    """Create a user from the posted ``params`` (plus the client IP) and return it as JSON."""
    payload = request.get_json()['params']
    payload['ip'] = compute_ip()
    new_user = User.create(**payload)
    return jsonify(new_user.to_json())
@main.route('/rooms')
def get_rooms():
    """Return the room map as JSON, merging adventure nodes into plain rooms.

    Reads ``rooms.json`` and ``adventures.json`` from the app's configured
    ``BASE_DIR``.  Each adventure node is flattened into a pseudo-room entry
    keyed by its node name so the client can treat all rooms uniformly.
    """
    basedir = current_app.config.get('BASE_DIR')
    # Use context managers so the JSON files are closed promptly
    # (the previous json.load(open(...)) leaked the file handles).
    with open(os.path.join(basedir, 'rooms.json')) as rooms_file:
        rooms = json.load(rooms_file)
    with open(os.path.join(basedir, 'adventures.json')) as adventures_file:
        adventures = json.load(adventures_file)
    for adventure in adventures.values():
        for node_name, adventure_node in adventure.items():
            rooms[node_name] = {
                'name': adventure_node.get('name', ''),
                'type': 'adventure',
                'text': adventure_node['text'],
                'buttons': adventure_node['buttons']
            }
            # 'map' is optional on adventure nodes; copy it only when present.
            if adventure_node.get('map'):
                rooms[node_name]['map'] = adventure_node['map']
    return jsonify(rooms)
@main.route('/', defaults={'path': ''})
@main.route('/<path:path>')
def serve(path):
    """Serve the SPA entry point; bounce unknown non-client paths back to root."""
    is_root_or_client_asset = (not path) or path.startswith('client')
    if not is_root_or_client_asset:
        return redirect(url_for('main.serve'))
    return send_from_directory(current_app.static_folder, 'index.html')
def compute_ip():
    """Best-effort client IP: first X-Forwarded-For entry, else the socket peer."""
    # Trusting the 0th X-Forwarded-For entry is spoofable, as explained at
    # http://esd.io/blog/flask-apps-heroku-real-ip-spoofing.html
    # TODO find a way to make this ALWAYS get client IP
    forwarded_for = request.headers.getlist("X-Forwarded-For")
    if forwarded_for:
        return forwarded_for[0]
    return request.remote_addr
| 36.382979 | 140 | 0.654386 | 0 | 0 | 0 | 0 | 1,151 | 0.673099 | 0 | 0 | 420 | 0.245614 |
bfe38fe20ac1fc18d323396f06464e2c237b6a00 | 2,728 | py | Python | easyai/data_loader/pc_cls/classify_pc_augment.py | lpj0822/image_point_cloud_det | 7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f | [
"MIT"
] | 1 | 2020-09-05T09:18:56.000Z | 2020-09-05T09:18:56.000Z | easyai/data_loader/pc_cls/classify_pc_augment.py | lpj0822/image_point_cloud_det | 7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f | [
"MIT"
] | 8 | 2020-04-20T02:18:55.000Z | 2022-03-12T00:24:50.000Z | easyai/data_loader/pc_cls/classify_pc_augment.py | lpj0822/image_point_cloud_det | 7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:
import numpy as np
class ClassifyPointCloudAugment():
    """Random augmentations for point-cloud classification samples.

    ``augment`` applies, in a fixed order: rotation about the Y axis,
    random point dropout, uniform scaling and a global shift.  Each step
    can be disabled via the corresponding ``is_augment_*`` flag.  Only the
    xyz columns (``[:, :3]``) are geometrically transformed; dropout
    overwrites entire rows (all columns) with the first point.
    """
    def __init__(self):
        # Per-step enable flags; all augmentations are on by default.
        self.is_augment_rotate = True
        self.is_augment_dropout = True
        self.is_augment_scale = True
        self.is_augment_shift = True
        # Rotation angle range in degrees, sampled via randint(low, high)
        # (i.e. high is exclusive).
        self.rotation = (0, 30)

    def augment(self, src_cloud):
        """Apply the enabled augmentations to ``src_cloud`` (N x C array).

        The array is modified in place and also returned.  The first three
        columns are assumed to be x, y, z coordinates — TODO confirm with
        the data loader.
        """
        if self.is_augment_rotate:
            src_cloud[:, :3] = self.random_rotate_point_cloud(src_cloud[:, :3])
        if self.is_augment_dropout:
            src_cloud = self.random_point_dropout(src_cloud)
        if self.is_augment_scale:
            src_cloud[:, :3] = self.random_scale_point_cloud(src_cloud[:, :3])
        if self.is_augment_shift:
            src_cloud[:, :3] = self.random_shift_point_cloud(src_cloud[:, :3])
        return src_cloud

    def random_rotate_point_cloud(self, src_cloud):
        """
        Rotate the point cloud about the Y (up) axis by a random angle
        drawn from ``self.rotation`` degrees.
        :param src_cloud: Nx3 array, original batch of point clouds
        :return: Nx3 array, rotated batch of point clouds
        """
        rotation_angle = np.random.randint(self.rotation[0], self.rotation[1]) * np.pi / 180
        cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
        # Standard rotation matrix about the Y axis.
        rotation_matrix = np.array([[cosval, 0, sinval],
                                    [0, 1, 0],
                                    [-sinval, 0, cosval]])
        rotated_data = np.dot(src_cloud, rotation_matrix)
        return rotated_data

    def random_point_dropout(self, src_cloud, max_dropout_ratio=0.875):
        """Randomly "drop" points by overwriting them with the first point.

        The point count is unchanged; dropped rows simply duplicate row 0,
        so downstream shapes stay fixed.
        """
        dropout_ratio = np.random.random() * max_dropout_ratio  # 0~0.875
        drop_idx = np.where(np.random.random((src_cloud.shape[0])) <= dropout_ratio)[0]
        if len(drop_idx) > 0:
            src_cloud[drop_idx, :] = src_cloud[0, :]  # set to the first point
        return src_cloud

    def random_scale_point_cloud(self, src_cloud, scale_low=0.8, scale_high=1.25):
        """ Randomly scale the point cloud. Scale is per point cloud
            (one uniform factor applied to every point).
            Input:
                Nx3 array, original batch of point clouds
            Return:
                Nx3 array, scaled batch of point clouds
        """
        scales = np.random.uniform(scale_low, scale_high)
        src_cloud *= scales
        return src_cloud

    def random_shift_point_cloud(self, src_cloud, shift_range=0.1):
        """ Randomly shift point cloud. Shift is per point cloud
            (one xyz offset added to every point).
            Input:
                Nx3 array, original batch of point clouds
            Return:
                Nx3 array, shifted batch of point clouds
        """
        shifts = np.random.uniform(-shift_range, shift_range, 3)
        src_cloud += shifts
        return src_cloud
| 38.422535 | 92 | 0.610337 | 2,650 | 0.971408 | 0 | 0 | 0 | 0 | 0 | 0 | 742 | 0.271994 |
bfe3cef09bab657f1130a7fa99db7d1c27c3b2b2 | 2,436 | py | Python | Examples/Image/Detection/FastRCNN/BrainScript/B3_VisualizeOutputROIs.py | shyamalschandra/CNTK | 0e7a6cd4cc174eab28eaf2ffc660c6380b9e4e2d | [
"MIT"
] | 17,702 | 2016-01-25T14:03:01.000Z | 2019-05-06T09:23:41.000Z | Examples/Image/Detection/FastRCNN/BrainScript/B3_VisualizeOutputROIs.py | shyamalschandra/CNTK | 0e7a6cd4cc174eab28eaf2ffc660c6380b9e4e2d | [
"MIT"
] | 3,489 | 2016-01-25T13:32:09.000Z | 2019-05-03T11:29:15.000Z | Examples/Image/Detection/FastRCNN/BrainScript/B3_VisualizeOutputROIs.py | shyamalschandra/CNTK | 0e7a6cd4cc174eab28eaf2ffc660c6380b9e4e2d | [
"MIT"
] | 5,180 | 2016-01-25T14:02:12.000Z | 2019-05-06T04:24:28.000Z | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import print_function
import os, importlib, sys
from cntk_helpers import imWidthHeight, nnPredict, applyNonMaximaSuppression, makeDirectory, visualizeResults, imshow
import PARAMETERS
####################################
# Parameters
####################################
image_set = 'test' # 'train', 'test'
def visualize_output_rois(testing=False):
    """Render the evaluated ROIs for every image of `image_set`.

    For each image, loads the parsed network predictions, trims the
    zero-padded ROIs, optionally applies non-maxima suppression, draws
    the resulting boxes and (unless `testing` is True) displays the
    annotated image.

    Args:
        testing: when True, skip the interactive imshow() calls.

    Returns:
        True on completion.
    """
    cfg = PARAMETERS.get_parameters_for_dataset()
    # no need to change these parameters
    use_nms = True
    vis_dir = os.path.join(cfg.resultsDir, "visualizations")
    parsed_dir = os.path.join(cfg.cntkFilesDir, image_set + "_parsed")
    makeDirectory(cfg.resultsDir)
    makeDirectory(vis_dir)
    # loop over all images and visualize
    imdb = cfg.imdbs[image_set]
    for idx in range(imdb.num_images):
        img_path = imdb.image_path_at(idx)
        img_width, img_height = imWidthHeight(img_path)
        # evaluate classifier for all rois
        labels, scores = nnPredict(idx, parsed_dir, cfg.cntk_nrRois,
                                   len(cfg.classes), None)
        # remove the zero-padded rois
        boxes = imdb.roidb[idx]['boxes']
        scores = scores[:len(boxes)]
        labels = labels[:len(boxes)]
        # perform non-maxima suppression. note that the set of detected
        # classes in the image is not affected by this.
        keep_indices = []
        if use_nms:
            keep_indices = applyNonMaximaSuppression(cfg.nmsThreshold,
                                                     labels, scores, boxes)
            print ("Non-maxima surpression kept {:4} of {:4} rois (nmsThreshold={})".format(len(keep_indices), len(labels), cfg.nmsThreshold))
        # visualize results
        debug_img = visualizeResults(img_path, labels, scores, boxes,
                                     cfg.cntk_padWidth, cfg.cntk_padHeight,
                                     cfg.classes, keep_indices,
                                     boDrawNegativeRois=True)
        if not testing:
            imshow(debug_img, waitDuration=0, maxDim = 800)
            # imwrite(debug_img, vis_dir + "/" + str(idx) + os.path.basename(img_path))
    print ("DONE.")
    return True


if __name__=='__main__':
    visualize_output_rois()
| 41.288136 | 142 | 0.654351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 816 | 0.334975 |
bfe43a1f982ed9fd7da447c90684b4c6aa5f68e2 | 48 | py | Python | src/deepke/name_entity_re/__init__.py | johncolezhang/DeepKE | ea4552ec42cb003a835f00fc14fb454f9a9a7183 | [
"MIT"
] | 710 | 2021-08-01T16:43:59.000Z | 2022-03-31T08:39:17.000Z | src/deepke/name_entity_re/__init__.py | johncolezhang/DeepKE | ea4552ec42cb003a835f00fc14fb454f9a9a7183 | [
"MIT"
] | 66 | 2019-06-09T12:14:31.000Z | 2021-07-27T05:54:35.000Z | src/deepke/name_entity_re/__init__.py | johncolezhang/DeepKE | ea4552ec42cb003a835f00fc14fb454f9a9a7183 | [
"MIT"
] | 183 | 2018-09-07T06:57:13.000Z | 2021-08-01T08:50:15.000Z | from .standard import *
from .few_shot import *
| 16 | 23 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bfe632653e44b0565da8ed9cef1a5b13a8e77385 | 3,756 | py | Python | tests/test_momentum.py | DanielDaCosta/optimization-algorithms | f06c67c218050926fd099ed255d73c19871b9a9a | [
"MIT"
] | 6 | 2020-05-03T00:55:45.000Z | 2021-06-26T15:34:57.000Z | tests/test_momentum.py | DanielDaCosta/optimization-algorithms | f06c67c218050926fd099ed255d73c19871b9a9a | [
"MIT"
] | null | null | null | tests/test_momentum.py | DanielDaCosta/optimization-algorithms | f06c67c218050926fd099ed255d73c19871b9a9a | [
"MIT"
] | 2 | 2020-08-03T17:53:48.000Z | 2020-12-24T22:05:27.000Z | import unittest
import numpy as np
from gradient_descent.Momentum import Momentum
class TestMomentumClass(unittest.TestCase):
    """Unit tests for the Momentum gradient-descent optimizer."""

    def setUp(self):
        """Setting up requirements for test: a Momentum optimizer
        minimizing f(x) = 4x**2 starting from x_t = 10.

        Params:
            None
        Returns:
            None
        """
        def f(x):
            """Apply function to point x

            Args:
                x (float): point on x-axis
            Returns:
                (float): f(x)
            """
            return 4*x**2

        def df(x):
            """Apply function gradient to point x

            Args:
                x (float): point on x-axis
            Returns:
                (float): df(x)
            """
            return 8*x

        self.momentum = Momentum(f, df, x_t=10, learning_rate=0.1,
                                 max_iterations=1000, tolerance=1e-6,
                                 n_history_points=1000, beta_1=0.9)

    def test_initizialization(self):
        """Testing Attributes initialization

        Args:
            None
        Returns:
            None
        """
        self.assertEqual(self.momentum.x_t, 10,
                         'incorrect initial value of x_t')
        self.assertEqual(self.momentum.learning_rate, 0.1,
                         'incorrect value of learning_rate')
        self.assertEqual(self.momentum.max_iterations, 1000,
                         'incorrect value of max_iterations')
        self.assertEqual(self.momentum.tolerance, 1e-6,
                         'incorrect value of tolerance')
        self.assertEqual(self.momentum.n_iterations, 0,
                         'incorrect value of n_iterations')
        np.testing.assert_array_equal(self.momentum.convergence_points,
                                      np.array([None]*1000),
                                      'incorrect inialization of array \
                                          convergence_points')
        self.assertEqual(self.momentum.beta_1, 0.9,
                         'incorrect initilialization of beta_1')
        self.assertEqual(self.momentum._Momentum__v_t, 0,
                         'incorrect initialization of v_t')
        self.assertEqual(self.momentum._Momentum__v_t_1, 0,
                         'incorrect initialization of v_t_1')

    def test_update_parameter(self):
        """Testing _update_parameter method

        Args:
            None
        Returns:
            None
        """
        # Capture the velocity BEFORE the call: _update_parameter mutates
        # the internal state, so the expected value must be computed from
        # the pre-call velocity.
        # BUG FIX: msg must be passed by keyword to assertAlmostEqual —
        # as a third positional argument it is interpreted as `places`,
        # which raises TypeError on any mismatch instead of failing with
        # the intended message.
        v_t = self.momentum._Momentum__v_t
        self.assertAlmostEqual(self.momentum._update_parameter(10),
                               self.momentum.learning_rate
                               * (self.momentum.beta_1*v_t
                                  + (1-self.momentum.beta_1)
                                  * self.momentum.df(10)),
                               msg='incorrect return of _update_parameter')
        v_t = self.momentum._Momentum__v_t
        self.assertAlmostEqual(self.momentum._update_parameter(3),
                               self.momentum.learning_rate
                               * (self.momentum.beta_1*v_t
                                  + (1-self.momentum.beta_1)*self.momentum.df(3)),
                               msg='incorrect return of _update_parameter')

    def test_optimization(self):
        """Test the optimization algorithm

        Args:
            None
        Returns:
            None
        """
        self.assertLessEqual(self.momentum.fit(), 1e-4,
                             'Failed to converge to zero for the function: \
                                 4x**2')
        self.assertGreaterEqual(self.momentum.n_iterations, 1,
                                "n_iterations wasn't properly updated")
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| 33.238938 | 79 | 0.502929 | 3,621 | 0.964058 | 0 | 0 | 0 | 0 | 0 | 0 | 1,360 | 0.362087 |
bfe67e7a43775352603cca8735626be5e0303770 | 2,436 | py | Python | recipes/Python/299133_bubblebabble/recipe-299133.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/299133_bubblebabble/recipe-299133.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/299133_bubblebabble/recipe-299133.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | #! /usr/bin/env python
"""Compute a (somewhat more) human readable format for message
digests. This is port of the perl module Digest-BubbleBabble-0.01
(http://search.cpan.org/~btrott/Digest-BubbleBabble-0.01/)
"""
vowels = "aeiouy"
consonants = "bcdfghklmnprstvzx"


def bubblebabble(digest):
    """compute bubblebabble representation of digest.

    @param digest: raw digest, either as a byte string (what
        hashlib.md5(...).digest() returns on Python 3) or as a str of
        8-bit characters (Python 2 style)
    @type digest: bytes or str
    @return: bubblebabble representation of digest
    @rtype: str
    """
    # Normalize to a list of byte values: iterating bytes on Python 3
    # already yields ints, while iterating a str yields 1-char strings.
    digest = [x if isinstance(x, int) else ord(x) for x in digest]
    dlen = len(digest)
    seed = 1
    # Floor division keeps these ints on Python 3; the value is identical
    # to the old "/" under Python 2 integer division.
    rounds = 1 + dlen // 2
    retval = "x"
    for i in range(rounds):
        if i + 1 < rounds or dlen % 2:
            # Encode the even byte as a vowel-consonant-vowel group.
            idx0 = (((digest[2*i] >> 6) & 3) + seed) % 6
            idx1 = (digest[2*i] >> 2) & 15
            idx2 = ((digest[2*i] & 3) + seed // 6) % 6
            retval += "%s%s%s" % (vowels[idx0], consonants[idx1], vowels[idx2])
            if i + 1 < rounds:
                # Encode the odd byte as consonant-consonant and advance
                # the running seed.
                idx3 = (digest[2 * i + 1] >> 4) & 15
                idx4 = digest[2 * i + 1] & 15
                retval += "%s-%s" % (consonants[idx3], consonants[idx4])
                seed = (seed * 5 + digest[2*i] * 7 +
                        digest[2*i+1]) % 36
        else:
            # Even-length digest: the final group depends only on the seed.
            idx0 = seed % 6
            idx1 = 16  # always the terminal consonant 'x'
            idx2 = seed // 6
            retval += "%s%s%s" % (vowels[idx0], consonants[idx1], vowels[idx2])
    retval += "x"
    return retval
def hexstring2string(s):
    """convert hex representation of digest back to raw digest.

    @param s: hex string, optionally prefixed with "0x"/"0X", containing
        an even number of characters
    @type s: str
    @return: raw digest as a str of 8-bit characters
    @rtype: str
    """
    assert (len(s) % 2 == 0)
    if s.startswith(("0x", "0X")):
        s = s[2:]
    # int(..., 16) parses each byte safely; the previous eval("0x%s")
    # executed arbitrary text taken from the input string.
    return "".join([chr(int(s[i:i+2], 16)) for i in range(0, len(s), 2)])
def _test():
    """Check bubblebabble() against digests with known encodings."""
    tests = """432cc46b5c67c9adaabdcc6c69e23d6d xibod-sycik-rilak-lydap-tipur-tifyk-sipuv-dazok-tixox
5a1edbe07020525fd28cba1ea3b76694 xikic-vikyv-besed-begyh-zagim-sevic-vomer-lunon-gexex
1c453603cdc914c1f2eeb1abddae2e03 xelag-hatyb-fafes-nehys-cysyv-vasop-rylop-vorab-fuxux
df8ec33d78ae78280e10873f5e58d5ad xulom-vebyf-tevyp-vevid-mufic-bucef-zylyh-mehyp-tuxax
02b682a73739a9fb062370eaa8bcaec9 xebir-kybyp-latif-napoz-ricid-fusiv-popir-soras-nixyx"""
    # ...as computed by perl
    tests = [x.split()[:2] for x in tests.split("\n")]
    for digest, expected in tests:
        res = bubblebabble(hexstring2string(digest))
        # "%"-formatting keeps this line valid on both Python 2 and 3 with
        # identical output (the old "print a, b, c" statement was
        # Python 2 only).
        print("%s %s %s" % (digest, res, ("failure", "ok")[expected == res]))


if __name__ == "__main__":
    _test()
| 33.369863 | 101 | 0.610016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,075 | 0.441297 |
bfe7c1cb3ac794391c15b7a6667e0e0823022367 | 657 | py | Python | jq/jq/schema.py | Thousif-S/J-Q | 478d69181bf22f23b56c75d50c212d9d4aa0b1a9 | [
"MIT"
] | null | null | null | jq/jq/schema.py | Thousif-S/J-Q | 478d69181bf22f23b56c75d50c212d9d4aa0b1a9 | [
"MIT"
] | 18 | 2020-02-12T01:05:41.000Z | 2022-03-11T23:58:14.000Z | jq/jq/schema.py | Thousif-S/J-Q | 478d69181bf22f23b56c75d50c212d9d4aa0b1a9 | [
"MIT"
] | null | null | null | import graphene
import graphql_jwt
import works.schema
import users.schema
import works.schema_relay
import people.schema
class Query(
users.schema.Query,
works.schema.Query,
works.schema_relay.RelayQuery,
people.schema.Query,
graphene.ObjectType,
):
pass
class Mutation(
users.schema.Mutation,
works.schema.Mutation,
works.schema_relay.RelayMutation,
people.schema.Mutation,
graphene.ObjectType,
):
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
| 20.53125 | 56 | 0.750381 | 470 | 0.715373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bfea18a49a910958928ffae603d2f21aece25fb7 | 7,497 | py | Python | t3f/batch_ops.py | KhrulkovV/t3f | 6fb7ac695d55c302dbd6930baec8c61b5aac54e3 | [
"MIT"
] | 1 | 2021-04-25T07:10:44.000Z | 2021-04-25T07:10:44.000Z | t3f/batch_ops.py | KhrulkovV/t3f | 6fb7ac695d55c302dbd6930baec8c61b5aac54e3 | [
"MIT"
] | null | null | null | t3f/batch_ops.py | KhrulkovV/t3f | 6fb7ac695d55c302dbd6930baec8c61b5aac54e3 | [
"MIT"
] | null | null | null | import tensorflow as tf
from t3f.tensor_train_base import TensorTrainBase
from t3f.tensor_train_batch import TensorTrainBatch
from t3f import ops
def concat_along_batch_dim(tt_list):
"""Concat all TensorTrainBatch objects along batch dimension.
Args:
tt_list: a list of TensorTrainBatch objects.
Returns:
TensorTrainBatch
"""
ndims = tt_list[0].ndims()
if isinstance(tt_list, TensorTrainBase):
# Not a list but just one element, nothing to concat.
return tt_list
for batch_idx in range(len(tt_list)):
if not isinstance(tt_list[batch_idx], TensorTrainBatch):
raise ValueError('All objects in the list should be TTBatch objects, got '
'%s' % tt_list[batch_idx])
for batch_idx in range(1, len(tt_list)):
if tt_list[batch_idx].get_raw_shape() != tt_list[0].get_raw_shape():
raise ValueError('Shapes of all TT-batch objects should coincide, got %s '
'and %s' % (tt_list[0].get_raw_shape(),
tt_list[batch_idx].get_raw_shape()))
if tt_list[batch_idx].get_tt_ranks() != tt_list[0].get_tt_ranks():
raise ValueError('TT-ranks of all TT-batch objects should coincide, got '
'%s and %s' % (tt_list[0].get_tt_ranks(),
tt_list[batch_idx].get_tt_ranks()))
res_cores = []
for core_idx in range(ndims):
curr_core = tf.concat([tt.tt_cores[core_idx] for tt in tt_list], axis=0)
res_cores.append(curr_core)
try:
batch_size = sum([tt.batch_size for tt in tt_list])
except TypeError:
# The batch sizes are not defined and you can't sum Nones.
batch_size = None
return TensorTrainBatch(res_cores, tt_list[0].get_raw_shape(),
tt_list[0].get_tt_ranks(), batch_size)
def multiply_along_batch_dim(batch_tt, weights):
"""Multiply each TensorTrain in a batch by a number.
Args:
batch_tt: TensorTrainBatch object, TT-matrices or TT-tensors.
weights: 1-D tf.Tensor (or something convertible to it like np.array) of size
tt.batch_sie with weights.
Returns:
TensorTrainBatch
"""
weights = tf.convert_to_tensor(weights)
tt_cores = list(batch_tt.tt_cores)
if batch_tt.is_tt_matrix():
weights = weights[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]
else:
weights = weights[:, tf.newaxis, tf.newaxis, tf.newaxis]
tt_cores[0] = weights * tt_cores[0]
out_shape = batch_tt.get_raw_shape()
out_ranks = batch_tt.get_tt_ranks()
out_batch_size = batch_tt.batch_size
return TensorTrainBatch(tt_cores, out_shape, out_ranks, out_batch_size)
def gram_matrix(tt_vectors, matrix=None):
"""Computes Gramian matrix of a batch of TT-vecors.
If matrix is None, computes
res[i, j] = t3f.flat_inner(tt_vectors[i], tt_vectors[j]).
If matrix is present, computes
res[i, j] = t3f.flat_inner(tt_vectors[i], t3f.matmul(matrix, tt_vectors[j]))
or more shortly
res[i, j] = tt_vectors[i]^T * matrix * tt_vectors[j]
but is more efficient.
Args:
tt_vectors: TensorTrainBatch.
matrix: None, or TensorTrain matrix.
Returns:
tf.tensor with the Gram matrix.
"""
return pairwise_flat_inner(tt_vectors, tt_vectors, matrix)
def pairwise_flat_inner(tt_1, tt_2, matrix=None):
"""Computes all scalar products between two batches of TT-objects.
If matrix is None, computes
res[i, j] = t3f.flat_inner(tt_1[i], tt_2[j]).
If matrix is present, computes
res[i, j] = t3f.flat_inner(tt_1[i], t3f.matmul(matrix, tt_2[j]))
or more shortly
res[i, j] = tt_1[i]^T * matrix * tt_2[j]
but is more efficient.
Args:
tt_1: TensorTrainBatch.
tt_2: TensorTrainBatch.
matrix: None, or TensorTrain matrix.
Returns:
tf.tensor with the matrix of pairwise scalar products (flat inners).
"""
ndims = tt_1.ndims()
if matrix is None:
curr_core_1 = tt_1.tt_cores[0]
curr_core_2 = tt_2.tt_cores[0]
mode_string = 'ij' if tt_1.is_tt_matrix() else 'i'
einsum_str = 'pa{0}b,qc{0}d->pqbd'.format(mode_string)
res = tf.einsum(einsum_str, curr_core_1, curr_core_2)
for core_idx in range(1, ndims):
curr_core_1 = tt_1.tt_cores[core_idx]
curr_core_2 = tt_2.tt_cores[core_idx]
einsum_str = 'pqac,pa{0}b,qc{0}d->pqbd'.format(mode_string)
res = tf.einsum(einsum_str, res, curr_core_1, curr_core_2)
else:
# res[i, j] = tt_1[i] ^ T * matrix * tt_2[j]
if not tt_1.is_tt_matrix() or not tt_2.is_tt_matrix() or not matrix.is_tt_matrix():
raise ValueError('When passing three arguments to pairwise_flat_inner, '
'the first 2 of them should be TT-vecors and the last '
'should be a TT-matrix. Got %s, %s, and %s instead.' %
(tt_1, tt_2, matrix))
matrix_shape = matrix.get_raw_shape()
if not tt_1.get_raw_shape()[0].is_compatible_with(matrix_shape[0]):
raise ValueError('The shape of the first argument should be compatible '
'with the shape of the TT-matrix, that is it should be '
'possible to do the following matmul: '
'transpose(tt_1) * matrix. Got the first argument '
'"%s" and matrix "%s"' % (tt_1, matrix))
if not tt_2.get_raw_shape()[0].is_compatible_with(matrix_shape[1]):
raise ValueError('The shape of the second argument should be compatible '
'with the shape of the TT-matrix, that is it should be '
'possible to do the following matmul: '
'matrix * tt_2. Got the second argument '
'"%s" and matrix "%s"' % (tt_2, matrix))
vectors_1_shape = tt_1.get_shape()
if vectors_1_shape[2] == 1 and vectors_1_shape[1] != 1:
# TODO: not very efficient, better to use different order in einsum.
tt_1 = ops.transpose(tt_1)
vectors_1_shape = tt_1.get_shape()
vectors_2_shape = tt_2.get_shape()
if vectors_2_shape[2] == 1 and vectors_2_shape[1] != 1:
# TODO: not very efficient, better to use different order in einsum.
tt_2 = ops.transpose(tt_2)
vectors_2_shape = tt_2.get_shape()
if vectors_1_shape[1] != 1:
# TODO: do something so that in case the shape is undefined on compilation
# it still works.
raise ValueError('The tt_vectors_1 argument should be vectors (not '
'matrices) with shape defined on compilation.')
if vectors_2_shape[1] != 1:
# TODO: do something so that in case the shape is undefined on compilation
# it still works.
raise ValueError('The tt_vectors_2 argument should be vectors (not '
'matrices) with shape defined on compilation.')
curr_core_1 = tt_1.tt_cores[0]
curr_core_2 = tt_2.tt_cores[0]
curr_matrix_core = matrix.tt_cores[0]
# We enumerate the dummy dimension (that takes 1 value) with `k`.
res = tf.einsum('pakib,cijd,qekjf->pqbdf', curr_core_1, curr_matrix_core,
curr_core_2)
for core_idx in range(1, ndims):
curr_core_1 = tt_1.tt_cores[core_idx]
curr_core_2 = tt_2.tt_cores[core_idx]
curr_matrix_core = matrix.tt_cores[core_idx]
res = tf.einsum('pqace,pakib,cijd,qekjf->pqbdf', res, curr_core_1,
curr_matrix_core, curr_core_2)
# Squeeze to make the result of size batch_size x batch_size instead of
# batch_size x batch_size x 1 x 1.
return tf.squeeze(res)
| 40.306452 | 87 | 0.657596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,229 | 0.430706 |
bfea29daaa8ed7a57cb3e9e6b1260c8d7dea0cc2 | 3,496 | py | Python | examples/python/sso/esi_oauth_native.py | Dusty-Meg/esi-docs | 43d2c07a371ea5b71ed51ba4e5be7dabda18ec3c | [
"MIT"
] | 130 | 2018-10-01T12:33:39.000Z | 2022-03-16T07:26:30.000Z | examples/python/sso/esi_oauth_native.py | Dusty-Meg/esi-docs | 43d2c07a371ea5b71ed51ba4e5be7dabda18ec3c | [
"MIT"
] | 30 | 2018-10-01T14:51:43.000Z | 2022-03-29T15:50:50.000Z | examples/python/sso/esi_oauth_native.py | Dusty-Meg/esi-docs | 43d2c07a371ea5b71ed51ba4e5be7dabda18ec3c | [
"MIT"
] | 59 | 2018-09-28T17:59:19.000Z | 2022-03-29T14:08:24.000Z | """ Python 3 native (desktop/mobile) OAuth 2.0 example.
This example can be run from the command line and will show you how the
OAuth 2.0 flow should be handled if you are a web based application.
Prerequisites:
* Create an SSO application at developers.eveonline.com with the scope
"esi-characters.read_blueprints.v1" and the callback URL
"https://localhost/callback/". Note: never use localhost as a callback
in released applications.
* Have a Python 3 environment available to you (possibly by using a
virtual environment: https://virtualenv.pypa.io/en/stable/).
* Run pip install -r requirements.txt with this directory as your root.
To run this example, make sure you have completed the prerequisites and then
run the following command from this directory as the root:
>>> python esi_oauth_native.py
then follow the prompts.
"""
import base64
import hashlib
import secrets
from shared_flow import print_auth_url
from shared_flow import send_token_request
from shared_flow import handle_sso_token_response
def main():
""" Takes you through a local example of the OAuth 2.0 native flow."""
print("This program will take you through an example OAuth 2.0 flow "
"that you should be using if you are building a desktop or mobile "
"application. Follow the prompts and enter the info asked for.")
# Generate the PKCE code challenge
random = base64.urlsafe_b64encode(secrets.token_bytes(32))
m = hashlib.sha256()
m.update(random)
d = m.digest()
code_challenge = base64.urlsafe_b64encode(d).decode().replace("=", "")
client_id = input("Copy your SSO application's client ID and enter it "
"here: ")
print("\nBecause this is a desktop/mobile application, you should use "
"the PKCE protocol when contacting the EVE SSO. In this case, that "
"means sending a base 64 encoded sha256 hashed 32 byte string "
"called a code challenge. This 32 byte string should be ephemeral "
"and never stored anywhere. The code challenge string generated for "
"this program is {} and the hashed code challenge is {}. \nNotice "
"that the query parameter of the following URL will contain this "
"code challenge.".format(random, code_challenge))
input("\nPress any key to continue:")
print_auth_url(client_id, code_challenge=code_challenge)
auth_code = input("Copy the \"code\" query parameter and enter it here: ")
code_verifier = random
form_values = {
"grant_type": "authorization_code",
"client_id": client_id,
"code": auth_code,
"code_verifier": code_verifier
}
print("\nBecause this is using PCKE protocol, your application never has "
"to share its secret key with the SSO. Instead, this next request "
"will send the base 64 encoded unhashed value of the code "
"challenge, called the code verifier, in the request body so EVE's "
"SSO knows your application was not tampered with since the start "
"of this process. The code verifier generated for this program is "
"{} derived from the raw string {}".format(code_verifier, random))
input("\nPress any key to continue:")
res = send_token_request(form_values)
handle_sso_token_response(res)
if __name__ == "__main__":
main()
| 39.280899 | 80 | 0.678776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,360 | 0.675057 |
bfea6d4f7615680bd982d58e764a76dea6be50fb | 1,763 | py | Python | queue_services/entity-emailer/tests/unit/email_processors/test_affiliation_notification.py | argush3/lear | 804820ea93a9ca44d1a474ce7a903bb0a808aacb | [
"Apache-2.0"
] | 8 | 2019-06-19T16:16:15.000Z | 2021-08-28T23:56:40.000Z | queue_services/entity-emailer/tests/unit/email_processors/test_affiliation_notification.py | argush3/lear | 804820ea93a9ca44d1a474ce7a903bb0a808aacb | [
"Apache-2.0"
] | 796 | 2019-03-07T19:25:50.000Z | 2022-03-31T20:32:57.000Z | queue_services/entity-emailer/tests/unit/email_processors/test_affiliation_notification.py | argush3/lear | 804820ea93a9ca44d1a474ce7a903bb0a808aacb | [
"Apache-2.0"
] | 82 | 2019-01-30T20:06:14.000Z | 2022-03-29T20:38:31.000Z | # Copyright © 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Unit Tests for the Affiliation email processor."""
from unittest.mock import patch
from entity_emailer.email_processors import affiliation_notification
from tests.unit import prep_alteration_filing
def test_notifications(app, session):
"""Assert Affiliation notification is created."""
subject = 'How to use BCRegistry.ca'
company_name = 'Company Name'
testing_email = 'test@test.com'
token = 'token'
filing = prep_alteration_filing(session, 'BC1234567', 'DRAFT', company_name)
# test processor
with patch.object(affiliation_notification, 'get_recipients', return_value=testing_email):
email = affiliation_notification.process(
{
'data': {
'filing': {
'header': {'filingId': filing.id}
}
},
'type': 'bc.registry.affiliation',
'identifier': 'BC1234567'
},
token
)
assert email['content']['subject'] == company_name + ' - ' + subject
assert testing_email in email['recipients']
assert email['content']['body']
| 36.729167 | 94 | 0.653999 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 936 | 0.530612 |
bfebea815f2a5344d8b51e99f3afdbd91a02e2b8 | 756 | py | Python | examples/resourcetracking/client.py | brubbel/Pyro4 | 791f5aff6e0c89e74264843defdd694cdaf99cc5 | [
"MIT"
] | 638 | 2015-01-04T14:59:55.000Z | 2022-03-29T02:28:39.000Z | examples/resourcetracking/client.py | brubbel/Pyro4 | 791f5aff6e0c89e74264843defdd694cdaf99cc5 | [
"MIT"
] | 173 | 2015-01-05T17:29:19.000Z | 2021-12-25T01:47:07.000Z | examples/resourcetracking/client.py | brubbel/Pyro4 | 791f5aff6e0c89e74264843defdd694cdaf99cc5 | [
"MIT"
] | 103 | 2015-01-10T10:00:08.000Z | 2022-03-06T14:19:20.000Z | from __future__ import print_function
import sys
import random
import Pyro4
if sys.version_info < (3, 0):
input = raw_input
uri = input("Enter the URI of the server object: ")
with Pyro4.Proxy(uri) as proxy:
print("currently allocated resources:", proxy.list())
name1 = hex(random.randint(0, 999999))[-4:]
name2 = hex(random.randint(0, 999999))[-4:]
print("allocating resource...", name1)
proxy.allocate(name1)
print("allocating resource...", name2)
proxy.allocate(name2)
input("\nhit Enter now to continue normally or ^C/break to abort the connection forcefully:")
print("free resources normally...")
proxy.free(name1)
proxy.free(name2)
print("allocated resources:", proxy.list())
print("done.")
| 26.068966 | 97 | 0.685185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.345238 |
bfecc16ed4f1a523424ef2baa4157fca46f28b44 | 259 | py | Python | servicelayerdms/exceptions.py | DimensionDataCBUSydney/servicelayer-dms-python | 9fd28223fb7a2a2bfb144d5047d3c505b5175ae4 | [
"Apache-2.0"
] | null | null | null | servicelayerdms/exceptions.py | DimensionDataCBUSydney/servicelayer-dms-python | 9fd28223fb7a2a2bfb144d5047d3c505b5175ae4 | [
"Apache-2.0"
] | null | null | null | servicelayerdms/exceptions.py | DimensionDataCBUSydney/servicelayer-dms-python | 9fd28223fb7a2a2bfb144d5047d3c505b5175ae4 | [
"Apache-2.0"
] | null | null | null | class DmsRequestError(Exception):
def __init__(self, error_message, status_code):
self.message = "Error requesting DMS API %s with code %s" \
% (error_message, status_code)
super(DmsRequestError, self).__init__(self.message)
| 43.166667 | 67 | 0.683398 | 258 | 0.996139 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.162162 |
bfedec9341ad808415e8f390392546b8caf1b03c | 1,843 | py | Python | config_manager.py | jbytes1027/notify-mirror | b4a6bb94578dd4c9130731f153f901868f26cded | [
"CC0-1.0"
] | null | null | null | config_manager.py | jbytes1027/notify-mirror | b4a6bb94578dd4c9130731f153f901868f26cded | [
"CC0-1.0"
] | null | null | null | config_manager.py | jbytes1027/notify-mirror | b4a6bb94578dd4c9130731f153f901868f26cded | [
"CC0-1.0"
] | null | null | null | import pathlib
import configparser
class ConfigManager:
DEFAULT_CONFIG_PATH = "~/.config/notify-sync.ini"
SETTING_NOTIFICATION_ICON = "icon"
SETTING_NOTIFICATION_TIMEOUT = "timeout" # in ms
# SETTING_NOTIFICATION_URGENCY = "urgency" # 0,1,2 low, avg, urgent
SETTING_NOTIFICATION_EXEC = "exec_on_click"
def __init__(self, config_path=DEFAULT_CONFIG_PATH):
self.config_path = config_path
self.config = configparser.ConfigParser()
path = pathlib.PosixPath(self.config_path).expanduser()
if path.exists():
# read config file
with open(path, "r") as config_file:
self.config.read_file(config_file)
else:
if not path.parent.exists():
# create config dir
path.parent.mkdir(parents=True)
# set default settings
self.config["GENERAL"] = {
"api_key": "",
"notify_on_error": "no",
"notify_on_connection_changed": "no",
}
self.config["DEFAULT NOTIFICATION"] = {
"icon": "given",
"timeout": "default",
# "urgency": "default",
"exec_on_click": "",
}
# create config file
with open(path, "w") as config_file:
self.config.write(config_file)
def get_notification_setting(self, android_notification, setting):
if (
android_notification.package in self.config
and setting in self.config[android_notification.package]
):
return self.config[android_notification.package][setting]
else:
return self.config["DEFAULT NOTIFICATION"][setting]
def get_api_key(self):
return self.config["GENERAL"]["api_key"]
| 32.910714 | 74 | 0.580033 | 1,805 | 0.979381 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.231687 |
bfeeaef350523448c6cf158714c62a0d2c908669 | 714 | py | Python | ass_tag_parser/draw_struct.py | bubblesub/ass_tag_parser | 89bd9ac47aa51a954604c41c6e633c938b2a1d62 | [
"MIT"
] | 10 | 2020-04-14T15:58:25.000Z | 2022-03-26T05:38:09.000Z | ass_tag_parser/draw_struct.py | rr-/ass_tag_parser | a758a3871b8904879a2588162335aa61863b370e | [
"MIT"
] | 4 | 2019-11-07T15:03:39.000Z | 2021-08-04T21:11:20.000Z | ass_tag_parser/draw_struct.py | rr-/ass_tag_parser | a758a3871b8904879a2588162335aa61863b370e | [
"MIT"
] | 1 | 2019-11-07T14:30:56.000Z | 2019-11-07T14:30:56.000Z | from dataclasses import dataclass
from typing import Optional
from ass_tag_parser.common import Meta
@dataclass
class AssDrawPoint:
x: float
y: float
class AssDrawCmd:
meta: Optional[Meta] = None
@dataclass
class AssDrawCmdMove(AssDrawCmd):
pos: AssDrawPoint
close: bool
@dataclass
class AssDrawCmdLine(AssDrawCmd):
points: list[AssDrawPoint]
@dataclass
class AssDrawCmdBezier(AssDrawCmd):
points: tuple[AssDrawPoint, AssDrawPoint, AssDrawPoint]
@dataclass
class AssDrawCmdSpline(AssDrawCmd):
points: list[AssDrawPoint]
@dataclass
class AssDrawCmdExtendSpline(AssDrawCmd):
points: list[AssDrawPoint]
@dataclass
class AssDrawCmdCloseSpline(AssDrawCmd):
pass
| 15.521739 | 59 | 0.768908 | 511 | 0.715686 | 0 | 0 | 539 | 0.754902 | 0 | 0 | 0 | 0 |
bfef9d0f02f12a417622fbc30f923a0fe176cb9c | 166 | py | Python | scripts/src/keyword.py | Artie18/goto | b31ad5044a894e33ad330fb0d01fa9e116cfddeb | [
"Apache-2.0"
] | null | null | null | scripts/src/keyword.py | Artie18/goto | b31ad5044a894e33ad330fb0d01fa9e116cfddeb | [
"Apache-2.0"
] | null | null | null | scripts/src/keyword.py | Artie18/goto | b31ad5044a894e33ad330fb0d01fa9e116cfddeb | [
"Apache-2.0"
] | null | null | null | class Keyword:
def __init__(self, keyword):
if keyword in ['add', 'find']:
self.value = keyword
else:
self.value = 'find'
| 23.714286 | 38 | 0.512048 | 165 | 0.993976 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.10241 |
bfefe39bcfdb486b0d377809401f5593f41268c6 | 4,710 | py | Python | feedparser/parsers/json.py | briangedev/feedparser | de4f786b0c726380af825610da833e0dc76cf282 | [
"BSD-2-Clause"
] | 1 | 2022-01-26T06:45:59.000Z | 2022-01-26T06:45:59.000Z | feedparser/parsers/json.py | briangedev/feedparser | de4f786b0c726380af825610da833e0dc76cf282 | [
"BSD-2-Clause"
] | null | null | null | feedparser/parsers/json.py | briangedev/feedparser | de4f786b0c726380af825610da833e0dc76cf282 | [
"BSD-2-Clause"
] | null | null | null | # The JSON feed parser
# Copyright 2017 Beat Bolli
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
from ..datetimes import _parse_date
from ..sanitizer import _sanitize_html
from ..util import FeedParserDict
class _JsonFeedParser(object):
VERSIONS = {
'https://jsonfeed.org/version/1': 'json1',
'https://jsonfeed.org/version/1.1': 'json11',
}
FEED_FIELDS = (
('title', 'title'),
('icon', 'image'),
('home_page_url', 'link'),
('description', 'description'),
)
ITEM_FIELDS = (
('title', 'title'),
('id', 'guid'),
('url', 'link'),
('summary', 'summary'),
('external_url', 'source'),
)
def __init__(self, baseuri=None, baselang=None, encoding=None):
self.baseuri = baseuri or ''
self.lang = baselang or None
self.encoding = encoding or 'utf-8' # character encoding
self.version = None
self.feeddata = FeedParserDict()
self.namespacesInUse = []
def feed(self, data):
data = json.loads(data)
v = data.get('version', '')
try:
self.version = self.VERSIONS[v]
except KeyError:
raise ValueError("Unrecognized JSONFeed version '%s'" % v)
for src, dst in self.FEED_FIELDS:
if src in data:
self.feeddata[dst] = data[src]
if 'author' in data:
self.parse_author(data['author'], self.feeddata)
# TODO: hubs; expired has no RSS equivalent
self.entries = [self.parse_entry(e) for e in data['items']]
def parse_entry(self, e):
entry = FeedParserDict()
for src, dst in self.ITEM_FIELDS:
if src in e:
entry[dst] = e[src]
if 'content_text' in e:
entry['content'] = c = FeedParserDict()
c['value'] = e['content_text']
c['type'] = 'text'
elif 'content_html' in e:
entry['content'] = c = FeedParserDict()
c['value'] = _sanitize_html(e['content_html'],
self.encoding, 'application/json')
c['type'] = 'html'
if 'date_published' in e:
entry['published'] = e['date_published']
entry['published_parsed'] = _parse_date(e['date_published'])
if 'date_updated' in e:
entry['updated'] = e['date_modified']
entry['updated_parsed'] = _parse_date(e['date_modified'])
if 'tags' in e:
entry['category'] = e['tags']
if 'author' in e:
self.parse_author(e['author'], entry)
if 'attachments' in e:
entry['enclosures'] = [self.parse_attachment(a) for a in e['attachments']]
return entry
def parse_author(self, parent, dest):
dest['author_detail'] = detail = FeedParserDict()
if 'name' in parent:
dest['author'] = detail['name'] = parent['name']
if 'url' in parent:
if parent['url'].startswith('mailto:'):
detail['email'] = parent['url'][7:]
else:
detail['href'] = parent['url']
def parse_attachment(self, attachment):
enc = FeedParserDict()
enc['href'] = attachment['url']
enc['type'] = attachment['mime_type']
if 'size_in_bytes' in attachment:
enc['length'] = attachment['size_in_bytes']
return enc
| 35.681818 | 86 | 0.614225 | 3,193 | 0.677919 | 0 | 0 | 0 | 0 | 0 | 0 | 2,238 | 0.475159 |
bff00f35cfa0182f724bf836ca255a2bb5de2883 | 4,211 | py | Python | aux/testUnicode.py | AlainLich/logtools | 584e575d25f0ebcd7a51cc6d5aefb530f80f6d22 | [
"Apache-2.0"
] | 2 | 2021-06-08T21:48:18.000Z | 2022-03-09T05:50:13.000Z | aux/testUnicode.py | AlainLich/logtools | 584e575d25f0ebcd7a51cc6d5aefb530f80f6d22 | [
"Apache-2.0"
] | null | null | null | aux/testUnicode.py | AlainLich/logtools | 584e575d25f0ebcd7a51cc6d5aefb530f80f6d22 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- mode: Python -*-
#
# (C) Alain Lichnewsky, 2021
#
import os
import sys
import re
import unicodedata
import unittest
from hypothesis import given, assume, settings, HealthCheck
import hypothesis.strategies as st
# using a try block so that this makes sense if exported to logtools/aux
# and used as a test case in this settings
# Prefer the installed package; in a development checkout fall back to a
# source tree under ~/src/logtools (plus any paths given in $VENVPY).
try:
    import logtools.utils
except Exception as err:
    # This is related to the development environment -------------------
    from pathlib import Path
    home = str(Path.home())
    path = [home + "/src/logtools"]
    if 'VENVPY' in os.environ:
        # $VENVPY is a colon-separated list of extra search paths.
        path.extend(os.environ['VENVPY'].split(":"))
    path.extend(sys.path)
    sys.path = path
    # END related to the development environment -------------------
    import logtools.utils
# enables to modify some globals
# MAX_SAMPLES controls hypothesis' max_examples: 50 when run standalone
# with -v, otherwise 5 (or whatever HYPOTHESIS_PROFILE configures).
MAX_SAMPLES = None
if __name__ == "__main__":
    if "-v" in sys.argv:
        MAX_SAMPLES = 50
settings.register_profile("default", suppress_health_check=(HealthCheck.too_slow,))
settings.load_profile(os.getenv(u'HYPOTHESIS_PROFILE', 'default'))
if MAX_SAMPLES is None:
    MAX_SAMPLES = 5
#Unicode alphabet
# All strategies exclude URL-significant characters "/?#" and surrogates (Cs).
ALPHABET_UCWild = st.characters(
    blacklist_characters="/?#", blacklist_categories=("Cs",))
ALPHABET_UCTame = st.characters(
    blacklist_characters="/?#", blacklist_categories=("Cs",),
    max_codepoint=0xFD, min_codepoint=0x40)
# somewhat restricted Greek
ALPHABET_UCGreek = st.characters(
    blacklist_characters="/?#", blacklist_categories=("Cs",),
    max_codepoint=0x3BF, min_codepoint=0x390)
# somewhat restricted Hebrew
ALPHABET_UCHebrew = st.characters(
    blacklist_characters="/?#", blacklist_categories=("Cs",),
    max_codepoint=0x5DF, min_codepoint=0x5BF)
# Combine a set of printables
ALPHABET_UC = st.one_of(ALPHABET_UCHebrew, ALPHABET_UCGreek , ALPHABET_UCTame)
# Recall
#Unicode Greek and Coptic: U+0370–U+03FF
#Unicode Hebrew block extends from U+0590 to U+05FF and from U+FB1D to U+FB4F.
# Strategy producing short mixed-script text samples for the tests below.
random_uc_string = st.text(alphabet=ALPHABET_UC, min_size=2, max_size=8)
#
# Run under unittest
#
class TestEncoding(unittest.TestCase):
    """Exploratory tests printing how hypothesis-generated Unicode strings
    behave under unicodedata normalization/encoding and logtools.utils.ucodeNorm.
    """
    # Toggled from the __main__ block when -v is passed; not read here.
    DO_DEBUG_PRINT = False
    @settings(max_examples=MAX_SAMPLES)
    @given(random_uc_string)
    def test_ustring(self, s):
        '''
        Print each generated string next to its NFKD normalization.
        '''
        form = 'NFKD'
        sNorm = unicodedata.normalize(form, s)
        print(f"test_ustring received:'{s}',\tnormalized ({form}):'{sNorm}'",
              file=sys.stderr)
    @settings(max_examples=MAX_SAMPLES)
    @given(random_uc_string)
    def test_nustring(self, s):
        '''
        Print each generated string after NFKD normalization followed by
        lossy ASCII encoding (non-ASCII code points dropped).
        '''
        form = 'NFKD'
        sNormEnc = unicodedata.normalize(form, s).encode('ascii','ignore')
        print(f"test_nustring received:'{s}',\tnormalized({form})/encoded(ascii) :'{sNormEnc}'",
              file=sys.stderr)
    @settings(max_examples=MAX_SAMPLES)
    @given(random_uc_string)
    def test_ucodeNorm(self, s):
        '''
        Print the result of logtools.utils.ucodeNorm for each generated string.
        '''
        form = 'NFKD'
        sNormEnc = logtools.utils.ucodeNorm(s)
        print(f"test_nustring received:'{s}',\tucodeNorm returns :'{sNormEnc}'",
              file=sys.stderr)
if __name__ == "__main__":
if "-h" in sys.argv:
description = """\
Function:
This is a test allowing to figure out in more detail the functionality
of the unicode python package.
This may run either under tox or standalone. When standalone
flags -h and -v are recognized, other flags are dealt with by unittest.main
and may select test cases.
Flags:
-h print this help and quit
-v print information messages on stderr; also reduces MAX_SAMPLES to 50
Autonomous CLI syntax:
python3 [-h] [-v] [TestUnicode[.<testname>]]
e.g. python3 TestEncoding.test_match_re
"""
print(description)
sys.exit(0)
if "-v" in sys.argv:
sys.argv = [x for x in sys.argv if x != "-v"]
TestEncoding.DO_DEBUG_PRINT = True
sys.stderr.write("Set verbose mode\n")
unittest.main()
| 28.452703 | 96 | 0.647827 | 1,134 | 0.269167 | 0 | 0 | 1,049 | 0.248991 | 0 | 0 | 1,709 | 0.405649 |
bff1b2f3bc2fa9c88c4478ee4d6e6619827c343f | 2,586 | py | Python | pawlytics.py | damjess/ssb-bunnies-page | ef7fcfd195acaa8ef9e6630b9dacef77599d25cf | [
"MIT"
] | null | null | null | pawlytics.py | damjess/ssb-bunnies-page | ef7fcfd195acaa8ef9e6630b9dacef77599d25cf | [
"MIT"
] | null | null | null | pawlytics.py | damjess/ssb-bunnies-page | ef7fcfd195acaa8ef9e6630b9dacef77599d25cf | [
"MIT"
] | null | null | null | """
Pawlytics API retrieval tool
Author: jess@menidae.com
Requires crontab to run tool hourly:
0 * * * * /path/to/python3 /path/to/file/pawlytics.py
"""
from urllib.request import Request
from urllib.request import urlopen
from typing import Dict
from urllib.request import HTTPError
import json
from json import dumps
# --- Auth0 credentials (fill in before running) ---------------------------
AUTH0_CLIENT_ID = 'your client id here'
AUTH0_CLIENT_SECRET = 'your client secret here'
AUTH0_AUDIENCE = 'https://api.pawlytics.com'
AUTH0_USER = 'your email here'
AUTH0_PASSWORD = 'your password here'
AUTH0_HEADERS: Dict[str, str] = {
    'Content-Type': 'application/json'
}
AUTH0_URL = 'https://pawlytics.auth0.com/oauth/token'
# Resource-owner password grant: exchanges user credentials for an access token.
AUTH0_BODY = dumps({
    'client_id': AUTH0_CLIENT_ID,
    'client_secret': AUTH0_CLIENT_SECRET,
    'audience': AUTH0_AUDIENCE,
    'username': AUTH0_USER,
    'password': AUTH0_PASSWORD,
    'grant_type': 'password',
    'scope': 'openid',
    'response_type': 'token id_token'
})
AUTH0_REQUEST = Request(
    url=AUTH0_URL,
    data=AUTH0_BODY.encode('utf-8'),
    headers=AUTH0_HEADERS,
    method='POST'
)
# Obtain the bearer token; on HTTP failure print the error body and stop.
try:
    AUTH0_RESPONSE = urlopen(AUTH0_REQUEST)
except HTTPError as error:
    print(error.read().decode())
    quit()
AUTH0_RESPONSE_BODY = AUTH0_RESPONSE.read()
AUTH0_DECODED_BODY = AUTH0_RESPONSE_BODY.decode()
AUTH0_JSON_LOADED = json.loads(AUTH0_DECODED_BODY)
TOKEN = AUTH0_JSON_LOADED["access_token"]
# Headers for the authenticated GraphQL request.
HEADERS: Dict[str, str] = {
    'Accept-Encoding': 'gzip, deflate, br',
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    'Connection': 'keep-alive',
    'DNT': '1',
    'Origin': 'file://',
    'authorization': 'Bearer {token}'.format(
        token=TOKEN
    )
}
URL = 'https://api.pawlytics.com/api/graphql'
# GraphQL query for all pets of the organization (fill in organization_id).
BODY = """{"query": "query OrgPets {
  organization_pets(
    organization_id: \\"your organization id\\"
  ){
    status
    adoption_fee{
      amount
      currency
    }
    pet {
      id
      name
      status
      species
      breed_rabbit
      mixed
      estimated_birth_date
      description
      special_needs
      distinguishing_marks
      weight_lbs
      youtube_video_url
      gender
      siblings {
        id
        name
      }
      images {
        url
      }
    }
  }
}"}"""
REQUEST = Request(
    url=URL,
    data=BODY.encode('utf-8'),
    headers=HEADERS,
    method='POST'
)
# Execute the query; on HTTP failure print the error body and stop.
try:
    RESPONSE = urlopen(REQUEST)
except HTTPError as error:
    print(error.read().decode())
    quit()
RESPONSE_BODY = RESPONSE.read()
# Persist the raw JSON response next to the script.
with open('response.json', 'w') as writable_file:
    writable_file.write(RESPONSE_BODY.decode())
| 19.590909 | 53 | 0.658546 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,401 | 0.508715 |
bff34ed954bb535419e69d62635bb3311abae523 | 1,099 | py | Python | vault_updater.py | alchem1ster/AddOns-Update-Tool | 82976d1e444b03f50010ff3272f7758317168f31 | [
"Unlicense"
] | 8 | 2021-12-06T14:27:16.000Z | 2022-01-17T22:17:06.000Z | vault_updater.py | alchem1ster/AddOns-Update-Tool | 82976d1e444b03f50010ff3272f7758317168f31 | [
"Unlicense"
] | 2 | 2021-12-28T20:08:33.000Z | 2022-01-05T22:42:32.000Z | vault_updater.py | alchem1ster/AddOns-Update-Tool | 82976d1e444b03f50010ff3272f7758317168f31 | [
"Unlicense"
] | null | null | null | """Vault update CLI"""
import sys
from argparse import ArgumentParser, FileType, _ArgumentGroup
from yaml import safe_load
from utils.log import log
from utils.vault import Vault
parser = ArgumentParser(description="Manual update of repositories Vault")
required: _ArgumentGroup = parser.add_argument_group("required arguments")
required.add_argument(
"-n", "--name", help="Vault name", required=True, metavar="NAME"
)
required.add_argument(
"-c",
"--cfg",
help="path to config file",
required=True,
metavar="PATH",
type=FileType("r"),
)
def main():
    """Entry point: load the YAML config mapping repo URLs to branches and
    push each pair into the named Vault, then refresh it.

    Exits with status 1 when the config file is not valid YAML.
    """
    with args.cfg as fobj:
        try:
            data = safe_load(fobj)
        except Exception:
            # BUG FIX: the parser defines --cfg, so the parsed namespace has
            # no `config` attribute; `args.config.name` raised AttributeError
            # instead of reporting the malformed file.
            log.critical(
                "Your %s has the wrong Config structure", args.cfg.name
            )
            sys.exit(1)
    if data:
        vault_db = Vault(args.name)
        # Config maps repository URL -> branch name.
        for url, branch in data.items():
            vault_db.new_or_update(url.strip(), branch.strip())
        vault_db.refresh()
if __name__ == "__main__":
args = parser.parse_args()
main()
| 23.382979 | 74 | 0.629663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.197452 |
bff3b90a3063963cf92d3a3b5bb3b7ba2a7f479c | 13,004 | py | Python | tf-lstm-crf-batch/model.py | adapt-sjtu/novelner | a8f54ffe3360a4fdfa4b154a6668e74459a61ec3 | [
"Apache-2.0"
] | 8 | 2017-06-08T09:48:25.000Z | 2020-07-09T10:51:36.000Z | tf-lstm-crf-batch/model.py | adapt-sjtu/novelner | a8f54ffe3360a4fdfa4b154a6668e74459a61ec3 | [
"Apache-2.0"
] | 1 | 2018-07-07T12:38:25.000Z | 2018-07-07T12:38:25.000Z | tf-lstm-crf-batch/model.py | adapt-sjtu/novelner | a8f54ffe3360a4fdfa4b154a6668e74459a61ec3 | [
"Apache-2.0"
] | 4 | 2018-01-13T01:37:51.000Z | 2019-08-05T12:25:32.000Z | import os
import numpy as np
import tensorflow as tf
import cPickle
from utils import shared, get_name
from nn import HiddenLayer, EmbeddingLayer, LSTM, forward
class Model(object):
    """
    Network architecture: char + word BiLSTM tagger with an optional CRF
    output layer, plus persistence of parameters and vocabulary mappings.
    """

    def __init__(self, parameters=None, models_path=None, model_path=None):
        """
        Initialize the model. We either provide the parameters and a path where
        we store the models, or the location of a trained model.
        """
        if model_path is None:
            assert parameters and models_path
            # Create a name based on the parameters
            self.parameters = parameters
            self.name = get_name(parameters)
            # Model location
            model_path = os.path.join(models_path, self.name)
            self.model_path = model_path
            self.mappings_path = os.path.join(model_path, 'mappings.pkl')
            self.parameters_path = os.path.join(model_path, 'parameters.pkl')
            # Create directory for the model if it does not exist
            if not os.path.exists(self.model_path):
                os.makedirs(self.model_path)
            # Save the parameters to disk
            with open(self.parameters_path, 'wb') as f:
                cPickle.dump(parameters, f)
        else:
            assert parameters is None and models_path is None
            # Model location
            self.model_path = model_path
            self.mappings_path = os.path.join(model_path, 'mappings.pkl')
            self.parameters_path = os.path.join(model_path, 'parameters.pkl')
            # Load the parameters and the mappings from disk
            with open(self.parameters_path, 'rb') as f:
                self.parameters = cPickle.load(f)
            self.reload_mappings()

    def save_mappings(self, id_to_word, id_to_char, id_to_tag):
        """
        We need to save the mappings if we want to use the model later.
        """
        self.id_to_word = id_to_word
        self.id_to_char = id_to_char
        self.id_to_tag = id_to_tag
        with open(self.mappings_path, 'wb') as f:
            mappings = {
                'id_to_word': self.id_to_word,
                'id_to_char': self.id_to_char,
                'id_to_tag': self.id_to_tag,
            }
            cPickle.dump(mappings, f)

    def reload_mappings(self):
        """
        Load mappings from disk.
        """
        with open(self.mappings_path, 'rb') as f:
            mappings = cPickle.load(f)
        self.id_to_word = mappings['id_to_word']
        self.id_to_char = mappings['id_to_char']
        self.id_to_tag = mappings['id_to_tag']

    def build(self,
              dropout,
              char_dim,
              char_lstm_dim,
              char_bidirect,
              word_dim,
              word_lstm_dim,
              word_bidirect,
              lr_method,
              lr_rate,
              clip_norm,
              crf,
              is_train,
              **kwargs
              ):
        """
        Build the network.

        Returns (cost, f_score, train_op) where f_score is either the softmax
        over tags (no CRF) or the Viterbi best-sequence tensor (CRF).
        """
        # Training parameters
        n_words = len(self.id_to_word)
        n_chars = len(self.id_to_char)
        n_tags = len(self.id_to_tag)
        # Network variables
        self.word_ids = tf.placeholder(tf.int32, shape=[None, None], name='word_ids')  # shape:[batch_size, max_word_len]
        self.word_pos_ids = tf.placeholder(tf.int32, shape=[None], name='word_pos_ids')  # shape: [batch_size]
        self.char_for_ids = tf.placeholder(tf.int32, shape=[None, None, None], name='char_for_ids')  # shape: [batch_size, word_max_len, char_max_len]
        self.char_rev_ids = tf.placeholder(tf.int32, shape=[None, None, None], name='char_rev_ids')  # shape: [batch_size, word_max_len, char_max_len]
        self.char_pos_ids = tf.placeholder(tf.int32, shape=[None, None], name='char_pos_ids')  # shape: [batch_size*word_max_len, char_max_len]
        self.tag_ids = tf.placeholder(tf.int32, shape=[None, None], name='tag_ids')  # shape: [batch_size,word_max_len]
        self.tag_id_trans = tf.placeholder(tf.int32, shape=[None, None, None], name='tag_id_trans')  # shape: [batch_size,word_max_len+1,2]
        self.tag_id_index = tf.placeholder(tf.int32, shape=[None, None, None], name='tag_id_index')  # shape: [batch_size,word_max_len,2]
        # Final input (all word features)
        input_dim = 0
        inputs = []
        #
        # Word inputs
        #
        if word_dim:
            input_dim += word_dim
            with tf.device("/cpu:0"):
                word_layer = EmbeddingLayer(n_words, word_dim, name='word_layer')
                word_input = word_layer.link(self.word_ids)
            inputs.append(word_input)
        #
        # Chars inputs
        #
        if char_dim:
            input_dim += char_lstm_dim
            char_layer = EmbeddingLayer(n_chars, char_dim, name='char_layer')
            char_lstm_for = LSTM(char_dim, char_lstm_dim, with_batch=True,
                                 name='char_lstm_for')
            char_lstm_rev = LSTM(char_dim, char_lstm_dim, with_batch=True,
                                 name='char_lstm_rev')
            with tf.device("/cpu:0"):
                char_for_embedding_batch = char_layer.link(self.char_for_ids)
                char_rev_embedding_batch = char_layer.link(self.char_rev_ids)
            shape_for = tf.shape(char_for_embedding_batch)
            # reshape from [batch_size, word_max_len, char_max_len, char_dim] to [batch_size*word_max_len, char_max_len, char_dim]
            char_for_embedding = tf.reshape(char_for_embedding_batch,
                                            (shape_for[0]*shape_for[1], shape_for[2], shape_for[3]))
            shape_rev = tf.shape(char_rev_embedding_batch)
            char_rev_embedding = tf.reshape(char_rev_embedding_batch,
                                            (shape_rev[0] * shape_rev[1], shape_rev[2], shape_rev[3]))
            char_lstm_for_states = char_lstm_for.link(char_for_embedding)
            char_lstm_rev_states = char_lstm_rev.link(char_rev_embedding)
            char_lstm_for_h_trans = tf.transpose(char_lstm_for_states[1], (1, 0, 2), name='char_lstm_for_h_trans')
            char_lstm_rev_h_trans = tf.transpose(char_lstm_rev_states[1], (1, 0, 2), name='char_lstm_rev_h_trans')
            # Pick the hidden state at each word's last character.
            char_for_output = tf.gather_nd(char_lstm_for_h_trans, self.char_pos_ids, name='char_for_output')
            char_rev_output = tf.gather_nd(char_lstm_rev_h_trans, self.char_pos_ids, name='char_rev_output')
            char_for_output_batch = tf.reshape(char_for_output, (shape_for[0], shape_for[1], char_lstm_dim))
            char_rev_output_batch = tf.reshape(char_rev_output, (shape_rev[0], shape_rev[1], char_lstm_dim))
            inputs.append(char_for_output_batch)
            if char_bidirect:
                inputs.append(char_rev_output_batch)
                input_dim += char_lstm_dim
        inputs = tf.concat(inputs, axis=-1)
        # Dropout on final input
        assert dropout < 1 and 0.0 <= dropout
        if dropout:
            input_train = tf.nn.dropout(inputs, 1 - dropout)
            if is_train:
                inputs = input_train
        # LSTM for words
        word_lstm_for = LSTM(input_dim, word_lstm_dim, with_batch=True,
                             name='word_lstm_for')
        word_lstm_rev = LSTM(input_dim, word_lstm_dim, with_batch=True,
                             name='word_lstm_rev')
        # forward hidden output
        word_states_for = word_lstm_for.link(inputs)
        word_lstm_for_output = tf.transpose(word_states_for[1], (1, 0, 2), name='word_lstm_for_h_trans')
        # reverse hidden output (reverse only the valid prefix of each sequence)
        inputs_rev = tf.reverse_sequence(inputs, self.word_pos_ids, seq_dim=1, batch_dim=0)
        word_states_rev = word_lstm_rev.link(inputs_rev)
        word_lstm_rev_h_trans = tf.transpose(word_states_rev[1], (1, 0, 2), name='word_lstm_rev_h_trans')
        word_lstm_rev_output = tf.reverse_sequence(word_lstm_rev_h_trans, self.word_pos_ids, seq_dim=1, batch_dim=0)
        if word_bidirect:
            final_output = tf.concat([word_lstm_for_output, word_lstm_rev_output], axis=-1)
            tanh_layer = HiddenLayer(2 * word_lstm_dim, word_lstm_dim, name='tanh_layer', activation='tanh')
            final_output = tanh_layer.link(final_output)
        else:
            final_output = word_lstm_for_output
        final_layer = HiddenLayer(word_lstm_dim, n_tags, name='final_layer')
        tags_scores = final_layer.link(final_output)
        # No CRF
        if not crf:
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.tag_ids, logits=tags_scores, name='xentropy')
            cost = tf.reduce_mean(cross_entropy, name='xentropy_mean')
        else:
            # CRF: two extra tag states (begin/end) with large negative
            # scores keep them out of the interior of the path.
            transitions = shared((n_tags + 2, n_tags + 2), 'transitions')
            small = -1000
            b_s = np.array([[small] * n_tags + [0, small]]).astype(np.float32)
            e_s = np.array([[small] * n_tags + [small, 0]]).astype(np.float32)
            # Score from tags
            def recurrence_real_score(prev, obs):
                tags_score = obs[0]
                tag_id_index_ = obs[1]
                tag_id_trans_ = obs[2]
                word_pos_ = obs[3] + 1
                tags_score_slice = tags_score[0:word_pos_, :]
                tag_id_index_slice = tag_id_index_[0:word_pos_, :]
                tag_id_trans_slice = tag_id_trans_[0:(word_pos_+1), :]
                real_path_score = tf.reduce_sum(tf.gather_nd(tags_score_slice, tag_id_index_slice))
                real_path_score += tf.reduce_sum(tf.gather_nd(transitions, tag_id_trans_slice))
                return tf.reshape(real_path_score, [])
            real_path_score_list = tf.scan(fn=recurrence_real_score, elems=[tags_scores, self.tag_id_index, self.tag_id_trans, self.word_pos_ids], initializer=0.0)
            def recurrence_all_path(prev, obs):
                tags_score = obs[0]
                word_pos_ = obs[1] + 1
                tags_score_slice = tags_score[0:word_pos_, :]
                s_len = tf.shape(tags_score_slice)[0]
                obvs = tf.concat([tags_score_slice, small * tf.ones((s_len, 2))], axis=1)
                observations = tf.concat([b_s, obvs, e_s], axis=0)
                all_paths_scores = forward(observations, transitions)
                return tf.reshape(all_paths_scores, [])
            all_paths_scores_list = tf.scan(fn=recurrence_all_path, elems=[tags_scores, self.word_pos_ids], initializer=0.0)
            # Negative log-likelihood averaged over the batch.
            cost = - tf.reduce_mean(real_path_score_list - all_paths_scores_list)
        # Network parameters
        if not crf:
            f_score = tf.nn.softmax(tags_scores)
        else:
            def recurrence_predict(prev, obs):
                tags_score = obs[0]
                word_pos_ = obs[1] + 1
                tags_score_slice = tags_score[0:word_pos_, :]
                s_len = tf.shape(tags_score_slice)[0]
                obvs = tf.concat([tags_score_slice, small * tf.ones((s_len, 2))], axis=1)
                observations = tf.concat([b_s, obvs, e_s], axis=0)
                all_paths_scores = forward(observations, transitions, viterbi=True, return_alpha=False, return_best_sequence=True)
                # Pad the decoded sequence back to the batch's max length.
                all_paths_scores = tf.concat([all_paths_scores, tf.zeros([tf.shape(tags_score)[0]-s_len], tf.int32)], axis=0)
                return all_paths_scores
            f_score = tf.scan(fn=recurrence_predict, elems=[tags_scores, self.word_pos_ids], initializer=tf.zeros([tf.shape(tags_scores)[1]+2], tf.int32))
        # Optimization
        tvars = tf.trainable_variables()
        grads = tf.gradients(cost, tvars)
        if clip_norm > 0:
            grads, _ = tf.clip_by_global_norm(grads, clip_norm)
        if lr_method == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(lr_rate)
        elif lr_method == 'adagrad':
            optimizer = tf.train.AdagradOptimizer(lr_rate)
        elif lr_method == 'adadelta':
            optimizer = tf.train.AdadeltaOptimizer(lr_rate)
        elif lr_method == 'adam':
            optimizer = tf.train.AdamOptimizer(lr_rate)
        elif lr_method == 'rmsprop':
            optimizer = tf.train.RMSPropOptimizer(lr_rate)
        else:
            # BUG FIX: `raise("...")` tried to raise a plain string, which
            # itself raises "TypeError: exceptions must derive from
            # BaseException" and hides the real problem.
            raise ValueError("Not implemented learning method: %s" % lr_method)
        train_op = optimizer.apply_gradients(zip(grads, tvars))
        return cost, f_score, train_op
| 49.823755 | 163 | 0.607967 | 12,839 | 0.987312 | 0 | 0 | 0 | 0 | 0 | 0 | 2,278 | 0.175177 |
bff41132c693f640921216d313ea2aaf31ff07fc | 1,076 | py | Python | revolt/plugins/commands/context.py | ppotatoo/revolt.py | 56d09b050e26898c6051eaee3fdd268a5b16bc22 | [
"MIT"
] | null | null | null | revolt/plugins/commands/context.py | ppotatoo/revolt.py | 56d09b050e26898c6051eaee3fdd268a5b16bc22 | [
"MIT"
] | null | null | null | revolt/plugins/commands/context.py | ppotatoo/revolt.py | 56d09b050e26898c6051eaee3fdd268a5b16bc22 | [
"MIT"
] | null | null | null | from revolt.types.file import File
from revolt.embed import Embed
from revolt.message import Message
from typing import Optional
class Context:
def __init__(self, message, bot):
self.message = message
self.bot = bot
async def send(self, content: Optional[str] = None, embeds: Optional[list[Embed]] = None, embed: Optional[Embed] = None, attachments: Optional[list[File]] = None) -> Message:
"""Sends a message in a channel, you must send at least one of either `content`, `embeds` or `attachments`
Parameters
-----------
content: Optional[:class:`str`]
The content of the message, this will not include system message's content
attachments: Optional[list[:class:`File`]]
The attachments of the message
embeds: Optional[list[:class:`Embed`]]
The embeds of the message
Returns
--------
:class:`Message`
The message that was just sent
"""
return await self.message.channel.send(content, embeds, embed, attachments) | 37.103448 | 178 | 0.635688 | 945 | 0.878253 | 0 | 0 | 0 | 0 | 833 | 0.774164 | 566 | 0.526022 |
bff7028914bc52a8b384670d2126ddb356c5fd23 | 2,860 | py | Python | 372_palanced.py | RLeary/rdp_challenges | b14cc27bf3afd1d373f26e5d154e6c0cf3d7ca05 | [
"MIT"
] | null | null | null | 372_palanced.py | RLeary/rdp_challenges | b14cc27bf3afd1d373f26e5d154e6c0cf3d7ca05 | [
"MIT"
] | null | null | null | 372_palanced.py | RLeary/rdp_challenges | b14cc27bf3afd1d373f26e5d154e6c0cf3d7ca05 | [
"MIT"
] | null | null | null | """Given a string containing only the characters x and y, find whether there are the same number of xs and ys.
balanced("xxxyyy") => true
balanced("yyyxxx") => true
balanced("xxxyyyy") => false
balanced("yyxyxxyxxyyyyxxxyxyx") => true
balanced("xyxxxxyyyxyxxyxxyy") => false
balanced("") => true
balanced("x") => false
"""
"""Optional bonus
Given a string containing only lowercase letters, find whether every letter that appears in the string appears the same number of times. Don't forget to handle the empty string ("") correctly!
balanced_bonus("xxxyyyzzz") => true
balanced_bonus("abccbaabccba") => true
balanced_bonus("xxxyyyzzzz") => false
balanced_bonus("abcdefghijklmnopqrstuvwxyz") => true
balanced_bonus("pqq") => false
balanced_bonus("fdedfdeffeddefeeeefddf") => false
balanced_bonus("www") => true
balanced_bonus("x") => true
balanced_bonus("") => true
Note that balanced_bonus behaves differently than balanced for a few inputs, e.g. "x".
"""
def balanced(string):
    """Return True iff *string* contains equal numbers of 'x' and 'y'.

    Any other character aborts the process: a message is printed and
    exit() is called, exactly as the original behaviour.
    """
    counts = {'x': 0, 'y': 0}
    for symbol in string:
        if symbol in counts:
            counts[symbol] += 1
        else:
            print("Strings may only contain 'x' or 'y'. Exiting")
            exit()
    return counts['x'] == counts['y']
# From reddit comments
def balanced_bonus(string):
    """Return True iff every character occurring in *string* occurs the
    same number of times (trivially True for the empty string)."""
    if not string:
        return True
    occurrences = {}
    for ch in string:
        occurrences[ch] = occurrences.get(ch, 0) + 1
    distinct_counts = set(occurrences.values())
    return len(distinct_counts) == 1
print("balanced(\"xxxyyy\"): ", balanced("xxxyyy"))
print("balanced(\"yyyxxx\"): ", balanced("yyyxxx"))
print("balanced(\"xxxyyyy\"): ", balanced("xxxyyyy"))
print("balanced(\"yyxyxxyxxyyyyxxxyxyx\"): ", balanced("yyxyxxyxxyyyyxxxyxyx"))
print("balanced(\"xyxxxxyyyxyxxyxxyy\"): ", balanced("xyxxxxyyyxyxxyxxyy"))
print("balanced(\"\"): ", balanced(""))
print("balanced(\"x\"): ", balanced("x"))
#print("balanced(\"xxxyyya\"): ", balanced("xxxyyya"))
print("balanced_bonus(\"xxxyyyzzz\"): ", balanced_bonus("xxxyyyzzz"))
print("balanced_bonus(\"abccbaabccba\"): ", balanced_bonus("abccbaabccba"))
print("balanced_bonus(\"xxxyyyzzzz\"): ", balanced_bonus("xxxyyyzzzz"))
print("balanced_bonus(\"abcdefghijklmnopqrstuvwxyz\"): ", balanced_bonus("abcdefghijklmnopqrstuvwxyz"))
print("balanced_bonus(\"pqq\"): ", balanced_bonus("pqq"))
print("balanced_bonus(\"fdedfdeffeddefeeeefddf\"): ", balanced_bonus("fdedfdeffeddefeeeefddf"))
print("balanced_bonus(\"www\"): ", balanced_bonus("www"))
print("balanced_bonus(\"\"): ", balanced_bonus(""))
print("balanced_bonus(\"x\"): ", balanced_bonus("x"))
# one line solution from reddit:
#def balanced(string):
# return string.lower().count("x") == string.lower().count("y") | 35.308642 | 193 | 0.662587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,895 | 0.662587 |
bff7d21b750936973cbd6252e64cc8e48e9a1836 | 4,108 | py | Python | src/gnnff/utils/data.py | ken2403/gnnff | 13e2428ef2fe6524572356f8df8412e91a3690af | [
"MIT"
] | null | null | null | src/gnnff/utils/data.py | ken2403/gnnff | 13e2428ef2fe6524572356f8df8412e91a3690af | [
"MIT"
] | null | null | null | src/gnnff/utils/data.py | ken2403/gnnff | 13e2428ef2fe6524572356f8df8412e91a3690af | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from gnnff.data.keys import Keys
from gnnff.data.split import train_test_split
__all__ = ["get_loader"]
def get_loader(dataset, args, split_path, logging=None):
    """
    Build the train / validation / test data loaders for a cell dataset.

    Parameters
    ----------
    dataset : gnnff.data.Celldata
        dataset of cell.
    args : Namespace
        Namespace dict.
    split_path : str
        path to split file.
    logging : logging
        logger

    Returns
    -------
    train_data, val_loader, test_loader : torch.utils.data.DataLoader
        test_loader is None when the test split is empty.
    """
    # In "train" mode create fresh splits from args.split; otherwise reuse
    # the split file already on disk.
    if args.mode == "train":
        if logging is not None:
            logging.info("create splits...")
        data_train, data_val, data_test = train_test_split(
            dataset, *args.split, split_file=split_path
        )
    else:
        if logging is not None:
            logging.info("loading exiting split file ...")
        data_train, data_val, data_test = train_test_split(
            dataset, split_file=split_path
        )
    if logging is not None:
        logging.info("create data loader ...")
    train_loader = DataLoader(
        dataset=data_train,
        batch_size=args.batch_size,
        sampler=RandomSampler(data_train),
        num_workers=4,
        pin_memory=args.cuda,
        collate_fn=_collate_aseatoms,
    )
    # Validation and test loaders share the same configuration.
    eval_kwargs = dict(
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=2,
        pin_memory=args.cuda,
        collate_fn=_collate_aseatoms,
    )
    val_loader = DataLoader(dataset=data_val, **eval_kwargs)
    test_loader = (
        DataLoader(dataset=data_test, **eval_kwargs)
        if len(data_test) != 0
        else None
    )
    return train_loader, val_loader, test_loader
def _collate_aseatoms(examples):
    """
    Build batch from systems and properties & apply padding

    Parameters
    ----------
    examples : list
        list of property dicts (str -> torch.Tensor), one per system.

    Returns
    -------
    dict : [str->torch.Tensor]
        mini-batch of atomistic systems, zero-padded to the largest
        per-property shape in the batch, with neighbor/atom masks added
        when they are not already present.

    References
    ----------
    .. [1] https://github.com/ken2403/schnetpack/blob/master/src/schnetpack/data/loader.py
    """
    properties = examples[0]
    # initialize maximum sizes
    max_size = {
        prop: np.array(val.size(), dtype=np.int32) for prop, val in properties.items()
    }
    # get maximum sizes
    for properties in examples[1:]:
        for prop, val in properties.items():
            max_size[prop] = np.maximum(
                max_size[prop], np.array(val.size(), dtype=np.int32)
            )
    # initialize batch
    # Zero tensors of shape (batch, *max_size), matching each property's dtype.
    batch = {
        p: torch.zeros(len(examples), *[int(ss) for ss in size]).type(
            examples[0][p].type()
        )
        for p, size in max_size.items()
    }
    has_atom_mask = Keys.atom_mask in batch.keys()
    has_neighbor_mask = Keys.neighbor_mask in batch.keys()
    if not has_neighbor_mask:
        batch[Keys.neighbor_mask] = torch.zeros_like(batch[Keys.neighbors]).float()
    if not has_atom_mask:
        batch[Keys.atom_mask] = torch.zeros_like(batch[Keys.Z]).float()
    # build batch and pad
    for k, properties in enumerate(examples):
        for prop, val in properties.items():
            shape = val.size()
            s = (k,) + tuple([slice(0, d) for d in shape])
            batch[prop][s] = val
        # add mask
        if not has_neighbor_mask:
            # Negative neighbor indices denote padding; mask them out and
            # clamp the padded indices to 0 so they are valid gather indices.
            nbh = properties[Keys.neighbors]
            shape = nbh.size()
            s = (k,) + tuple([slice(0, d) for d in shape])
            mask = nbh >= 0
            batch[Keys.neighbor_mask][s] = mask
            batch[Keys.neighbors][s] = nbh * mask.long()
        if not has_atom_mask:
            # Atomic number 0 marks padded atom slots.
            z = properties[Keys.Z]
            shape = z.size()
            s = (k,) + tuple([slice(0, d) for d in shape])
            batch[Keys.atom_mask][s] = z > 0
    return batch
| 28.136986 | 90 | 0.590798 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 907 | 0.220789 |
bff840c3000c3f2fd947707aa1ec3f8be36ae525 | 1,510 | py | Python | setup.py | canvasslabs/packageurl-python | ea036912050ddeecff4741486de0d992fbe6bb0c | [
"MIT"
] | null | null | null | setup.py | canvasslabs/packageurl-python | ea036912050ddeecff4741486de0d992fbe6bb0c | [
"MIT"
] | null | null | null | setup.py | canvasslabs/packageurl-python | ea036912050ddeecff4741486de0d992fbe6bb0c | [
"MIT"
] | 1 | 2018-10-06T21:40:41.000Z | 2018-10-06T21:40:41.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from __future__ import print_function
from glob import glob
from os.path import basename
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
# Package metadata for packageurl-python; sources live under src/ and every
# top-level src/*.py module is exposed as a py_module.
setup(
    name='packageurl-python',
    version='0.9.4',
    license='MIT',
    description='A "purl" aka. Package URL parser and builder',
    long_description='Python library to parse and build "purl" aka. Package URLs. '
                     'This is a microlibrary implementing the purl spec at https://github.com/package-url',
    author='the purl authors',
    url='https://github.com/package-url/packageurl-python',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    keywords='package, url, package manager, package url',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities',
    ],
)
| 32.826087 | 90 | 0.660927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 828 | 0.548344 |
bff9635e2cdc0eb91f5d1e1167976170aacd1110 | 747 | py | Python | fduser/cms_plugins.py | ForumDev/forumdev-user | 1d5e03531466c30160c2b4cb9a48ca9e7bda61ba | [
"MIT"
] | null | null | null | fduser/cms_plugins.py | ForumDev/forumdev-user | 1d5e03531466c30160c2b4cb9a48ca9e7bda61ba | [
"MIT"
] | null | null | null | fduser/cms_plugins.py | ForumDev/forumdev-user | 1d5e03531466c30160c2b4cb9a48ca9e7bda61ba | [
"MIT"
] | 1 | 2020-10-12T06:28:18.000Z | 2020-10-12T06:28:18.000Z | from cms.plugin_base import CMSPluginBase
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_pool import plugin_pool
from fduser import models
from django.utils.translation import ugettext as _
from django.contrib.sites.models import Site
class Users(CMSPluginBase):
    """django CMS plugin rendering the list of all users."""
    model = CMSPlugin # Model where data about this plugin is saved
    module = _("UserList")
    name = _("User List") # Name of the plugin
    render_template = "fduser/list.html" # template to render the plugin with
    def render(self, context, instance, placeholder):
        """Add the user queryset and current site to the template context."""
        # NOTE(review): assigns the bound `all` method (no parentheses);
        # Django templates call callables lazily so this works in the
        # template, but confirm it is intentional.
        context['users'] = models.User.objects.all
        context['site'] = Site.objects.get_current()
        return context
plugin_pool.register_plugin(Users) # register the plugin
| 35.571429 | 78 | 0.740295 | 436 | 0.583668 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.232932 |
bff996f616a6fc45be900334f0ea9b69e69ce5c8 | 5,214 | py | Python | us_pls/_download/download_service.py | drawjk705/us-pls | 6b9a696b18c18de81b279862f182fb990604b998 | [
"MIT"
] | null | null | null | us_pls/_download/download_service.py | drawjk705/us-pls | 6b9a696b18c18de81b279862f182fb990604b998 | [
"MIT"
] | null | null | null | us_pls/_download/download_service.py | drawjk705/us-pls | 6b9a696b18c18de81b279862f182fb990604b998 | [
"MIT"
] | null | null | null | # pyright: reportUnknownMemberType=false
import logging
import zipfile
from pathlib import Path
from typing import Dict
import requests
from us_pls._config import Config
from us_pls._download.interface import IDownloadService
from us_pls._download.models import DatafileType, DownloadType
from us_pls._logger.interface import ILoggerFactory
from us_pls._persistence.interface import IOnDiskCache
from us_pls._scraper.interface import IScrapingService
BASE_URL = "https://www.imls.gov"
class DownloadService(IDownloadService):
    """Downloads IMLS Public Libraries Survey artifacts for the configured year.

    Scrapes the IMLS site for the year's resource links, downloads the
    documentation, data-element definitions, and zipped CSV data, unpacks the
    zip, normalizes the extracted file names, and sanitizes the README — all
    through the injected on-disk cache.
    """

    # Collaborators (injected via __init__).
    _config: Config
    _scraper: IScrapingService
    _cache: IOnDiskCache
    _logger: logging.Logger

    def __init__(
        self,
        config: Config,
        scraper: IScrapingService,
        cache: IOnDiskCache,
        logger_factory: ILoggerFactory,
    ) -> None:
        """Store collaborators and create a module-scoped logger."""
        self._config = config
        self._scraper = scraper
        self._cache = cache
        self._logger = logger_factory.get_logger(__name__)

    def download(self) -> None:
        """Download all resources for ``self._config.year``.

        No-op (with an info log) when the scraper has no entry for that year.
        """
        scraped_dict = self._scrape_files()

        scraped_dict_for_year = scraped_dict.get(str(self._config.year))

        if scraped_dict_for_year is None:
            self._logger.info(f"There is no data for {self._config.year}")
            return

        self._try_download_resource(
            scraped_dict_for_year, "Documentation", DownloadType.Documentation
        )
        self._try_download_resource(scraped_dict_for_year, "CSV", DownloadType.CsvZip)
        self._try_download_resource(
            scraped_dict_for_year,
            "Data Element Definitions",
            DownloadType.DataElementDefinitions,
        )

        # The zip extracted above may contain a readme with non-ASCII bytes;
        # sanitize it last, after _move_content has renamed it to README.txt.
        self._clean_up_readme()

    def _try_download_resource(
        self, scraped_dict: Dict[str, str], resource: str, download_type: DownloadType
    ) -> None:
        """Download one scraped resource unless it is missing or already cached.

        :param scraped_dict: resource-name -> URL route mapping for the year
        :param resource: key to look up in ``scraped_dict``
        :param download_type: where/how the payload is persisted
        """
        route = scraped_dict.get(resource)

        self._logger.debug(f"Trying to download {resource}")

        if route is None:
            self._logger.warning(
                f"The resource `{resource}` does not exist for {self._config.year}"
            )
            return

        if self._resource_already_exists(download_type):
            self._logger.debug(
                f"Resources have already been downloaded for {download_type.value}"
            )
            return

        # Scraped routes may or may not carry a leading slash; normalize so we
        # never produce a double slash after BASE_URL.
        url = f"{BASE_URL}/{route[1:] if route.startswith('/') else route}"

        res = requests.get(url)

        if res.status_code != 200:
            msg = f"Received a non-200 status code for {url}: {res.status_code}"

            self._logger.warning(msg)
            return

        self._write_content(
            download_type,
            res.content,
            # Only the CSV bundle is a zip; detect via the configured filename.
            should_unzip=str(download_type.value).endswith(".zip"),
        )

    def _resource_already_exists(self, download_type: DownloadType) -> bool:
        """Return True if the resource is already fully present in the cache.

        For the CSV zip, "present" means every extracted datafile exists (the
        zip itself is deleted after extraction, so it cannot be checked).
        """
        if download_type in [
            DownloadType.Documentation,
            DownloadType.DataElementDefinitions,
        ]:
            return self._cache.exists(download_type.value)
        elif download_type == DownloadType.CsvZip:
            return all(
                [
                    self._cache.exists(str(datafile_type.value))
                    for datafile_type in DatafileType
                ]
            )

        return False

    def _write_content(
        self, download_type: DownloadType, content: bytes, should_unzip: bool = False
    ) -> None:
        """Persist ``content``; when it is a zip, extract, normalize, and delete it.

        :param download_type: cache key/filename for the raw payload
        :param content: raw response body
        :param should_unzip: extract into the cache dir and remove the archive
        """
        self._cache.put(content, download_type.value)

        if should_unzip:
            zip_path = self._cache.cache_path / Path(download_type.value)
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                zip_ref.extractall(self._cache.cache_path)
            # Flatten any subdirectories and apply canonical file names.
            self._move_content()
            # The archive has served its purpose; keep only the extracted files.
            self._cache.remove(zip_path)

    def _move_content(self) -> None:
        """Flatten the cache dir: rename files in place, hoist files out of
        subdirectories (via ``_rename``), then remove the emptied directories."""
        for path in self._cache.cache_path.iterdir():
            if not path.is_dir():
                self._rename(path)
                continue

            for sub_path in path.iterdir():
                self._rename(sub_path)

            self._cache.remove(path)

    def _rename(self, path: Path) -> None:
        """Map an extracted file's name to its canonical ``DatafileType`` name.

        Unrecognized names are kept as-is (``new_name`` stays ``path.name``).

        NOTE(review): the fallbacks match bare substrings ("ld", "out", "sum"),
        which could misclassify an unexpected filename — presumably tuned to
        the naming used across survey years; verify against the actual archives.
        """
        new_name: str = path.name

        if "_ae_" in path.name.lower() or "ld" in path.name.lower():
            new_name = DatafileType.SystemData.value
        elif "_outlet_" in path.name.lower() or "out" in path.name.lower():
            new_name = DatafileType.OutletData.value
        elif "_state_" in path.name.lower() or "sum" in path.name.lower():
            new_name = DatafileType.SummaryData.value
        elif "readme" in path.name.lower():
            new_name = "README.txt"

        self._cache.rename(path, Path(new_name))

    def _clean_up_readme(self) -> None:
        """Rewrite README.txt with non-ASCII characters replaced by ``'``.

        Older readmes contain mis-encoded quote characters; surrogateescape
        decoding keeps the bad bytes so they can be substituted here.
        """
        self._logger.debug("Cleaning up readme")

        readme_text = self._cache.get(
            "README.txt",
            "txt",
            encoding="utf-8",
            errors="surrogateescape",
        )

        if readme_text is None:
            self._logger.debug("No readme exists for this year")
            return

        # Replace every non-ASCII code point (including escape surrogates)
        # with an apostrophe.
        cleaned_readme_text = "".join([c if ord(c) < 128 else "'" for c in readme_text])

        self._cache.put(
            bytes(cleaned_readme_text, "utf-8"),
            "README.txt",
        )
| 30.670588 | 88 | 0.615075 | 4,722 | 0.905639 | 0 | 0 | 0 | 0 | 0 | 0 | 625 | 0.11987 |
bff9a7b958be7c7bb61197dabe8314ce6f7b3270 | 332 | py | Python | dlbot/dlbot.py | sergyp/dlbot | 6b23e14fc3022189d7ba2e7e2b70434c11e9af99 | [
"MIT"
] | null | null | null | dlbot/dlbot.py | sergyp/dlbot | 6b23e14fc3022189d7ba2e7e2b70434c11e9af99 | [
"MIT"
] | 1 | 2017-11-28T04:29:46.000Z | 2017-11-28T04:29:46.000Z | dlbot/dlbot.py | sergyp/dlbot | 6b23e14fc3022189d7ba2e7e2b70434c11e9af99 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from dlbot import default_settings
from flask import Flask
# Application factory-style setup: instance_relative_config makes
# from_pyfile resolve relative to the instance folder.
app = Flask(__name__, instance_relative_config=True)
# Load package defaults first, then optionally override from instance/dlbot.cfg
# (silent=True: missing config file is not an error).
app.config.from_object(default_settings)
app.config.from_pyfile('dlbot.cfg', silent=True)


@app.route('/')
def hello_world():
    """Placeholder index route."""
    return 'Hello, World!'


if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
| 19.529412 | 52 | 0.740964 | 0 | 0 | 0 | 0 | 61 | 0.183735 | 0 | 0 | 60 | 0.180723 |
bffa85e9bedbc5efa7cc9534442416b01817938b | 95 | py | Python | sourcecode/pathwaysearch/fingerprint/__init__.py | CC-SXF/PyMiner | d103d54eeaa5653c8f8bc03f78fd4a96e0acefe7 | [
"MIT"
] | 2 | 2022-01-20T07:38:00.000Z | 2022-01-20T07:56:39.000Z | sourcecode/pathwaysearch/fingerprint/__init__.py | CC-SXF/PyMiner | d103d54eeaa5653c8f8bc03f78fd4a96e0acefe7 | [
"MIT"
] | null | null | null | sourcecode/pathwaysearch/fingerprint/__init__.py | CC-SXF/PyMiner | d103d54eeaa5653c8f8bc03f78fd4a96e0acefe7 | [
"MIT"
] | 1 | 2022-01-11T14:29:48.000Z | 2022-01-11T14:29:48.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 28 10:15:21 2021
@author: CC-SXF
"""
| 10.555556 | 36 | 0.505263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.926316 |