hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2b0ac647bbd3deb1f083b056eed2ce34e523bc7a | 413 | py | Python | http_async_client/enums.py | jossefaz/async-http-client | 74fe7540d1781d64d41161350019bc5ccfec6426 | [
"MIT"
] | 1 | 2021-12-05T21:01:59.000Z | 2021-12-05T21:01:59.000Z | http_async_client/enums.py | jossefaz/http-async-client | 74fe7540d1781d64d41161350019bc5ccfec6426 | [
"MIT"
] | null | null | null | http_async_client/enums.py | jossefaz/http-async-client | 74fe7540d1781d64d41161350019bc5ccfec6426 | [
"MIT"
] | null | null | null | from enum import Enum
class SupportedProtocols(Enum):
    """Enumerate the URL schemes this client can speak.

    Only plain HTTP and TLS-secured HTTPS are available for now; each
    member's value is the scheme string exactly as it appears in a URL.
    """
    http = "http"    # plain-text HTTP
    https = "https"  # HTTP over TLS
class Methods(Enum):
    """Contains a list of all supported HTTP Verbs.

    Each member's value is the verb string sent on the wire
    (GET, POST, PUT, PATCH, DELETE).
    """
    get = "GET"
    post = "POST"
    put = "PUT"
    patch = "PATCH"
    delete = "DELETE" | 19.666667 | 80 | 0.583535 | from enum import Enum
class SupportedProtocols(Enum):
    """Contains a list of all supported protocols (currently only http and https).

    Values:
        http : "http"
        https : "https"
    """
    http = "http"
    https = "https"
class Methods(Enum):
    """Contains a list of all supported HTTP Verbs.

    Each member's value is the verb string sent on the wire
    (GET, POST, PUT, PATCH, DELETE).
    """
    get = "GET"
    post = "POST"
    put = "PUT"
    patch = "PATCH"
    delete = "DELETE" | 0 | 0 | 0 |
aa433084afdee825b776b336960d26127329bc76 | 104 | py | Python | conet/dataset_utils/bengali_convert.py | steermomo/conet | 21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b | [
"MIT"
] | null | null | null | conet/dataset_utils/bengali_convert.py | steermomo/conet | 21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b | [
"MIT"
] | null | null | null | conet/dataset_utils/bengali_convert.py | steermomo/conet | 21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b | [
"MIT"
] | 1 | 2020-05-18T10:05:24.000Z | 2020-05-18T10:05:24.000Z | from conet.datasets.bengali_dataset import DATA_DIR
from sklearn.model_selection import StratifiedKFold
| 34.666667 | 51 | 0.894231 | from conet.datasets.bengali_dataset import DATA_DIR
from sklearn.model_selection import StratifiedKFold
| 0 | 0 | 0 |
ca077393cfe3d9b562b9a2aeb65df8b81d21219b | 642 | py | Python | project/payments/payment_methods/payments_stripe/views.py | steetstyle/Django-Ecommerce-API | 89c2c973e560346a5be74019709dc9a9f8ab7b2a | [
"MIT"
] | null | null | null | project/payments/payment_methods/payments_stripe/views.py | steetstyle/Django-Ecommerce-API | 89c2c973e560346a5be74019709dc9a9f8ab7b2a | [
"MIT"
] | null | null | null | project/payments/payment_methods/payments_stripe/views.py | steetstyle/Django-Ecommerce-API | 89c2c973e560346a5be74019709dc9a9f8ab7b2a | [
"MIT"
] | null | null | null | from django.http import JsonResponse
import stripe
# Create your views here. | 32.1 | 69 | 0.646417 | from django.http import JsonResponse
import stripe
# Create your views here.
def custom_webhook():
    """Create a Stripe PaymentIntent and the keys a mobile client needs.

    Returns a JsonResponse carrying the PaymentIntent client secret, an
    ephemeral key, and the (freshly created) customer id — the payload
    shape used by Stripe's mobile PaymentSheet flow (verify against the
    consuming client).

    NOTE(review): despite the name "webhook", this endpoint always creates
    a brand-new Customer and a fixed 10.99 TRY charge; the amount and the
    missing customer-reuse look like placeholders — confirm before
    production use.
    """
    # Use an existing Customer ID if this is a returning customer
    customer = stripe.Customer.create()
    # The ephemeral key lets the client act on this customer for one session.
    ephemeralKey = stripe.EphemeralKey.create(
        customer=customer['id'],
        stripe_version='2020-08-27',
    )
    # amount is in the currency's smallest unit: 1099 == 10.99 TRY.
    paymentIntent = stripe.PaymentIntent.create(
        amount=1099,
        currency='try',
        customer=customer['id']
    )
    return JsonResponse({"paymentIntent":paymentIntent.client_secret,
        "ephemeralKey":ephemeralKey.secret,
        "customer":customer.id}) | 542 | 0 | 22 |
fa2f98208db00208b3176b879fdcf683b48c7520 | 1,106 | py | Python | cffi/build.py | chaichairiko/libyang-python | 46c2b1afc653c0b135c9fb70864314c38c909277 | [
"MIT"
] | null | null | null | cffi/build.py | chaichairiko/libyang-python | 46c2b1afc653c0b135c9fb70864314c38c909277 | [
"MIT"
] | null | null | null | cffi/build.py | chaichairiko/libyang-python | 46c2b1afc653c0b135c9fb70864314c38c909277 | [
"MIT"
] | null | null | null | # Copyright (c) 2018-2019 Robin Jarry
# SPDX-License-Identifier: MIT
import os
import shlex
from typing import List
import cffi
HERE = os.path.dirname(__file__)
BUILDER = cffi.FFI()
with open(os.path.join(HERE, "cdefs.h")) as f:
BUILDER.cdef(f.read())
HEADERS = search_paths("LIBYANG_HEADERS")
LIBRARIES = search_paths("LIBYANG_LIBRARIES")
EXTRA_CFLAGS = ["-Werror", "-std=c99"]
EXTRA_CFLAGS += shlex.split(os.environ.get("LIBYANG_EXTRA_CFLAGS", ""))
EXTRA_LDFLAGS = shlex.split(os.environ.get("LIBYANG_EXTRA_LDFLAGS", ""))
with open(os.path.join(HERE, "source.c")) as f:
BUILDER.set_source(
"_libyang",
f.read(),
libraries=["yang"],
extra_compile_args=EXTRA_CFLAGS,
extra_link_args=EXTRA_LDFLAGS,
include_dirs=HEADERS,
library_dirs=LIBRARIES,
py_limited_api=False,
)
if __name__ == "__main__":
BUILDER.compile()
| 23.531915 | 72 | 0.652803 | # Copyright (c) 2018-2019 Robin Jarry
# SPDX-License-Identifier: MIT
import os
import shlex
from typing import List
import cffi
HERE = os.path.dirname(__file__)
BUILDER = cffi.FFI()
with open(os.path.join(HERE, "cdefs.h")) as f:
BUILDER.cdef(f.read())
def search_paths(env_var: str) -> List[str]:
    """Split the colon-separated path list in *env_var* into a clean list.

    Surrounding whitespace is trimmed from every entry and blank entries
    are dropped; an unset or empty variable yields an empty list.
    """
    raw = os.environ.get(env_var, "").strip()
    return [entry.strip() for entry in raw.split(":") if entry.strip()]
HEADERS = search_paths("LIBYANG_HEADERS")
LIBRARIES = search_paths("LIBYANG_LIBRARIES")
EXTRA_CFLAGS = ["-Werror", "-std=c99"]
EXTRA_CFLAGS += shlex.split(os.environ.get("LIBYANG_EXTRA_CFLAGS", ""))
EXTRA_LDFLAGS = shlex.split(os.environ.get("LIBYANG_EXTRA_LDFLAGS", ""))
with open(os.path.join(HERE, "source.c")) as f:
BUILDER.set_source(
"_libyang",
f.read(),
libraries=["yang"],
extra_compile_args=EXTRA_CFLAGS,
extra_link_args=EXTRA_LDFLAGS,
include_dirs=HEADERS,
library_dirs=LIBRARIES,
py_limited_api=False,
)
if __name__ == "__main__":
BUILDER.compile()
| 180 | 0 | 23 |
e96677f83406870340bfdc9b9bd5499663417454 | 1,652 | py | Python | AWS/lambda/postEvent/lambda_function.py | jphacks/A_2104 | 60b623c4e90968deedd92e76a80db6e61fda2682 | [
"MIT"
] | 1 | 2021-10-31T06:20:37.000Z | 2021-10-31T06:20:37.000Z | AWS/lambda/postEvent/lambda_function.py | jphacks/A_2104 | 60b623c4e90968deedd92e76a80db6e61fda2682 | [
"MIT"
] | null | null | null | AWS/lambda/postEvent/lambda_function.py | jphacks/A_2104 | 60b623c4e90968deedd92e76a80db6e61fda2682 | [
"MIT"
] | null | null | null | import os
import json
import urllib
import requests
import datetime
def calc_transportation(body):
    """Return only the transportation (travel) events.

    Builds one travel entry per event in ``body``, scheduled so that the
    trip finishes a 5-minute buffer before the event starts.

    Parameters
    ----------
    body : list of dict
        Calendar events; each needs "location", "mode" and an ISO-8601
        "start" string (presumably the client's POST payload — TODO
        confirm schema against the caller).

    Returns
    -------
    list of dict
        Calendar-style entries titled "移動" (Japanese for "travel") with
        "start"/"end"/"backgroundColor" keys.
    """
    margin = 5*60 #margin before next event start
    ret = []
    for i in range(len(body)):
        indict = {}
        if i==0:
            # First leg departs from the HOME env var (presumably the
            # user's home address — confirm deployment config).
            dur = get_time(os.environ["HOME"],body[i]["location"],body[i]["mode"])
        else:
            # Later legs depart from the previous event's location.
            dur = get_time(body[i-1]["location"],body[i]["location"],body[i]["mode"])
        # Travel occupies [start - travel_time - margin, start - margin].
        t_delta = datetime.timedelta(seconds=(dur+margin))
        t_delta_margin = datetime.timedelta(seconds=margin)
        dt = datetime.datetime.fromisoformat(body[i]["start"])
        indict["title"] = "移動"
        indict["start"] = (dt - t_delta).isoformat()
        indict["end"] = (dt - t_delta_margin).isoformat()
        indict["backgroundColor"] = "#FFCC99"
        ret.append(indict)
    return ret
| 25.415385 | 96 | 0.596852 | import os
import json
import urllib
import requests
import datetime
def get_time(origins,destinations,mode):
    """Look up travel time between two places via a Distance-Matrix-style API.

    Parameters
    ----------
    origins, destinations : str
        Start and end locations, passed to the remote API unchanged.
    mode : str
        Travel mode (e.g. "driving"/"transit"; the exact accepted values
        are defined by the remote API — TODO confirm).

    Returns
    -------
    int
        Travel duration in seconds, taken from the first row/element of
        the response (the response shape matches Google's Distance Matrix
        API — verify against the configured API_ENDPOINT).

    Notes
    -----
    Requires the API_KEY and API_ENDPOINT environment variables. There is
    no error handling: a missing variable, network failure or non-JSON
    response raises.
    """
    qs = {"origins":origins,"destinations":destinations,"mode":mode,"key":os.environ["API_KEY"]}
    d_qs = urllib.parse.urlencode(qs)
    # NOTE(review): assumes API_ENDPOINT already ends with '?' since the
    # query string is appended with no separator — confirm the config.
    url = os.environ["API_ENDPOINT"] + d_qs
    payload={}
    headers = {}
    response = requests.request("GET", url, headers=headers, data=payload)
    res = json.loads(response.text)
    return res['rows'][0]['elements'][0]['duration']['value']
def calc_transportation(body):
    """Return only the transportation (travel) events.

    Builds one travel entry per event in ``body``, scheduled so that the
    trip finishes a 5-minute buffer before the event starts.

    Parameters
    ----------
    body : list of dict
        Calendar events; each needs "location", "mode" and an ISO-8601
        "start" string (presumably the client's POST payload — TODO
        confirm schema against the caller).

    Returns
    -------
    list of dict
        Calendar-style entries titled "移動" (Japanese for "travel") with
        "start"/"end"/"backgroundColor" keys.
    """
    margin = 5*60 #margin before next event start
    ret = []
    for i in range(len(body)):
        indict = {}
        if i==0:
            # First leg departs from the HOME env var (presumably the
            # user's home address — confirm deployment config).
            dur = get_time(os.environ["HOME"],body[i]["location"],body[i]["mode"])
        else:
            # Later legs depart from the previous event's location.
            dur = get_time(body[i-1]["location"],body[i]["location"],body[i]["mode"])
        # Travel occupies [start - travel_time - margin, start - margin].
        t_delta = datetime.timedelta(seconds=(dur+margin))
        t_delta_margin = datetime.timedelta(seconds=margin)
        dt = datetime.datetime.fromisoformat(body[i]["start"])
        indict["title"] = "移動"
        indict["start"] = (dt - t_delta).isoformat()
        indict["end"] = (dt - t_delta_margin).isoformat()
        indict["backgroundColor"] = "#FFCC99"
        ret.append(indict)
    return ret
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Builds travel legs for the posted calendar events and answers with a
    200 response whose JSON body is ``[merged events, travel-only events]``.
    """
    posted_events = event["body"]
    travel_legs = calc_transportation(posted_events)
    merged = posted_events + travel_legs
    return {
        'statusCode': 200,
        'body': json.dumps([merged, travel_legs])
    }
| 683 | 0 | 46 |
b7eb75c28aedca28c1d0a029af50f7a64f2e5cb6 | 944 | py | Python | examples/runMinimalExample.py | vmavrikopoulou/NetworkSimulator | fd31e7a8d25c7670b73b6fdfa37570fb28348b3f | [
"MIT"
] | null | null | null | examples/runMinimalExample.py | vmavrikopoulou/NetworkSimulator | fd31e7a8d25c7670b73b6fdfa37570fb28348b3f | [
"MIT"
] | null | null | null | examples/runMinimalExample.py | vmavrikopoulou/NetworkSimulator | fd31e7a8d25c7670b73b6fdfa37570fb28348b3f | [
"MIT"
] | null | null | null | # we need to import this in order to be able to import the simulator
# it does not have to do anything other than be imported.
from util import path
from examples.minimalExample.SimulatorCrownstone import SimulatorCrownstone
from simulator import SimulationGui, JsonFileStore, Simulator
config = JsonFileStore('./minimalExample/config.json').getData()
userModule = JsonFileStore('./minimalExample/userData.json').getData()
root = SimulatorCrownstone(1, 0, 0)
root.root = True
simulatorCrownstones = [
root,
SimulatorCrownstone(2, 5, 3),
SimulatorCrownstone(3, 10, 6),
SimulatorCrownstone(4, 15, 9),
SimulatorCrownstone(5, 15, 13),
]
a = SimulationGui()
a.loadSimulatorCrownstones(simulatorCrownstones)
a.loadConfig(config)
b = Simulator()
b.loadCrownstones(simulatorCrownstones)
b.loadConfig(config)
a.loadSimulator(b) # this will load the user module into the simulator as a broadcaster.
a.startSimulation(2)
#a.run() | 30.451613 | 88 | 0.772246 | # we need to import this in order to be able to import the simulator
# it does not have to do anything other than be imported.
from util import path
from examples.minimalExample.SimulatorCrownstone import SimulatorCrownstone
from simulator import SimulationGui, JsonFileStore, Simulator
config = JsonFileStore('./minimalExample/config.json').getData()
userModule = JsonFileStore('./minimalExample/userData.json').getData()
root = SimulatorCrownstone(1, 0, 0)
root.root = True
simulatorCrownstones = [
root,
SimulatorCrownstone(2, 5, 3),
SimulatorCrownstone(3, 10, 6),
SimulatorCrownstone(4, 15, 9),
SimulatorCrownstone(5, 15, 13),
]
a = SimulationGui()
a.loadSimulatorCrownstones(simulatorCrownstones)
a.loadConfig(config)
b = Simulator()
b.loadCrownstones(simulatorCrownstones)
b.loadConfig(config)
a.loadSimulator(b) # this will load the user module into the simulator as a broadcaster.
a.startSimulation(2)
#a.run() | 0 | 0 | 0 |
e48b5c7f457f1d527389dccb56fae7187cb1b8dd | 711 | py | Python | dmriprep/workflows/dwi/conversions/nii_to_mif/utils.py | GalBenZvi/dmriprep | 7c4c0e1b01e3d941a7fafcbb6b001605cb1f2b0b | [
"Apache-2.0"
] | null | null | null | dmriprep/workflows/dwi/conversions/nii_to_mif/utils.py | GalBenZvi/dmriprep | 7c4c0e1b01e3d941a7fafcbb6b001605cb1f2b0b | [
"Apache-2.0"
] | 1 | 2022-03-22T13:22:18.000Z | 2022-03-22T13:22:18.000Z | dmriprep/workflows/dwi/conversions/nii_to_mif/utils.py | GalBenZvi/dmriprep | 7c4c0e1b01e3d941a7fafcbb6b001605cb1f2b0b | [
"Apache-2.0"
] | null | null | null | def locate_associated_files(in_file: str):
    """
    Locates associated json (and possibly bvec & bval) files.

    Parameters
    ----------
    in_file : str
        Input file.

    Returns
    -------
    list of str
        Paths (or ``traits.Undefined``) of the associated json, bvec and
        bval files, in that order.
    """
    from dmriprep.config import config
    from nipype.interfaces.base import traits
    # NOTE(review): "extenstions" is a pre-existing typo in this local
    # name; left unchanged to keep the code byte-identical.
    associated_extenstions = ["json", "bvec", "bval"]
    layout = config.execution.layout
    output = {}
    for key in associated_extenstions:
        # get_nearest() returns a falsy value when no sibling file with
        # this extension exists; fall back to traits.Undefined (Nipype's
        # "unset" sentinel) in that case.
        output[key] = (
            layout.get_nearest(in_file, extension=key) or traits.Undefined
        )
    return [output.get(key) for key in associated_extenstions]
| 27.346154 | 74 | 0.631505 | def locate_associated_files(in_file: str):
"""
Locates associated json (and possibly bvec & bval) files.
Parameters
----------
in_file : str
Input file.
Returns
-------
Tuple[str, str, str]
Tuple of associated json (and possibly bvec & bval) files.
"""
from dmriprep.config import config
from nipype.interfaces.base import traits
associated_extenstions = ["json", "bvec", "bval"]
layout = config.execution.layout
output = {}
for key in associated_extenstions:
output[key] = (
layout.get_nearest(in_file, extension=key) or traits.Undefined
)
return [output.get(key) for key in associated_extenstions]
| 0 | 0 | 0 |
0a78fe572f3538ff406389eb9f2f11dde5ab5b33 | 1,306 | py | Python | d_immutable_pair.py | DruidGreeneyes/rivet-core.py | 9402f719b4f81f876baa34048e0575d3585842de | [
"MIT"
] | null | null | null | d_immutable_pair.py | DruidGreeneyes/rivet-core.py | 9402f719b4f81f876baa34048e0575d3585842de | [
"MIT"
] | null | null | null | d_immutable_pair.py | DruidGreeneyes/rivet-core.py | 9402f719b4f81f876baa34048e0575d3585842de | [
"MIT"
] | null | null | null | from multipledispatch import dispatch
| 34.368421 | 108 | 0.474732 | from multipledispatch import dispatch
def immutable_pair(left_name='left', right_name='right'):
def wrapper(Cls):
class PairClass(object):
__slots__ = ['_parts']
@dispatch(object, object)
def __init__(self, left, right):
self._parts = (left, right)
def __getitem__(self, part):
if part == left_name or part == 0:
return self._parts[0]
elif part == right_name or part == 1:
return self._parts[1]
else:
raise IndexError("{} only contains "
"entries for {}(0) and {}(1).\n"
"You have asked for: {}".format(str(Cls), left_name, right_name, part))
def __str__(self):
return "|".join(map(str, self._parts))
def __repr__(self):
return str(self)
def __getattribute__(self, s):
try:
x = super(PairClass, self).__getattribute__(s)
except AttributeError:
pass
else:
return x
return self.oInstance.__getattribute__(s)
return PairClass
return wrapper | 1,245 | 0 | 23 |
3bca0bb330dfcde740470c11763d9116b9ce4302 | 280 | py | Python | Apostila_Bimestre_II/da_Apostila/primos_SEM_lista.py | viniciusm0raes/python | c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce | [
"MIT"
] | null | null | null | Apostila_Bimestre_II/da_Apostila/primos_SEM_lista.py | viniciusm0raes/python | c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce | [
"MIT"
] | null | null | null | Apostila_Bimestre_II/da_Apostila/primos_SEM_lista.py | viniciusm0raes/python | c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce | [
"MIT"
] | null | null | null | n1 = int(input("Informe um número: "))
n2 = int(input("Informe um número maior que o anterior: "))
for x in range(n1, n2 + 1):
cont=0
for y in range(n1, x + 1):
if x%y==0:
cont+=1
if cont<=2:
print("{} ".format(y), end="") | 25.454545 | 59 | 0.482143 | n1 = int(input("Informe um número: "))
n2 = int(input("Informe um número maior que o anterior: "))
for x in range(n1, n2 + 1):
cont=0
for y in range(n1, x + 1):
if x%y==0:
cont+=1
if cont<=2:
print("{} ".format(y), end="") | 0 | 0 | 0 |
98b61cbbc8ebb89f425e80f389f82538965ad923 | 5,146 | py | Python | tests/compiler/sample_input/titanic/titanic.py | ngpgn/contextual-ai | deb119395ced5242f243b2b31c074507e96646c0 | [
"Apache-2.0"
] | 83 | 2020-06-17T04:07:29.000Z | 2022-03-12T13:45:24.000Z | tests/compiler/sample_input/titanic/titanic.py | ngpgn/contextual-ai | deb119395ced5242f243b2b31c074507e96646c0 | [
"Apache-2.0"
] | 15 | 2020-06-30T09:22:19.000Z | 2021-11-11T10:52:40.000Z | tests/compiler/sample_input/titanic/titanic.py | ngpgn/contextual-ai | deb119395ced5242f243b2b31c074507e96646c0 | [
"Apache-2.0"
] | 11 | 2020-06-17T17:01:24.000Z | 2022-02-27T18:53:03.000Z | #Libraries
import pickle
# To ignore warnings
import warnings
import numpy as np
import pandas as pd
import re as re
from sklearn.tree import DecisionTreeClassifier
warnings.filterwarnings("ignore")
################################################################################
### Data Ingestion & Splitting
################################################################################
#Load data
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
all_data = [train_data, test_data]
data = pd.concat([train_data, test_data], sort=False)
data.to_csv('titanic.csv', index=False)
################################################################################
### Data & Feature Processing
################################################################################
#Feature 1-2 ready to use, manipulating others
#Feature 3
for data in all_data:
data['family_size'] = data['SibSp'] + data['Parch'] + 1
#Feature 3.1
for data in all_data:
data['is_alone'] = 0
data.loc[data['family_size'] == 1, 'is_alone'] = 1
#Feature 4
for data in all_data:
data['Embarked'] = data['Embarked'].fillna('S')
#Feature 5
for data in all_data:
data['Fare'] = data['Fare'].fillna(data['Fare'].median())
train_data['category_fare'] = pd.qcut(train_data['Fare'], 4)
#Feature 6
for data in all_data:
age_avg = data['Age'].mean()
age_std = data['Age'].std()
age_null = data['Age'].isnull().sum()
random_list = np.random.randint(age_avg - age_std, age_avg + age_std , size = age_null)
data['Age'][np.isnan(data['Age'])] = random_list
data['Age'] = data['Age'].astype(int)
train_data['category_age'] = pd.cut(train_data['Age'], 5)
#Feature 7
for data in all_data:
data['title'] = data['Name'].apply(get_title)
for data in all_data:
data['title'] = data['title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'],'Rare')
data['title'] = data['title'].replace('Mlle','Miss')
data['title'] = data['title'].replace('Ms','Miss')
data['title'] = data['title'].replace('Mme','Mrs')
#Map Data
for data in all_data:
#Mapping Sex
sex_map = { 'female':0 , 'male':1 }
data['Sex'] = data['Sex'].map(sex_map).astype(int)
#Mapping Title
title_map = {'Mr':1, 'Miss':2, 'Mrs':3, 'Master':4, 'Rare':5}
data['title'] = data['title'].map(title_map)
data['title'] = data['title'].fillna(0)
#Mapping Embarked
embark_map = {'S':0, 'C':1, 'Q':2}
data['Embarked'] = data['Embarked'].map(embark_map).astype(int)
#Mapping Fare
data.loc[ data['Fare'] <= 7.91, 'Fare'] = 0
data.loc[(data['Fare'] > 7.91) & (data['Fare'] <= 14.454), 'Fare'] = 1
data.loc[(data['Fare'] > 14.454) & (data['Fare'] <= 31), 'Fare'] = 2
data.loc[ data['Fare'] > 31, 'Fare'] = 3
data['Fare'] = data['Fare'].astype(int)
#Mapping Age
data.loc[ data['Age'] <= 16, 'Age'] = 0
data.loc[(data['Age'] > 16) & (data['Age'] <= 32), 'Age'] = 1
data.loc[(data['Age'] > 32) & (data['Age'] <= 48), 'Age'] = 2
data.loc[(data['Age'] > 48) & (data['Age'] <= 64), 'Age'] = 3
data.loc[ data['Age'] > 64, 'Age'] = 4
#5 Feature Selection
#5.1 Create list of columns to drop
drop_elements = ["Name", "Ticket", "Cabin", "SibSp", "Parch", "family_size"]
#5.3 Drop columns from both data sets
train_data = train_data.drop(drop_elements, axis = 1)
train_data = train_data.drop(['PassengerId','category_fare', 'category_age'], axis = 1)
test_data = test_data.drop(drop_elements, axis = 1)
#5.4 Double check
print("Training data")
print(train_data.head)
print("Test data")
print(test_data.head)
################################################################################
### Model Training & Persist as pickle
################################################################################
#6 Do training with decision tree
X_train = train_data.drop("Survived", axis=1)
Y_train = train_data["Survived"]
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
pkl = open('model.pkl', 'wb')
pickle.dump(decision_tree, pkl)
decision_tree = None
X_train.to_csv("train_data.csv", index=False)
################################################################################
### Model Loading & Inference
################################################################################
#7.1 Prepare prediction data & Model
model_pkl = open('model.pkl', 'rb')
model = pickle.load(model_pkl)
X_test = test_data.drop("PassengerId", axis=1).copy()
#7.2 Do predict
accuracy = round(model.score(X_train, Y_train) * 100, 2)
print('=========================')
print("Model Accuracy: ",accuracy)
print('=========================')
#7.3 Run prediction on entire test data
Y_pred = model.predict(X_test)
result = pd.DataFrame({
"PassengerId":test_data["PassengerId"],
"Survived": Y_pred
})
result.to_csv('result.csv', index = False) | 32.77707 | 139 | 0.547998 | #Libraries
import pickle
# To ignore warnings
import warnings
import numpy as np
import pandas as pd
import re as re
from sklearn.tree import DecisionTreeClassifier
warnings.filterwarnings("ignore")
################################################################################
### Data Ingestion & Splitting
################################################################################
#Load data
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
all_data = [train_data, test_data]
data = pd.concat([train_data, test_data], sort=False)
data.to_csv('titanic.csv', index=False)
################################################################################
### Data & Feature Processing
################################################################################
#Feature 1-2 ready to use, manipulating others
#Feature 3
for data in all_data:
data['family_size'] = data['SibSp'] + data['Parch'] + 1
#Feature 3.1
for data in all_data:
data['is_alone'] = 0
data.loc[data['family_size'] == 1, 'is_alone'] = 1
#Feature 4
for data in all_data:
data['Embarked'] = data['Embarked'].fillna('S')
#Feature 5
for data in all_data:
data['Fare'] = data['Fare'].fillna(data['Fare'].median())
train_data['category_fare'] = pd.qcut(train_data['Fare'], 4)
#Feature 6
for data in all_data:
age_avg = data['Age'].mean()
age_std = data['Age'].std()
age_null = data['Age'].isnull().sum()
random_list = np.random.randint(age_avg - age_std, age_avg + age_std , size = age_null)
data['Age'][np.isnan(data['Age'])] = random_list
data['Age'] = data['Age'].astype(int)
train_data['category_age'] = pd.cut(train_data['Age'], 5)
#Feature 7
def get_title(name):
    """Return the honorific title embedded in a passenger name.

    Titles appear as " <Word>. " with surrounding spaces, e.g.
    "Braund, Mr. Owen Harris" -> "Mr". Returns "" when the name carries
    no title.
    """
    # Raw string: '\.' inside a plain string literal is an invalid escape
    # sequence (SyntaxWarning on Python >= 3.12); the pattern is unchanged.
    title_search = re.search(r' ([A-Za-z]+)\. ', name)
    if title_search:
        return title_search.group(1)
    return ""
for data in all_data:
data['title'] = data['Name'].apply(get_title)
for data in all_data:
data['title'] = data['title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'],'Rare')
data['title'] = data['title'].replace('Mlle','Miss')
data['title'] = data['title'].replace('Ms','Miss')
data['title'] = data['title'].replace('Mme','Mrs')
#Map Data
for data in all_data:
#Mapping Sex
sex_map = { 'female':0 , 'male':1 }
data['Sex'] = data['Sex'].map(sex_map).astype(int)
#Mapping Title
title_map = {'Mr':1, 'Miss':2, 'Mrs':3, 'Master':4, 'Rare':5}
data['title'] = data['title'].map(title_map)
data['title'] = data['title'].fillna(0)
#Mapping Embarked
embark_map = {'S':0, 'C':1, 'Q':2}
data['Embarked'] = data['Embarked'].map(embark_map).astype(int)
#Mapping Fare
data.loc[ data['Fare'] <= 7.91, 'Fare'] = 0
data.loc[(data['Fare'] > 7.91) & (data['Fare'] <= 14.454), 'Fare'] = 1
data.loc[(data['Fare'] > 14.454) & (data['Fare'] <= 31), 'Fare'] = 2
data.loc[ data['Fare'] > 31, 'Fare'] = 3
data['Fare'] = data['Fare'].astype(int)
#Mapping Age
data.loc[ data['Age'] <= 16, 'Age'] = 0
data.loc[(data['Age'] > 16) & (data['Age'] <= 32), 'Age'] = 1
data.loc[(data['Age'] > 32) & (data['Age'] <= 48), 'Age'] = 2
data.loc[(data['Age'] > 48) & (data['Age'] <= 64), 'Age'] = 3
data.loc[ data['Age'] > 64, 'Age'] = 4
#5 Feature Selection
#5.1 Create list of columns to drop
drop_elements = ["Name", "Ticket", "Cabin", "SibSp", "Parch", "family_size"]
#5.3 Drop columns from both data sets
train_data = train_data.drop(drop_elements, axis = 1)
train_data = train_data.drop(['PassengerId','category_fare', 'category_age'], axis = 1)
test_data = test_data.drop(drop_elements, axis = 1)
#5.4 Double check
print("Training data")
print(train_data.head)
print("Test data")
print(test_data.head)
################################################################################
### Model Training & Persist as pickle
################################################################################
#6 Do training with decision tree
X_train = train_data.drop("Survived", axis=1)
Y_train = train_data["Survived"]
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
pkl = open('model.pkl', 'wb')
pickle.dump(decision_tree, pkl)
decision_tree = None
X_train.to_csv("train_data.csv", index=False)
################################################################################
### Model Loading & Inference
################################################################################
#7.1 Prepare prediction data & Model
model_pkl = open('model.pkl', 'rb')
model = pickle.load(model_pkl)
X_test = test_data.drop("PassengerId", axis=1).copy()
#7.2 Do predict
accuracy = round(model.score(X_train, Y_train) * 100, 2)
print('=========================')
print("Model Accuracy: ",accuracy)
print('=========================')
#7.3 Run prediction on entire test data
Y_pred = model.predict(X_test)
result = pd.DataFrame({
"PassengerId":test_data["PassengerId"],
"Survived": Y_pred
})
result.to_csv('result.csv', index = False) | 125 | 0 | 22 |
3fd682ab39a18cddfb20ea63d3293a46a7c8e7e1 | 11,758 | py | Python | Example/Gutenkunst2007/von_Dassow_2000/spgNet.py | bcdaniels/SloppyCell | 17e68127a6aba19056a5067748a2d18241cc4d76 | [
"BSD-3-Clause"
] | 2 | 2020-05-26T19:29:39.000Z | 2020-08-26T20:54:52.000Z | Example/Gutenkunst2007/von_Dassow_2000/spgNet.py | bcdaniels/SloppyCell | 17e68127a6aba19056a5067748a2d18241cc4d76 | [
"BSD-3-Clause"
] | 1 | 2020-05-26T16:50:49.000Z | 2021-07-08T20:35:35.000Z | Example/Gutenkunst2007/von_Dassow_2000/spgNet.py | jurquiza/SloppyCellUrquiza2019 | a9f64d9d4172c82735813f09e48f36777a714e9c | [
"BSD-3-Clause"
] | 3 | 2017-09-12T03:12:01.000Z | 2018-10-19T11:08:09.000Z | from SloppyCell.ReactionNetworks import *
# This file was distributed with Ingenue
# Read the Ingenue network description once. Using open() in a 'with'
# block replaces the Python-2-only file() builtin and guarantees the
# handle is closed (the original leaked it).
with open('spg1_4cell.net') as f:
    lines = f.readlines()
# Record the grid dimensions, the network id, and the line index where
# each section of the file begins, for the section parsers below.
for ii, line in enumerate(lines):
    if line.startswith('&width'):
        width = int(line.split()[1])
    elif line.startswith('&height'):
        height = int(line.split()[1])
    elif line.startswith('&Network'):
        net_id = line.split()[1]
    elif line.startswith('&Genes'):
        begin_genes = ii
    elif line.startswith('&Interactions'):
        begin_ints = ii
    elif line.startswith('&ParameterValues'):
        begin_params = ii
    elif line.startswith('&InitLevels'):
        begin_inits = ii
# Create one compartment per cell of the (height x width) grid; a 1-D
# grid uses single-index names ('cell_j'), a 2-D grid uses 'cell_i_j'.
net = Network(net_id)
if height > 1:
    for ii in range(height):
        for jj in range(width):
            net.add_compartment('cell_%i_%i' % (ii, jj))
else:
    for jj in range(width):
        net.add_compartment('cell_%i' % (jj))
ii = begin_genes + 1
while True:
line = lines[ii].strip()
if line.startswith('&endGenes'):
break
if line.startswith('&'):
# Process this species
species_id = line[1:]
ii += 1
# Skip to the end of this species entry
while not lines[ii].strip().startswith('&end'):
line = lines[ii]
first, second = line.split()
if first == '&Location':
on_membrane = (second == 'membrane')
ii += 1
for comp_ii, comp_id in enumerate(net.compartments.keys()):
if not on_membrane:
net.add_species('%s_%s' % (species_id, comp_id), comp_id,
name = r'%s_{%i}' % (species_id, comp_ii))
else:
for jj in range(6):
id = '%s_%s_side_%i' % (species_id, comp_id, jj)
name = r'%s_{%i, %i}' % (species_id, comp_ii, jj)
net.add_species(id, comp_id, name = name)
ii += 1
for comp_id in net.compartments.keys():
net.set_var_constant('B_%s' % comp_id, True)
ii = begin_params + 1
while True:
line = lines[ii].strip()
if line.startswith('&endParameterValues'):
break
if line.startswith('&'):
# Process this parameter
temp = line.split()
param_id, param_val = temp[0][1:], float(temp[1])
net.add_parameter(param_id, param_val)
ii += 1
# Create all the appropriate parameter names
for param_id in net.parameters.keys():
if param_id == 'K_PTC_HH':
name = r'k_{PTCHH}'
elif param_id.startswith('K_'):
term = param_id.split('_')[1]
name = r'\kappa_{%s}' % term
elif param_id.startswith('nu_'):
term = param_id.split('_')[1]
name = r'\nu_{%s}' % term
elif param_id.startswith('H_'):
term = param_id.split('_')[1]
name = r'H_{%s}' % term
elif param_id.startswith('alpha_'):
term = param_id.split('_')[1]
name = r'\alpha_{%s}' % term
else:
name = param_id
net.parameters.get(param_id).name = name
net.parameters.get('Endo_WG').name = r'r_{EndoWG}'
net.parameters.get('Exo_WG').name = r'r_{ExoWG}'
net.parameters.get('Mxfer_WG').name = r'r_{MxferWG}'
net.parameters.get('LMxfer_WG').name = r'r_{LMxferWG}'
net.parameters.get('LMxfer_PTC').name = r'r_{LMxferPTC}'
net.parameters.get('LMxfer_HH').name = r'r_{LMxferHH}'
net.parameters.get('maxHH').name = r'\left[HH\right]_0'
net.parameters.get('maxPTC').name = r'\left[PTC\right]_0'
net.parameters.get('C_CID').name = r'C_{CID}'
net.add_parameter('T_0', 1.0, is_optimizable=False)
ii = begin_inits + 1
while True:
line = lines[ii].strip()
if line.startswith('&endInitLevels'):
break
elif line.startswith('&BackgroundLevel'):
spec_id = line.split()[1]
value = float(line.split()[2])
for var_id in net.species.keys():
if var_id.startswith(spec_id):
net.set_var_ic(var_id, value)
elif line.startswith('&ColumnIC'):
spec_id = lines[ii + 1].split()[1]
value = float(lines[ii + 2].split()[1])
column = int(lines[ii + 3].split()[1])
cell_id = net.compartments.keys()[column]
for var_id in net.species.keys():
if var_id.startswith(spec_id) and var_id.count(cell_id):
net.set_var_ic(var_id, value)
ii += 3
ii += 1
net.add_func_def('phi', ['X', 'k_X', 'v_X'], '(X**2)**(v_X/2) / ((k_X**2)**(v_X/2) + (X**2)**(v_X/2))',
name = r'\phi')
net.add_func_def('psi', ['X', 'k_X', 'v_X'], '1 - phi(X, k_X, v_X)',
name = r'\psi')
for comp_ii, comp_id in enumerate(net.compartments.keys()):
# rhs for en_i
# First define EWG^{tot}_{n(i,j)}
net.add_parameter('EWG_tot_pres_%s' % comp_id, is_optimizable=False,
name=r'{EWG_{n(%i, j)}}^{tot}' % comp_ii)
net.add_assignment_rule('EWG_tot_pres_%s' % comp_id,
presented_by_neighbors(net, comp_id, 'EWG'))
rule_str = 'T_0/H_en * (phi(EWG_tot_pres_%(comp)s * psi(CN_%(comp)s, K_CNen, nu_CNen), K_WGen, nu_WGen) - en_%(comp)s)'
net.add_rate_rule('en_%s' % comp_id, rule_str % {'comp': comp_id})
# rhs for EN_i
rule_str = 'T_0/H_EN * (en_%(comp)s - EN_%(comp)s)'
net.add_rate_rule('EN_%s' % comp_id, rule_str % {'comp': comp_id})
# rhs for wg_i
num = '(beta_wg * phi(CID_%(comp)s * psi (CN_%(comp)s, K_CNwg, nu_CNwg), K_CIDwg, nu_CIDwg) + alpha_wg * phi(IWG_%(comp)s, K_WGwg, nu_WGwg))' % {'comp': comp_id}
denom = '(1 + beta_wg * phi(CID_%(comp)s * psi(CN_%(comp)s, K_CNwg, nu_CNwg), K_CIDwg, nu_CIDwg) + alpha_wg * phi(IWG_%(comp)s, K_WGwg, nu_WGwg))' % {'comp': comp_id}
rule_dict = {'num' : num, 'denom' : denom, 'comp' : comp_id}
rule_str = 'T_0/H_wg * %(num)s/%(denom)s - T_0/H_wg * wg_%(comp)s'
net.add_rate_rule('wg_%s' % comp_id, rule_str % rule_dict)
# rhs for IWG_i
net.add_parameter('EWG_tot_%s' % comp_id, is_optimizable=False,
name=r'{EWG_{%i}}^{tot}' % comp_ii)
net.add_assignment_rule('EWG_tot_%s' % comp_id,
total_in_cell(net, comp_id, 'EWG'))
rule_str = 'T_0/H_IWG * (wg_%(comp)s - IWG_%(comp)s) + T_0*(Endo_WG * EWG_tot_%(comp)s - Exo_WG * IWG_%(comp)s)'
net.add_rate_rule('IWG_%s' % comp_id, rule_str % {'comp': comp_id})
#rhs for EWG_i_j
for side_jj in range(6):
terms = ['T_0 * Exo_WG * IWG_%(comp)s/6',
'-(T_0 * Endo_WG * EWG_%(comp)s_side_%(side)i)',
'T_0 * Mxfer_WG * (%(sub)s - EWG_%(comp)s_side_%(side)i)',
'T_0 * LMxfer_WG * (EWG_%(comp)s_side_%(prev)i + EWG_%(comp)s_side_%(next)i - 2*EWG_%(comp)s_side_%(side)i)',
'-T_0/H_EWG * EWG_%(comp)s_side_%(side)i']
rule_str = ' + '.join(terms)
rule_dict = {'sub': opposite_side(net, comp_id, side_jj, 'EWG'),
'comp': comp_id,
'side': side_jj,
'prev': (side_jj - 1) % 6,
'next': (side_jj + 1) % 6}
net.add_rate_rule('EWG_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
# rhs for ptc_i
rule_str = 'T_0/H_ptc * (phi(CID_%(comp)s * psi(CN_%(comp)s, K_CNptc, nu_CNptc), K_CIDptc, nu_CIDptc) - ptc_%(comp)s)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('ptc_%s' % comp_id,
rule_str % rule_dict)
# rhs for PTC_i_j
for side_jj in range(6):
terms = ['T_0/H_PTC * (ptc_%(comp)s/6 - PTC_%(comp)s_side_%(side)i)',
'-(T_0 * K_PTC_HH * maxHH * %(sub1)s * PTC_%(comp)s_side_%(side)i)',
'T_0 * LMxfer_PTC * (PTC_%(comp)s_side_%(prev)i + PTC_%(comp)s_side_%(next)i - 2*PTC_%(comp)s_side_%(side)i)']
rule_str = ' + '.join(terms)
rule_dict = {'sub1': opposite_side(net, comp_id, side_jj, 'HH'),
'comp': comp_id,
'side': side_jj,
'prev': (side_jj - 1) % 6,
'next': (side_jj + 1) % 6}
net.add_rate_rule('PTC_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
# rhs for cid_i
rule_str = 'T_0/H_cid * (phi(B_%(comp)s * psi(EN_%(comp)s, K_ENcid, nu_ENcid), K_Bcid, nu_Bcid) - cid_%(comp)s)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('cid_%s' % comp_id,
rule_str % rule_dict)
# rhs for CID_i
net.add_parameter('PTC_tot_%s' % comp_id, is_optimizable=False,
name=r'{PTC_{%i}}^{tot}' % comp_ii)
net.add_assignment_rule('PTC_tot_%s' % comp_id,
total_in_cell(net, comp_id, 'PTC'))
rule_str = 'T_0/H_CID * (cid_%(comp)s - CID_%(comp)s) - T_0 * C_CID * CID_%(comp)s * phi(PTC_tot_%(comp)s, K_PTCCID, nu_PTCCID)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('CID_%s' % comp_id,
rule_str % rule_dict)
# rhs for CN_i
rule_str = 'T_0 * C_CID * CID_%(comp)s * phi(PTC_tot_%(comp)s, K_PTCCID, nu_PTCCID) - T_0 * CN_%(comp)s/H_CN'
rule_dict = {'comp': comp_id}
net.add_rate_rule('CN_%s' % comp_id,
rule_str % rule_dict)
# rhs for hh_i
rule_str = 'T_0/H_hh * (phi(EN_%(comp)s * psi(CN_%(comp)s, K_CNhh, nu_CNhh), K_ENhh, nu_ENhh) - hh_%(comp)s)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('hh_%s' % comp_id,
rule_str % rule_dict)
# rhs for HH_i_j
for side_jj in range(6):
terms = ['T_0/H_HH * (hh_%(comp)s/6 - HH_%(comp)s_side_%(side)s)'
'-T_0 * K_PTC_HH * maxPTC * %(sub1)s * HH_%(comp)s_side_%(side)s',
'T_0 * LMxfer_HH * (HH_%(comp)s_side_%(prev)i + HH_%(comp)s_side_%(next)i - 2*HH_%(comp)s_side_%(side)i)']
rule_str = ' + '.join(terms)
rule_dict = {'sub1': opposite_side(net, comp_id, side_jj, 'PTC'),
'comp': comp_id,
'side': side_jj,
'prev': (side_jj - 1) % 6,
'next': (side_jj + 1) % 6}
net.add_rate_rule('HH_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
# rhs for PH_i_j
for side_jj in range(6):
rule_str= 'T_0 * K_PTC_HH * maxHH * %(sub1)s * PTC_%(comp)s_side_%(side)i - T_0*PH_%(comp)s_side_%(side)i / H_PH'
rule_dict = {'sub1': opposite_side(net, comp_id, side_jj, 'HH'),
'comp': comp_id,
'side': side_jj}
net.add_rate_rule('PH_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
| 41.111888 | 170 | 0.559024 | from SloppyCell.ReactionNetworks import *
# This file was distributed with Ingenue
f = file('spg1_4cell.net')
lines = f.readlines()
for ii, line in enumerate(lines):
if line.startswith('&width'):
width = int(line.split()[1])
elif line.startswith('&height'):
height = int(line.split()[1])
elif line.startswith('&Network'):
net_id = line.split()[1]
elif line.startswith('&Genes'):
begin_genes = ii
elif line.startswith('&Interactions'):
begin_ints = ii
elif line.startswith('&ParameterValues'):
begin_params = ii
elif line.startswith('&InitLevels'):
begin_inits = ii
net = Network(net_id)
if height > 1:
for ii in range(height):
for jj in range(width):
net.add_compartment('cell_%i_%i' % (ii, jj))
else:
for jj in range(width):
net.add_compartment('cell_%i' % (jj))
ii = begin_genes + 1
while True:
line = lines[ii].strip()
if line.startswith('&endGenes'):
break
if line.startswith('&'):
# Process this species
species_id = line[1:]
ii += 1
# Skip to the end of this species entry
while not lines[ii].strip().startswith('&end'):
line = lines[ii]
first, second = line.split()
if first == '&Location':
on_membrane = (second == 'membrane')
ii += 1
for comp_ii, comp_id in enumerate(net.compartments.keys()):
if not on_membrane:
net.add_species('%s_%s' % (species_id, comp_id), comp_id,
name = r'%s_{%i}' % (species_id, comp_ii))
else:
for jj in range(6):
id = '%s_%s_side_%i' % (species_id, comp_id, jj)
name = r'%s_{%i, %i}' % (species_id, comp_ii, jj)
net.add_species(id, comp_id, name = name)
ii += 1
for comp_id in net.compartments.keys():
net.set_var_constant('B_%s' % comp_id, True)
ii = begin_params + 1
while True:
line = lines[ii].strip()
if line.startswith('&endParameterValues'):
break
if line.startswith('&'):
# Process this parameter
temp = line.split()
param_id, param_val = temp[0][1:], float(temp[1])
net.add_parameter(param_id, param_val)
ii += 1
# Create all the appropriate parameter names
for param_id in net.parameters.keys():
if param_id == 'K_PTC_HH':
name = r'k_{PTCHH}'
elif param_id.startswith('K_'):
term = param_id.split('_')[1]
name = r'\kappa_{%s}' % term
elif param_id.startswith('nu_'):
term = param_id.split('_')[1]
name = r'\nu_{%s}' % term
elif param_id.startswith('H_'):
term = param_id.split('_')[1]
name = r'H_{%s}' % term
elif param_id.startswith('alpha_'):
term = param_id.split('_')[1]
name = r'\alpha_{%s}' % term
else:
name = param_id
net.parameters.get(param_id).name = name
net.parameters.get('Endo_WG').name = r'r_{EndoWG}'
net.parameters.get('Exo_WG').name = r'r_{ExoWG}'
net.parameters.get('Mxfer_WG').name = r'r_{MxferWG}'
net.parameters.get('LMxfer_WG').name = r'r_{LMxferWG}'
net.parameters.get('LMxfer_PTC').name = r'r_{LMxferPTC}'
net.parameters.get('LMxfer_HH').name = r'r_{LMxferHH}'
net.parameters.get('maxHH').name = r'\left[HH\right]_0'
net.parameters.get('maxPTC').name = r'\left[PTC\right]_0'
net.parameters.get('C_CID').name = r'C_{CID}'
net.add_parameter('T_0', 1.0, is_optimizable=False)
ii = begin_inits + 1
while True:
line = lines[ii].strip()
if line.startswith('&endInitLevels'):
break
elif line.startswith('&BackgroundLevel'):
spec_id = line.split()[1]
value = float(line.split()[2])
for var_id in net.species.keys():
if var_id.startswith(spec_id):
net.set_var_ic(var_id, value)
elif line.startswith('&ColumnIC'):
spec_id = lines[ii + 1].split()[1]
value = float(lines[ii + 2].split()[1])
column = int(lines[ii + 3].split()[1])
cell_id = net.compartments.keys()[column]
for var_id in net.species.keys():
if var_id.startswith(spec_id) and var_id.count(cell_id):
net.set_var_ic(var_id, value)
ii += 3
ii += 1
net.add_func_def('phi', ['X', 'k_X', 'v_X'], '(X**2)**(v_X/2) / ((k_X**2)**(v_X/2) + (X**2)**(v_X/2))',
name = r'\phi')
net.add_func_def('psi', ['X', 'k_X', 'v_X'], '1 - phi(X, k_X, v_X)',
name = r'\psi')
def presented_by_neighbors(net, comp_id, spec_id):
ii = net.compartments.keys().index(comp_id)
next = net.compartments.keys()[(ii + 1) % len(net.compartments)]
prev = net.compartments.keys()[(ii - 1) % len(net.compartments)]
terms = ['%s_%s_side_3' % (spec_id, comp_id),
'%s_%s_side_4' % (spec_id, next),
'%s_%s_side_5' % (spec_id, next),
'%s_%s_side_0' % (spec_id, comp_id),
'%s_%s_side_1' % (spec_id, prev),
'%s_%s_side_2' % (spec_id, prev),
]
return '+'.join(terms)
def total_in_cell(net, comp_id, spec_id):
terms = ['%s_%s_side_%i' % (spec_id, comp_id, ii) for ii in range(6)]
return '+'.join(terms)
def opposite_side(net, comp_id, side_jj, spec_id):
ii = net.compartments.keys().index(comp_id)
next = net.compartments.keys()[(ii + 1) % len(net.compartments)]
prev = net.compartments.keys()[(ii - 1) % len(net.compartments)]
if side_jj in [1, 2]:
cell_id = next
elif side_jj in [0, 3]:
cell_id = comp_id
elif side_jj in [4, 5]:
cell_id = prev
return '%s_%s_side_%i' % (spec_id, cell_id, (side_jj + 3)%6)
for comp_ii, comp_id in enumerate(net.compartments.keys()):
# rhs for en_i
# First define EWG^{tot}_{n(i,j)}
net.add_parameter('EWG_tot_pres_%s' % comp_id, is_optimizable=False,
name=r'{EWG_{n(%i, j)}}^{tot}' % comp_ii)
net.add_assignment_rule('EWG_tot_pres_%s' % comp_id,
presented_by_neighbors(net, comp_id, 'EWG'))
rule_str = 'T_0/H_en * (phi(EWG_tot_pres_%(comp)s * psi(CN_%(comp)s, K_CNen, nu_CNen), K_WGen, nu_WGen) - en_%(comp)s)'
net.add_rate_rule('en_%s' % comp_id, rule_str % {'comp': comp_id})
# rhs for EN_i
rule_str = 'T_0/H_EN * (en_%(comp)s - EN_%(comp)s)'
net.add_rate_rule('EN_%s' % comp_id, rule_str % {'comp': comp_id})
# rhs for wg_i
num = '(beta_wg * phi(CID_%(comp)s * psi (CN_%(comp)s, K_CNwg, nu_CNwg), K_CIDwg, nu_CIDwg) + alpha_wg * phi(IWG_%(comp)s, K_WGwg, nu_WGwg))' % {'comp': comp_id}
denom = '(1 + beta_wg * phi(CID_%(comp)s * psi(CN_%(comp)s, K_CNwg, nu_CNwg), K_CIDwg, nu_CIDwg) + alpha_wg * phi(IWG_%(comp)s, K_WGwg, nu_WGwg))' % {'comp': comp_id}
rule_dict = {'num' : num, 'denom' : denom, 'comp' : comp_id}
rule_str = 'T_0/H_wg * %(num)s/%(denom)s - T_0/H_wg * wg_%(comp)s'
net.add_rate_rule('wg_%s' % comp_id, rule_str % rule_dict)
# rhs for IWG_i
net.add_parameter('EWG_tot_%s' % comp_id, is_optimizable=False,
name=r'{EWG_{%i}}^{tot}' % comp_ii)
net.add_assignment_rule('EWG_tot_%s' % comp_id,
total_in_cell(net, comp_id, 'EWG'))
rule_str = 'T_0/H_IWG * (wg_%(comp)s - IWG_%(comp)s) + T_0*(Endo_WG * EWG_tot_%(comp)s - Exo_WG * IWG_%(comp)s)'
net.add_rate_rule('IWG_%s' % comp_id, rule_str % {'comp': comp_id})
#rhs for EWG_i_j
for side_jj in range(6):
terms = ['T_0 * Exo_WG * IWG_%(comp)s/6',
'-(T_0 * Endo_WG * EWG_%(comp)s_side_%(side)i)',
'T_0 * Mxfer_WG * (%(sub)s - EWG_%(comp)s_side_%(side)i)',
'T_0 * LMxfer_WG * (EWG_%(comp)s_side_%(prev)i + EWG_%(comp)s_side_%(next)i - 2*EWG_%(comp)s_side_%(side)i)',
'-T_0/H_EWG * EWG_%(comp)s_side_%(side)i']
rule_str = ' + '.join(terms)
rule_dict = {'sub': opposite_side(net, comp_id, side_jj, 'EWG'),
'comp': comp_id,
'side': side_jj,
'prev': (side_jj - 1) % 6,
'next': (side_jj + 1) % 6}
net.add_rate_rule('EWG_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
# rhs for ptc_i
rule_str = 'T_0/H_ptc * (phi(CID_%(comp)s * psi(CN_%(comp)s, K_CNptc, nu_CNptc), K_CIDptc, nu_CIDptc) - ptc_%(comp)s)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('ptc_%s' % comp_id,
rule_str % rule_dict)
# rhs for PTC_i_j
for side_jj in range(6):
terms = ['T_0/H_PTC * (ptc_%(comp)s/6 - PTC_%(comp)s_side_%(side)i)',
'-(T_0 * K_PTC_HH * maxHH * %(sub1)s * PTC_%(comp)s_side_%(side)i)',
'T_0 * LMxfer_PTC * (PTC_%(comp)s_side_%(prev)i + PTC_%(comp)s_side_%(next)i - 2*PTC_%(comp)s_side_%(side)i)']
rule_str = ' + '.join(terms)
rule_dict = {'sub1': opposite_side(net, comp_id, side_jj, 'HH'),
'comp': comp_id,
'side': side_jj,
'prev': (side_jj - 1) % 6,
'next': (side_jj + 1) % 6}
net.add_rate_rule('PTC_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
# rhs for cid_i
rule_str = 'T_0/H_cid * (phi(B_%(comp)s * psi(EN_%(comp)s, K_ENcid, nu_ENcid), K_Bcid, nu_Bcid) - cid_%(comp)s)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('cid_%s' % comp_id,
rule_str % rule_dict)
# rhs for CID_i
net.add_parameter('PTC_tot_%s' % comp_id, is_optimizable=False,
name=r'{PTC_{%i}}^{tot}' % comp_ii)
net.add_assignment_rule('PTC_tot_%s' % comp_id,
total_in_cell(net, comp_id, 'PTC'))
rule_str = 'T_0/H_CID * (cid_%(comp)s - CID_%(comp)s) - T_0 * C_CID * CID_%(comp)s * phi(PTC_tot_%(comp)s, K_PTCCID, nu_PTCCID)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('CID_%s' % comp_id,
rule_str % rule_dict)
# rhs for CN_i
rule_str = 'T_0 * C_CID * CID_%(comp)s * phi(PTC_tot_%(comp)s, K_PTCCID, nu_PTCCID) - T_0 * CN_%(comp)s/H_CN'
rule_dict = {'comp': comp_id}
net.add_rate_rule('CN_%s' % comp_id,
rule_str % rule_dict)
# rhs for hh_i
rule_str = 'T_0/H_hh * (phi(EN_%(comp)s * psi(CN_%(comp)s, K_CNhh, nu_CNhh), K_ENhh, nu_ENhh) - hh_%(comp)s)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('hh_%s' % comp_id,
rule_str % rule_dict)
# rhs for HH_i_j
for side_jj in range(6):
terms = ['T_0/H_HH * (hh_%(comp)s/6 - HH_%(comp)s_side_%(side)s)'
'-T_0 * K_PTC_HH * maxPTC * %(sub1)s * HH_%(comp)s_side_%(side)s',
'T_0 * LMxfer_HH * (HH_%(comp)s_side_%(prev)i + HH_%(comp)s_side_%(next)i - 2*HH_%(comp)s_side_%(side)i)']
rule_str = ' + '.join(terms)
rule_dict = {'sub1': opposite_side(net, comp_id, side_jj, 'PTC'),
'comp': comp_id,
'side': side_jj,
'prev': (side_jj - 1) % 6,
'next': (side_jj + 1) % 6}
net.add_rate_rule('HH_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
# rhs for PH_i_j
for side_jj in range(6):
rule_str= 'T_0 * K_PTC_HH * maxHH * %(sub1)s * PTC_%(comp)s_side_%(side)i - T_0*PH_%(comp)s_side_%(side)i / H_PH'
rule_dict = {'sub1': opposite_side(net, comp_id, side_jj, 'HH'),
'comp': comp_id,
'side': side_jj}
net.add_rate_rule('PH_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
| 1,101 | 0 | 69 |
2d350f237cb6a15f4e2d76839103995ddf1d0c43 | 797 | py | Python | 25_read_write_text_files.py | nagasudhirpulla/python_wrldc_training | c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6 | [
"MIT"
] | null | null | null | 25_read_write_text_files.py | nagasudhirpulla/python_wrldc_training | c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6 | [
"MIT"
] | null | null | null | 25_read_write_text_files.py | nagasudhirpulla/python_wrldc_training | c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6 | [
"MIT"
] | 2 | 2020-09-30T16:32:18.000Z | 2020-10-23T01:13:51.000Z | '''
read or write text files in python
'''
# %%
# read a text file
# open the file for reading
with open("dumps/test.txt", mode='r') as f:
# read all the file content
fStr = f.read()
# please note that once again calling f.read() will return empty string
print(fStr)
# this will print the whole file contents
# %%
# load all lines into a list
with open("dumps/test.txt", mode='r') as f:
# load all the lines of the file into an array
textLines = f.readlines()
print(textLines)
# %%
# writing text to a file
# with mode = 'w', old text will be deleted
# with mode = 'a', the new text will be appended to the old text
with open("dumps/test.txt", mode='w') as f:
f.write("The first line\n")
f.write("This is the second line\nThis the third line")
# %%
| 24.151515 | 74 | 0.647428 | '''
read or write text files in python
'''
# %%
# read a text file
# open the file for reading
with open("dumps/test.txt", mode='r') as f:
# read all the file content
fStr = f.read()
# please note that once again calling f.read() will return empty string
print(fStr)
# this will print the whole file contents
# %%
# load all lines into a list
with open("dumps/test.txt", mode='r') as f:
# load all the lines of the file into an array
textLines = f.readlines()
print(textLines)
# %%
# writing text to a file
# with mode = 'w', old text will be deleted
# with mode = 'a', the new text will be appended to the old text
with open("dumps/test.txt", mode='w') as f:
f.write("The first line\n")
f.write("This is the second line\nThis the third line")
# %%
| 0 | 0 | 0 |
a4ea7e6fbf2052ec5420e3e4dba35c0df3d27671 | 1,401 | py | Python | Kooleposhti/accounts/backends.py | ParizanTeam/Kooleposhti-Backend | e7d819b3e93836f1a893cc51541056cbf681d1c6 | [
"MIT"
] | null | null | null | Kooleposhti/accounts/backends.py | ParizanTeam/Kooleposhti-Backend | e7d819b3e93836f1a893cc51541056cbf681d1c6 | [
"MIT"
] | null | null | null | Kooleposhti/accounts/backends.py | ParizanTeam/Kooleposhti-Backend | e7d819b3e93836f1a893cc51541056cbf681d1c6 | [
"MIT"
] | null | null | null | from django.contrib.auth.backends import BaseBackend, UserModel, ModelBackend
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.db.models import Exists, OuterRef, Q
import rest_framework_simplejwt.serializers
from pprint import pprint
class MyModelBackend(ModelBackend):
"""
Authenticates against settings.AUTH_USER_MODEL.
"""
| 40.028571 | 88 | 0.668094 | from django.contrib.auth.backends import BaseBackend, UserModel, ModelBackend
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.db.models import Exists, OuterRef, Q
import rest_framework_simplejwt.serializers
from pprint import pprint
class MyModelBackend(ModelBackend):
"""
Authenticates against settings.AUTH_USER_MODEL.
"""
def authenticate(self, request, username=None, password=None, email=None, **kwargs):
username = None if username == '' else username
email = None if email == '' else email
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
if (email is None and username is None) or password is None:
return
try:
if not username is None:
user = UserModel._default_manager.get_by_natural_key(username)
elif not email is None:
user = UserModel._default_manager.get(
email=request.data['email'])
except UserModel.DoesNotExist:
# Run the default password hasher once to reduce the timing
# difference between an existing and a nonexistent user (#20760).
UserModel().set_password(password)
else:
if user.check_password(password) and self.user_can_authenticate(user):
return user
| 974 | 0 | 27 |
217ccd524a9351d8524155ddb70230b3fa79c06b | 1,963 | py | Python | server/algos/euler/tests/integration/test_setup.py | yizhang7210/Acre | c98cf8a4fdfb223a1958e8e61df759f889a1b13f | [
"MIT"
] | 2 | 2017-11-27T21:55:21.000Z | 2017-12-30T03:34:40.000Z | server/algos/euler/tests/integration/test_setup.py | yizhang7210/Acre | c98cf8a4fdfb223a1958e8e61df759f889a1b13f | [
"MIT"
] | 30 | 2017-09-06T12:00:08.000Z | 2018-06-20T22:47:46.000Z | server/algos/euler/tests/integration/test_setup.py | yizhang7210/Acre | c98cf8a4fdfb223a1958e8e61df759f889a1b13f | [
"MIT"
] | 1 | 2021-04-05T13:59:37.000Z | 2021-04-05T13:59:37.000Z | # pylint: disable=missing-docstring
import datetime
from core.models import instruments
from datasource.models import candles
| 32.180328 | 78 | 0.522669 | # pylint: disable=missing-docstring
import datetime
from core.models import instruments
from datasource.models import candles
class TestSetup():
@classmethod
def set_up_instruments(cls):
cls.eur_usd = instruments.Instrument(name='EUR_USD', multiplier=10000)
cls.eur_usd.save()
@classmethod
def set_up_candles(cls):
# EUR_USD 1
bid = {'o': 1.29845, 'h': 1.30001, 'l': 1.29222, 'c': 1.29288}
ask = {'o': 1.29863, 'h': 1.30027, 'l': 1.29235, 'c': 1.29343}
day_one = candles.create_one(
bid=bid, ask=ask,
instrument=cls.eur_usd,
start_time=datetime.datetime(2017, 9, 3, 17),
granularity='D',
volume=8
)
# EUR_USD 2
bid = {'o': 1.29288, 'h': 1.29945, 'l': 1.29045, 'c': 1.29455}
ask = {'o': 1.29343, 'h': 1.29967, 'l': 1.29063, 'c': 1.29563}
day_two = candles.create_one(
bid=bid, ask=ask,
instrument=cls.eur_usd,
start_time=datetime.datetime(2017, 9, 4, 17),
granularity='D',
volume=5
)
# EUR_USD 3
bid = {'o': 1.29455, 'h': 1.29878, 'l': 1.29045, 'c': 1.29521}
ask = {'o': 1.29563, 'h': 1.29902, 'l': 1.29056, 'c': 1.29533}
day_three = candles.create_one(
bid=bid, ask=ask,
instrument=cls.eur_usd,
start_time=datetime.datetime(2017, 9, 5, 17),
granularity='D',
volume=5
)
# EUR_USD 4
bid = {'o': 1.29521, 'h': 1.29678, 'l': 1.29345, 'c': 1.29444}
ask = {'o': 1.29533, 'h': 1.29702, 'l': 1.29356, 'c': 1.29462}
day_four = candles.create_one(
bid=bid, ask=ask,
instrument=cls.eur_usd,
start_time=datetime.datetime(2017, 9, 6, 17),
granularity='D',
volume=5
)
candles.insert_many([day_one, day_two, day_three, day_four])
| 1,728 | 84 | 23 |
4496f05342c70d3584c3394094ffc8795187282b | 12,643 | py | Python | arknights_mower/solvers/recruit.py | YuiTH/arknights-mower | 01e65569ee13a470dde09911d2237331a8ed5b3d | [
"MIT"
] | null | null | null | arknights_mower/solvers/recruit.py | YuiTH/arknights-mower | 01e65569ee13a470dde09911d2237331a8ed5b3d | [
"MIT"
] | null | null | null | arknights_mower/solvers/recruit.py | YuiTH/arknights-mower | 01e65569ee13a470dde09911d2237331a8ed5b3d | [
"MIT"
] | null | null | null | from __future__ import annotations
from ..ocr import ocrhandle, ocr_rectify
from ..utils import segment
from ..utils.device import Device
from ..utils.log import logger
from ..utils.recognize import Recognizer, Scene, RecognizeError
from ..utils.solver import BaseSolver
from ..data import recruit_tag, recruit_agent
class RecruitPoss(object):
""" 记录公招标签组合的可能性数据 """
class RecruitSolver(BaseSolver):
"""
自动进行公招
"""
def run(self, priority: list[str] = None) -> None:
"""
:param priority: list[str], 优先考虑的公招干员,默认为高稀有度优先
"""
self.priority = priority
self.recruiting = 0
logger.info('Start: 公招')
logger.info(f'目标干员:{priority if priority else "无,高稀有度优先"}')
super().run()
def recruit_tags(self) -> bool:
""" 识别公招标签的逻辑 """
needs = self.find('career_needs', judge=False)
avail_level = self.find('available_level', judge=False)
budget = self.find('recruit_budget', judge=False)
up = needs[0][1] - 80
down = needs[1][1] + 60
left = needs[1][0]
right = avail_level[0][0]
while True:
# ocr the recruitment tags and rectify
img = self.recog.img[up:down, left:right]
ocr = ocrhandle.predict(img)
for x in ocr:
if x[1] not in recruit_tag:
x[1] = ocr_rectify(img, x, recruit_tag, '公招标签')
# recruitment tags
tags = [x[1] for x in ocr]
logger.info(f'公招标签:{tags}')
# choose tags
choose, best = self.tags_choose(tags, self.priority)
if best.choose < (1 << 5) and best.min <= 3:
# refresh
if self.tap_element('recruit_refresh', detected=True):
self.tap_element('double_confirm', 0.8,
interval=3, judge=False)
continue
break
logger.info(f'选择:{choose}')
# tap selected tags
for x in ocr:
color = self.recog.img[up+x[2][0][1]-5, left+x[2][0][0]-5]
if (color[2] < 100) != (x[1] not in choose):
self.device.tap((left+x[2][0][0]-5, up+x[2][0][1]-5))
if best.choose < (1 << 5):
# 09:00
self.tap_element('one_hour', 0.2, 0.8, 0)
else:
# 03:50
[self.tap_element('one_hour', 0.2, 0.2, 0) for _ in range(2)]
[self.tap_element('one_hour', 0.5, 0.2, 0) for _ in range(5)]
# start recruit
self.tap((avail_level[1][0], budget[0][1]), interval=5)
def recruit_result(self) -> bool:
""" 识别公招招募到的干员 """
agent = None
ocr = ocrhandle.predict(self.recog.img)
for x in ocr:
if x[1][-3:] == '的信物':
agent = x[1][:-3]
agent_ocr = x
break
if agent is None:
logger.warning('未能识别到干员名称')
else:
if agent not in recruit_agent.keys():
agent_with_suf = [x+'的信物' for x in recruit_agent.keys()]
agent = ocr_rectify(
self.recog.img, agent_ocr, agent_with_suf, '干员名称')[:-3]
if agent in recruit_agent.keys():
if 2 <= recruit_agent[agent]['stars'] <= 4:
logger.info(f'获得干员:{agent}')
else:
logger.critical(f'获得干员:{agent}')
self.tap((self.recog.w // 2, self.recog.h // 2))
def tags_choose(self, tags: list[str], priority: list[str]) -> tuple[list[str], RecruitPoss]:
""" 公招标签选择核心逻辑 """
if priority is None:
priority = []
if len(priority) and isinstance(priority[0], str):
priority = [[x] for x in priority]
possibility: dict[int, RecruitPoss] = {}
agent_level_dict = {}
# 挨个干员判断可能性
for x in recruit_agent.values():
agent_name = x['name']
agent_level = x['stars']
agent_tags = x['tags']
agent_level_dict[agent_name] = agent_level
# 高级资深干员需要有特定的 tag
if agent_level == 6 and '高级资深干员' not in tags:
continue
# 统计 9 小时公招的可能性
valid_9 = None
if 3 <= agent_level <= 6:
valid_9 = 0
if agent_level == 6 and '高级资深干员' in tags:
valid_9 |= (1 << tags.index('高级资深干员'))
if agent_level == 5 and '资深干员' in tags:
valid_9 |= (1 << tags.index('资深干员'))
for tag in agent_tags:
if tag in tags:
valid_9 |= (1 << tags.index(tag))
# 统计 3 小时公招的可能性
valid_3 = None
if 1 <= agent_level <= 4:
valid_3 = 0
for tag in agent_tags:
if tag in tags:
valid_3 |= (1 << tags.index(tag))
# 枚举所有可能的标签组合子集
for o in range(1 << 5):
if valid_9 is not None and o & valid_9 == o:
if o not in possibility.keys():
possibility[o] = RecruitPoss(o)
possibility[o].ls.append(agent_name)
possibility[o].max = max(possibility[o].max, agent_level)
possibility[o].min = min(possibility[o].min, agent_level)
possibility[o].lv2a3 |= 2 <= agent_level <= 3
_o = o + (1 << 5)
if valid_3 is not None and o & valid_3 == o:
if _o not in possibility.keys():
possibility[_o] = RecruitPoss(_o)
possibility[_o].ls.append(agent_name)
possibility[_o].max = max(possibility[_o].max, agent_level)
possibility[_o].min = min(possibility[_o].min, agent_level)
possibility[_o].lv2a3 |= 2 <= agent_level <= 3
# 检查是否存在无法从公开招募中获得的干员
for considering in priority:
for x in considering:
if agent_level_dict.get(x) is None:
logger.error(f'该干员并不能在公开招募中获得:{x}')
raise RuntimeError
best = RecruitPoss(0)
# 按照优先级判断,必定选中同一星级干员
# 附加限制:min_level == agent_level
if best.poss == 0:
logger.debug('choose: priority, min_level == agent_level')
for considering in priority:
for o in possibility.keys():
possibility[o].poss = 0
for x in considering:
if x in possibility[o].ls:
agent_level = agent_level_dict[x]
if agent_level != 1 and agent_level == possibility[o].min:
possibility[o].poss += 1 / len(possibility[o].ls)
elif agent_level == 1 and agent_level == possibility[o].min == possibility[o].max:
# 必定选中一星干员的特殊逻辑
possibility[o].poss += 1 / len(possibility[o].ls)
if best < possibility[o]:
best = possibility[o]
if best.poss > 0:
break
# 按照优先级判断,若目标干员 1 星且该组合不存在 2/3 星的可能,则选择
# 附加限制:min_level == agent_level == 1 and not lv2a3
if best.poss == 0:
logger.debug('choose: priority, min_level == agent_level == 1 and not lv2a3')
for considering in priority:
for o in possibility.keys():
possibility[o].poss = 0
for x in considering:
if x in possibility[o].ls:
agent_level = agent_level_dict[x]
if agent_level == possibility[o].min == 1 and not possibility[o].lv2a3:
# 特殊判断:选中一星和四星干员的 Tag 组合
possibility[o].poss += 1 / len(possibility[o].ls)
if best < possibility[o]:
best = possibility[o]
if best.poss > 0:
break
# 按照优先级判断,必定选中星级 >= 4 的干员
# 附加限制:min_level >= 4
if best.poss == 0:
logger.debug('choose: priority, min_level >= 4')
for considering in priority:
for o in possibility.keys():
possibility[o].poss = 0
if possibility[o].min >= 4:
for x in considering:
if x in possibility[o].ls:
possibility[o].poss += 1 / len(possibility[o].ls)
if best < possibility[o]:
best = possibility[o]
if best.poss > 0:
break
# 按照等级下限判断,必定选中星级 >= 4 的干员
# 附加限制:min_level >= 4
if best.poss == 0:
logger.debug('choose: min_level >= 4')
for o in possibility.keys():
possibility[o].poss = 0
if possibility[o].min >= 4:
possibility[o].poss = possibility[o].min
if best < possibility[o]:
best = possibility[o]
# 按照优先级判断,检查其概率
if best.poss == 0:
logger.debug('choose: priority')
for considering in priority:
for o in possibility.keys():
possibility[o].poss = 0
for x in considering:
if x in possibility[o].ls:
possibility[o].poss += 1 / len(possibility[o].ls)
if best < possibility[o]:
best = possibility[o]
if best.poss > 0:
break
# 按照等级下限判断,默认高稀有度优先
if best.poss == 0:
logger.debug('choose: min_level')
for o in possibility.keys():
possibility[o].poss = possibility[o].min
if best < possibility[o]:
best = possibility[o]
logger.debug(f'poss: {possibility}')
logger.debug(f'best: {best}')
# 返回选择的标签列表
choose = []
for i in range(len(tags)):
if best.choose & (1 << i):
choose.append(tags[i])
return choose, best
| 38.663609 | 110 | 0.489678 | from __future__ import annotations
from ..ocr import ocrhandle, ocr_rectify
from ..utils import segment
from ..utils.device import Device
from ..utils.log import logger
from ..utils.recognize import Recognizer, Scene, RecognizeError
from ..utils.solver import BaseSolver
from ..data import recruit_tag, recruit_agent
class RecruitPoss(object):
""" 记录公招标签组合的可能性数据 """
def __init__(self, choose: int, max: int = 0, min: int = 7) -> None:
self.choose = choose # 标签选择(按位),第 6 个标志位表示是否选满招募时限,0 为选满,1 为选 03:50
self.max = max # 等级上限
self.min = min # 等级下限
self.poss = 0 # 可能性
self.lv2a3 = False # 是否包含等级为 2 和 3 的干员
self.ls = [] # 可能的干员列表
def __lt__(self, another: RecruitPoss) -> bool:
return (self.poss) < (another.poss)
def __str__(self) -> str:
return "%s,%s,%s,%s,%s" % (self.choose, self.max, self.min, self.poss, self.ls)
def __repr__(self) -> str:
return "%s,%s,%s,%s,%s" % (self.choose, self.max, self.min, self.poss, self.ls)
class RecruitSolver(BaseSolver):
"""
自动进行公招
"""
def __init__(self, device: Device = None, recog: Recognizer = None) -> None:
super().__init__(device, recog)
def run(self, priority: list[str] = None) -> None:
"""
:param priority: list[str], 优先考虑的公招干员,默认为高稀有度优先
"""
self.priority = priority
self.recruiting = 0
logger.info('Start: 公招')
logger.info(f'目标干员:{priority if priority else "无,高稀有度优先"}')
super().run()
def transition(self) -> bool:
if self.scene() == Scene.INDEX:
self.tap_element('index_recruit')
elif self.scene() == Scene.RECRUIT_MAIN:
segments = segment.recruit(self.recog.img)
tapped = False
for idx, seg in enumerate(segments):
if self.recruiting & (1 << idx) != 0:
continue
if self.tap_element('recruit_finish', scope=seg, detected=True):
tapped = True
break
required = self.find('job_requirements', scope=seg)
if required is None:
self.tap(seg)
tapped = True
self.recruiting |= (1 << idx)
break
if not tapped:
return True
elif self.scene() == Scene.RECRUIT_TAGS:
return self.recruit_tags()
elif self.scene() == Scene.SKIP:
self.tap_element('skip')
elif self.scene() == Scene.RECRUIT_AGENT:
return self.recruit_result()
elif self.scene() == Scene.MATERIEL:
self.tap_element('materiel_ico')
elif self.scene() == Scene.LOADING:
self.sleep(3)
elif self.scene() == Scene.CONNECTING:
self.sleep(3)
elif self.get_navigation():
self.tap_element('nav_recruit')
elif self.scene() != Scene.UNKNOWN:
self.back_to_index()
else:
raise RecognizeError('Unknown scene')
def recruit_tags(self) -> bool:
""" 识别公招标签的逻辑 """
needs = self.find('career_needs', judge=False)
avail_level = self.find('available_level', judge=False)
budget = self.find('recruit_budget', judge=False)
up = needs[0][1] - 80
down = needs[1][1] + 60
left = needs[1][0]
right = avail_level[0][0]
while True:
# ocr the recruitment tags and rectify
img = self.recog.img[up:down, left:right]
ocr = ocrhandle.predict(img)
for x in ocr:
if x[1] not in recruit_tag:
x[1] = ocr_rectify(img, x, recruit_tag, '公招标签')
# recruitment tags
tags = [x[1] for x in ocr]
logger.info(f'公招标签:{tags}')
# choose tags
choose, best = self.tags_choose(tags, self.priority)
if best.choose < (1 << 5) and best.min <= 3:
# refresh
if self.tap_element('recruit_refresh', detected=True):
self.tap_element('double_confirm', 0.8,
interval=3, judge=False)
continue
break
logger.info(f'选择:{choose}')
# tap selected tags
for x in ocr:
color = self.recog.img[up+x[2][0][1]-5, left+x[2][0][0]-5]
if (color[2] < 100) != (x[1] not in choose):
self.device.tap((left+x[2][0][0]-5, up+x[2][0][1]-5))
if best.choose < (1 << 5):
# 09:00
self.tap_element('one_hour', 0.2, 0.8, 0)
else:
# 03:50
[self.tap_element('one_hour', 0.2, 0.2, 0) for _ in range(2)]
[self.tap_element('one_hour', 0.5, 0.2, 0) for _ in range(5)]
# start recruit
self.tap((avail_level[1][0], budget[0][1]), interval=5)
def recruit_result(self) -> bool:
""" 识别公招招募到的干员 """
agent = None
ocr = ocrhandle.predict(self.recog.img)
for x in ocr:
if x[1][-3:] == '的信物':
agent = x[1][:-3]
agent_ocr = x
break
if agent is None:
logger.warning('未能识别到干员名称')
else:
if agent not in recruit_agent.keys():
agent_with_suf = [x+'的信物' for x in recruit_agent.keys()]
agent = ocr_rectify(
self.recog.img, agent_ocr, agent_with_suf, '干员名称')[:-3]
if agent in recruit_agent.keys():
if 2 <= recruit_agent[agent]['stars'] <= 4:
logger.info(f'获得干员:{agent}')
else:
logger.critical(f'获得干员:{agent}')
self.tap((self.recog.w // 2, self.recog.h // 2))
    def tags_choose(self, tags: list[str], priority: list[str]) -> tuple[list[str], RecruitPoss]:
        """ Core logic for choosing recruitment tags.

        Every subset of the (up to five) on-screen tags is encoded as a
        5-bit mask.  Masks below (1 << 5) stand for 9-hour recruitments,
        masks offset by (1 << 5) for 3-hour recruitments.  Each combination
        is scored through a cascade of strategies -- prioritized operators
        first, then guaranteed rarity -- and the winning combination is
        returned together with the corresponding tag names.
        """
        if priority is None:
            priority = []
        if len(priority) and isinstance(priority[0], str):
            # Accept a flat list of names; normalize to a list of groups.
            priority = [[x] for x in priority]
        possibility: dict[int, RecruitPoss] = {}
        agent_level_dict = {}
        # Record, per operator, which tag combinations could yield them.
        for x in recruit_agent.values():
            agent_name = x['name']
            agent_level = x['stars']
            agent_tags = x['tags']
            agent_level_dict[agent_name] = agent_level
            # A 6-star operator only appears when its dedicated tag is shown.
            if agent_level == 6 and '高级资深干员' not in tags:
                continue
            # Bitmask of shown tags under which this operator can appear in
            # a 9-hour recruitment (3-6 stars).
            valid_9 = None
            if 3 <= agent_level <= 6:
                valid_9 = 0
                if agent_level == 6 and '高级资深干员' in tags:
                    valid_9 |= (1 << tags.index('高级资深干员'))
                if agent_level == 5 and '资深干员' in tags:
                    valid_9 |= (1 << tags.index('资深干员'))
                for tag in agent_tags:
                    if tag in tags:
                        valid_9 |= (1 << tags.index(tag))
            # Bitmask of shown tags under which this operator can appear in
            # a 3-hour recruitment (1-4 stars).
            valid_3 = None
            if 1 <= agent_level <= 4:
                valid_3 = 0
                for tag in agent_tags:
                    if tag in tags:
                        valid_3 |= (1 << tags.index(tag))
            # Enumerate every subset of the tags and accumulate the outcome
            # statistics for each combination this operator belongs to.
            for o in range(1 << 5):
                if valid_9 is not None and o & valid_9 == o:
                    if o not in possibility.keys():
                        possibility[o] = RecruitPoss(o)
                    possibility[o].ls.append(agent_name)
                    possibility[o].max = max(possibility[o].max, agent_level)
                    possibility[o].min = min(possibility[o].min, agent_level)
                    possibility[o].lv2a3 |= 2 <= agent_level <= 3
                _o = o + (1 << 5)
                if valid_3 is not None and o & valid_3 == o:
                    if _o not in possibility.keys():
                        possibility[_o] = RecruitPoss(_o)
                    possibility[_o].ls.append(agent_name)
                    possibility[_o].max = max(possibility[_o].max, agent_level)
                    possibility[_o].min = min(possibility[_o].min, agent_level)
                    possibility[_o].lv2a3 |= 2 <= agent_level <= 3
        # Reject priority operators that cannot be obtained from public
        # recruitment at all.
        for considering in priority:
            for x in considering:
                if agent_level_dict.get(x) is None:
                    logger.error(f'该干员并不能在公开招募中获得:{x}')
                    raise RuntimeError
        best = RecruitPoss(0)
        # Strategy 1: by priority; the combination guarantees the target
        # operator's exact star level.
        # Constraint: min_level == agent_level
        if best.poss == 0:
            logger.debug('choose: priority, min_level == agent_level')
            for considering in priority:
                for o in possibility.keys():
                    possibility[o].poss = 0
                    for x in considering:
                        if x in possibility[o].ls:
                            agent_level = agent_level_dict[x]
                            if agent_level != 1 and agent_level == possibility[o].min:
                                possibility[o].poss += 1 / len(possibility[o].ls)
                            elif agent_level == 1 and agent_level == possibility[o].min == possibility[o].max:
                                # Special case that guarantees a 1-star pick.
                                possibility[o].poss += 1 / len(possibility[o].ls)
                    if best < possibility[o]:
                        best = possibility[o]
                if best.poss > 0:
                    break
        # Strategy 2: by priority; pick when the target is 1-star and the
        # combination cannot yield 2/3-star operators.
        # Constraint: min_level == agent_level == 1 and not lv2a3
        if best.poss == 0:
            logger.debug('choose: priority, min_level == agent_level == 1 and not lv2a3')
            for considering in priority:
                for o in possibility.keys():
                    possibility[o].poss = 0
                    for x in considering:
                        if x in possibility[o].ls:
                            agent_level = agent_level_dict[x]
                            if agent_level == possibility[o].min == 1 and not possibility[o].lv2a3:
                                # Special case: a tag set that covers only
                                # 1-star and 4-star operators.
                                possibility[o].poss += 1 / len(possibility[o].ls)
                    if best < possibility[o]:
                        best = possibility[o]
                if best.poss > 0:
                    break
        # Strategy 3: by priority; the combination guarantees star level >= 4.
        # Constraint: min_level >= 4
        if best.poss == 0:
            logger.debug('choose: priority, min_level >= 4')
            for considering in priority:
                for o in possibility.keys():
                    possibility[o].poss = 0
                    if possibility[o].min >= 4:
                        for x in considering:
                            if x in possibility[o].ls:
                                possibility[o].poss += 1 / len(possibility[o].ls)
                    if best < possibility[o]:
                        best = possibility[o]
                if best.poss > 0:
                    break
        # Strategy 4: by guaranteed minimum star level, keeping only
        # combinations guaranteeing >= 4 stars.
        # Constraint: min_level >= 4
        if best.poss == 0:
            logger.debug('choose: min_level >= 4')
            for o in possibility.keys():
                possibility[o].poss = 0
                if possibility[o].min >= 4:
                    possibility[o].poss = possibility[o].min
                if best < possibility[o]:
                    best = possibility[o]
        # Strategy 5: by priority alone, scored by hit probability.
        if best.poss == 0:
            logger.debug('choose: priority')
            for considering in priority:
                for o in possibility.keys():
                    possibility[o].poss = 0
                    for x in considering:
                        if x in possibility[o].ls:
                            possibility[o].poss += 1 / len(possibility[o].ls)
                    if best < possibility[o]:
                        best = possibility[o]
                if best.poss > 0:
                    break
        # Strategy 6 (fallback): highest guaranteed minimum star level wins.
        if best.poss == 0:
            logger.debug('choose: min_level')
            for o in possibility.keys():
                possibility[o].poss = possibility[o].min
                if best < possibility[o]:
                    best = possibility[o]
        logger.debug(f'poss: {possibility}')
        logger.debug(f'best: {best}')
        # Translate the winning bitmask back into tag names.
        choose = []
        for i in range(len(tags)):
            if best.choose & (1 << i):
                choose.append(tags[i])
        return choose, best
| 2,278 | 0 | 162 |
375bf56f2d70c880920c1c4d9ca81b57c85e9cb2 | 7,869 | py | Python | 03-cohort-analysis/01-plot-demographics.py | rionbr/DDIIndy | 63737cd05e92f496e7fa51fecc1c7542edba1f2e | [
"MIT"
] | null | null | null | 03-cohort-analysis/01-plot-demographics.py | rionbr/DDIIndy | 63737cd05e92f496e7fa51fecc1c7542edba1f2e | [
"MIT"
] | null | null | null | 03-cohort-analysis/01-plot-demographics.py | rionbr/DDIIndy | 63737cd05e92f496e7fa51fecc1c7542edba1f2e | [
"MIT"
] | null | null | null | # coding=utf-8
# Author: Rion B Correia
# Date: April 17, 2020
#
# Description: Demographics
#
#
import configparser
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import event
from utils import add_own_encoders, ensurePathExists, map_age_to_age_group
import matplotlib as mpl
# Render mathtext with the Computer Modern font set and a serif roman face.
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import to_hex
from matplotlib.colors import LinearSegmentedColormap
if __name__ == '__main__':
    # DB: credentials and host come from ../config.ini, section [IU-RDC-MySQL].
    cfg = configparser.ConfigParser()
    cfg.read('../config.ini')
    url = 'mysql+pymysql://%(user)s:%(pass)s@%(host)s/%(db)s?charset=utf8' % cfg['IU-RDC-MySQL']
    engine = sqlalchemy.create_engine(url, encoding='utf-8')
    event.listen(engine, "before_cursor_execute", add_own_encoders)
    #
    # Retrieve Data
    #
    #
    # Gender (only from those with a medication)
    #
    sqlg = """
        SELECT
            p.gender,
            COUNT(*) AS 'count'
        FROM patient p
        WHERE
            p.gender IS NOT NULL
            AND
            p.id_patient IN (SELECT m.id_patient FROM medication m)
        GROUP BY p.gender
    """
    dfg = pd.read_sql(sqlg, con=engine, index_col='gender')
    # Percent
    dfg['%'] = dfg['count'] / dfg['count'].sum()
    # Color
    dfg.loc['Male', 'color'] = '#1f77b4'
    dfg.loc['Female', 'color'] = '#d62728'
    # Age (today; only from those with a medication)
    sqla = """
        SELECT
            p.age_today AS 'age',
            COUNT(*) AS 'count'
        FROM patient p
        WHERE
            p.id_patient IN (SELECT m.id_patient FROM medication m)
        GROUP BY p.age_today
    """
    dfa = pd.read_sql(sqla, con=engine, index_col='age')
    # Map age to age_group
    dfa['age_group'] = map_age_to_age_group(dfa.index)
    # Group by age_group
    dfa = dfa.groupby('age_group').agg({'count': 'sum'})
    # Percent
    dfa['%'] = dfa['count'] / dfa['count'].sum()
    # Color: one distinct hue per age group, sampled along the jet_r colormap.
    #cmap = LinearSegmentedColormap.from_list(name='custom', colors=['#ff7f0e', '#d62728', '#9467bd'], N=256, gamma=1.0)
    cmap = cm.get_cmap('jet_r')
    dfa['color'] = [to_hex(cmap(c)) for c in np.linspace(0, 1, len(dfa))]
    #
    # Ethnicity
    #
    sqle = """
        SELECT
            p.ethnicity,
            COUNT(*) AS 'count'
        FROM patient p
        WHERE
            /* p.ethnicity IS NOT NULL AND */
            p.id_patient IN (SELECT m.id_patient FROM medication m)
        GROUP BY p.ethnicity
    """
    dfe = pd.read_sql(sqle, con=engine)
    # Rename: shorten the survey labels to the category names declared below.
    # Fixed: replace on the 'ethnicity' column (not the whole frame, matching
    # the race handling further down), and map 'Hispanic/Latino' to
    # 'Hisp/Latin' -- the old self-mapping was not a declared category and
    # would have turned into NaN after the Categorical cast.
    dfe['ethnicity'] = dfe['ethnicity'].replace({'Hispanic/Latino': 'Hisp/Latin', 'Not Hispanic, Latino/a, or Spanish origin': 'Not Hisp/Latin/Span', 'Not Hispanic/Latino': 'Not Hisp/Latin'})
    dfe['ethnicity'] = dfe['ethnicity'].fillna('n/a')
    # To Categorical
    dfe['ethnicity'] = pd.Categorical(dfe['ethnicity'], categories=['Not Hisp/Latin', 'Not Hisp/Latin/Span', 'Hisp/Latin', 'n/a'], ordered=True)
    # Sort
    dfe = dfe.sort_values('ethnicity', ascending=True)
    # Set Index
    dfe.set_index('ethnicity', inplace=True)
    # %
    dfe['%'] = dfe['count'] / dfe['count'].sum()
    # Color
    dfe['color'] = ['#ffbb78', '#c49c94', '#98df8a', '#c7c7c7']
    # Race
    sqlr = """
        SELECT
            p.race,
            count(*) AS 'count'
        FROM patient p
        WHERE
            p.id_patient IN (SELECT m.id_patient FROM medication m)
        GROUP BY p.race
    """
    dfr = pd.read_sql(sqlr, con=engine)
    # Rename / Group: fold the small race groups into a single 'Minorities' bar.
    race_minorities = 'Bi-racial, Hispanic, Islander, or Indian'
    dfr['race'] = dfr['race'].replace({'Islander': 'Minorities', 'Bi-racial': 'Minorities', 'Hispanic': 'Minorities', 'Indian': 'Minorities'})
    dfr['race'] = dfr['race'].fillna('n/a')
    dfr = dfr.groupby('race').agg('sum').reset_index()
    # To Categorical
    dfr['race'] = pd.Categorical(dfr['race'], categories=['White', 'Black', 'Asian', 'Minorities', 'Indian', 'Islander', 'Bi-racial', 'Hispanic', 'n/a'], ordered=True)
    # Sort
    dfr = dfr.sort_values('race', ascending=True)
    # Set Index[]
    dfr.set_index('race', inplace=True)
    # %
    dfr['%'] = dfr['count'] / dfr['count'].sum()
    # Color
    dfr['color'] = ['#2ca02c', '#8c564b', '#e377c2', '#17becf', '#c7c7c7']
    #
    # Plot: three stacked horizontal bars (Race, Age, Gender); one segment per
    # category, segment width = share of patients.
    #
    fig, ax = plt.subplots(figsize=(7, 2.5), nrows=1, ncols=1)
    ax.set_title('Patient Demographics')
    width = 0.80
    edgecolor = '#7f7f7f'
    # Gender
    cum_percent = 0
    for gender, row in dfg.iterrows():
        percent = row['%']
        facecolor = row['color']
        b = ax.barh(2, percent, width, facecolor=facecolor, left=cum_percent, edgecolor=edgecolor, alpha=0.5)
        #
        patch = b.get_children()[0]
        bx, by = patch.get_xy()
        tx, ty = 0.5 * patch.get_width() + bx, 0.45 * patch.get_height() + by
        #
        ax.text(tx, ty, gender, ha='center', va='center', rotation=0)
        #
        cum_percent += percent
    # Age
    cum_percent = 0
    for age_group, row in dfa.iterrows():
        percent = row['%']
        facecolor = row['color']
        b = ax.barh(1, percent, width, facecolor=facecolor, left=cum_percent, edgecolor=edgecolor, alpha=0.5)
        #
        patch = b.get_children()[0]
        bx, by = patch.get_xy()
        tx, ty = 0.59 * patch.get_width() + bx, 0.5 * patch.get_height() + by
        # The oldest brackets are too narrow to label legibly.
        if age_group not in ['80-84', '85-89', '90-94', '95-99', '>99']:
            ax.text(tx, ty, age_group, ha='center', va='center', rotation=90)
        #
        cum_percent += percent
    # Race
    cum_percent = 0
    for race, row in dfr.iterrows():
        percent = row['%']
        facecolor = row['color']
        b = ax.barh(0, percent, width, facecolor=facecolor, left=cum_percent, edgecolor=edgecolor, alpha=0.5)
        #
        patch = b.get_children()[0]
        bx, by = patch.get_xy()
        tx, ty = 0.5 * patch.get_width() + bx, 0.45 * patch.get_height() + by
        #
        if race in ['White', 'Black']:
            ax.text(tx, ty, race, ha='center', va='center', rotation=0)
        elif race == 'Minorities':
            # The bar is too thin for an inline label; annotate with an arrow.
            mx, my = 0.58, -1.1
            ax.annotate(race_minorities, xy=(tx, 0.25 * patch.get_height() + by), xycoords='data', xytext=(mx, my),
                        arrowprops=dict(facecolor='black', arrowstyle="<|-,head_length=0.3,head_width=0.15",
                                        connectionstyle="angle3,angleA=0,angleB=90"),
                        horizontalalignment='left', verticalalignment='center'
                        )
        else:
            ax.text(tx, ty, race, ha='center', va='center', rotation=90)
        #
        cum_percent += percent
    # Ethnicity (kept disabled; turn the string back into code to enable)
    """
    cum_percent = 0
    for ethnicity, row in dfe.iterrows():
        percent = row['%']
        color = row['color']
        b = ax.barh(0, percent, width, color=color, left=cum_percent, edgecolor=edgecolor)
        #
        patch = b.get_children()[0]
        bx, by = patch.get_xy()
        tx, ty = 0.5 * patch.get_width() + bx, 0.45 * patch.get_height() + by
        #
        if ethnicity in ['Hisp/Latin']:
            ax.text(tx, ty, ethnicity, ha='center', va='center', rotation=90)
        else:
            pass
        #
        cum_percent += percent
    """
    #
    xticks = np.linspace(0, 1, 11, endpoint=True)
    xticklabels = ['%.1f' % x for x in xticks]
    ax.set_xticks(xticks)
    ax.set_xticklabels(xticklabels)
    yticks = np.array([0, 1, 2]) # + (width / 2)
    ax.set_yticks(yticks)
    ax.set_yticklabels(['Race', 'Age', 'Gender'])
    ax.set_xlim(0, 1)
    ax.set_ylim(-0.5, 2.5)
    # Save
    plt.subplots_adjust(left=0.10, right=0.97, bottom=0.20, top=0.88, wspace=0, hspace=0)
    wIMGfile = 'images/img-demographics.pdf'
    ensurePathExists(wIMGfile)
    fig.savefig(wIMGfile)
plt.close() | 33.063025 | 183 | 0.574406 | # coding=utf-8
# Author: Rion B Correia
# Date: April 17, 2020
#
# Description: Demographics
#
#
import configparser
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import event
from utils import add_own_encoders, ensurePathExists, map_age_to_age_group
import matplotlib as mpl
# Render mathtext with the Computer Modern font set and a serif roman face.
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import to_hex
from matplotlib.colors import LinearSegmentedColormap
if __name__ == '__main__':
    # DB: credentials and host come from ../config.ini, section [IU-RDC-MySQL].
    cfg = configparser.ConfigParser()
    cfg.read('../config.ini')
    url = 'mysql+pymysql://%(user)s:%(pass)s@%(host)s/%(db)s?charset=utf8' % cfg['IU-RDC-MySQL']
    engine = sqlalchemy.create_engine(url, encoding='utf-8')
    event.listen(engine, "before_cursor_execute", add_own_encoders)
    #
    # Retrieve Data
    #
    #
    # Gender (only from those with a medication)
    #
    sqlg = """
        SELECT
            p.gender,
            COUNT(*) AS 'count'
        FROM patient p
        WHERE
            p.gender IS NOT NULL
            AND
            p.id_patient IN (SELECT m.id_patient FROM medication m)
        GROUP BY p.gender
    """
    dfg = pd.read_sql(sqlg, con=engine, index_col='gender')
    # Percent
    dfg['%'] = dfg['count'] / dfg['count'].sum()
    # Color
    dfg.loc['Male', 'color'] = '#1f77b4'
    dfg.loc['Female', 'color'] = '#d62728'
    # Age (today; only from those with a medication)
    sqla = """
        SELECT
            p.age_today AS 'age',
            COUNT(*) AS 'count'
        FROM patient p
        WHERE
            p.id_patient IN (SELECT m.id_patient FROM medication m)
        GROUP BY p.age_today
    """
    dfa = pd.read_sql(sqla, con=engine, index_col='age')
    # Map age to age_group
    dfa['age_group'] = map_age_to_age_group(dfa.index)
    # Group by age_group
    dfa = dfa.groupby('age_group').agg({'count': 'sum'})
    # Percent
    dfa['%'] = dfa['count'] / dfa['count'].sum()
    # Color: one distinct hue per age group, sampled along the jet_r colormap.
    #cmap = LinearSegmentedColormap.from_list(name='custom', colors=['#ff7f0e', '#d62728', '#9467bd'], N=256, gamma=1.0)
    cmap = cm.get_cmap('jet_r')
    dfa['color'] = [to_hex(cmap(c)) for c in np.linspace(0, 1, len(dfa))]
    #
    # Ethnicity
    #
    sqle = """
        SELECT
            p.ethnicity,
            COUNT(*) AS 'count'
        FROM patient p
        WHERE
            /* p.ethnicity IS NOT NULL AND */
            p.id_patient IN (SELECT m.id_patient FROM medication m)
        GROUP BY p.ethnicity
    """
    dfe = pd.read_sql(sqle, con=engine)
    # Rename: shorten the survey labels to the category names declared below.
    # Fixed: replace on the 'ethnicity' column (not the whole frame, matching
    # the race handling further down), and map 'Hispanic/Latino' to
    # 'Hisp/Latin' -- the old self-mapping was not a declared category and
    # would have turned into NaN after the Categorical cast.
    dfe['ethnicity'] = dfe['ethnicity'].replace({'Hispanic/Latino': 'Hisp/Latin', 'Not Hispanic, Latino/a, or Spanish origin': 'Not Hisp/Latin/Span', 'Not Hispanic/Latino': 'Not Hisp/Latin'})
    dfe['ethnicity'] = dfe['ethnicity'].fillna('n/a')
    # To Categorical
    dfe['ethnicity'] = pd.Categorical(dfe['ethnicity'], categories=['Not Hisp/Latin', 'Not Hisp/Latin/Span', 'Hisp/Latin', 'n/a'], ordered=True)
    # Sort
    dfe = dfe.sort_values('ethnicity', ascending=True)
    # Set Index
    dfe.set_index('ethnicity', inplace=True)
    # %
    dfe['%'] = dfe['count'] / dfe['count'].sum()
    # Color
    dfe['color'] = ['#ffbb78', '#c49c94', '#98df8a', '#c7c7c7']
    # Race
    sqlr = """
        SELECT
            p.race,
            count(*) AS 'count'
        FROM patient p
        WHERE
            p.id_patient IN (SELECT m.id_patient FROM medication m)
        GROUP BY p.race
    """
    dfr = pd.read_sql(sqlr, con=engine)
    # Rename / Group: fold the small race groups into a single 'Minorities' bar.
    race_minorities = 'Bi-racial, Hispanic, Islander, or Indian'
    dfr['race'] = dfr['race'].replace({'Islander': 'Minorities', 'Bi-racial': 'Minorities', 'Hispanic': 'Minorities', 'Indian': 'Minorities'})
    dfr['race'] = dfr['race'].fillna('n/a')
    dfr = dfr.groupby('race').agg('sum').reset_index()
    # To Categorical
    dfr['race'] = pd.Categorical(dfr['race'], categories=['White', 'Black', 'Asian', 'Minorities', 'Indian', 'Islander', 'Bi-racial', 'Hispanic', 'n/a'], ordered=True)
    # Sort
    dfr = dfr.sort_values('race', ascending=True)
    # Set Index[]
    dfr.set_index('race', inplace=True)
    # %
    dfr['%'] = dfr['count'] / dfr['count'].sum()
    # Color
    dfr['color'] = ['#2ca02c', '#8c564b', '#e377c2', '#17becf', '#c7c7c7']
    #
    # Plot: three stacked horizontal bars (Race, Age, Gender); one segment per
    # category, segment width = share of patients.
    #
    fig, ax = plt.subplots(figsize=(7, 2.5), nrows=1, ncols=1)
    ax.set_title('Patient Demographics')
    width = 0.80
    edgecolor = '#7f7f7f'
    # Gender
    cum_percent = 0
    for gender, row in dfg.iterrows():
        percent = row['%']
        facecolor = row['color']
        b = ax.barh(2, percent, width, facecolor=facecolor, left=cum_percent, edgecolor=edgecolor, alpha=0.5)
        #
        patch = b.get_children()[0]
        bx, by = patch.get_xy()
        tx, ty = 0.5 * patch.get_width() + bx, 0.45 * patch.get_height() + by
        #
        ax.text(tx, ty, gender, ha='center', va='center', rotation=0)
        #
        cum_percent += percent
    # Age
    cum_percent = 0
    for age_group, row in dfa.iterrows():
        percent = row['%']
        facecolor = row['color']
        b = ax.barh(1, percent, width, facecolor=facecolor, left=cum_percent, edgecolor=edgecolor, alpha=0.5)
        #
        patch = b.get_children()[0]
        bx, by = patch.get_xy()
        tx, ty = 0.59 * patch.get_width() + bx, 0.5 * patch.get_height() + by
        # The oldest brackets are too narrow to label legibly.
        if age_group not in ['80-84', '85-89', '90-94', '95-99', '>99']:
            ax.text(tx, ty, age_group, ha='center', va='center', rotation=90)
        #
        cum_percent += percent
    # Race
    cum_percent = 0
    for race, row in dfr.iterrows():
        percent = row['%']
        facecolor = row['color']
        b = ax.barh(0, percent, width, facecolor=facecolor, left=cum_percent, edgecolor=edgecolor, alpha=0.5)
        #
        patch = b.get_children()[0]
        bx, by = patch.get_xy()
        tx, ty = 0.5 * patch.get_width() + bx, 0.45 * patch.get_height() + by
        #
        if race in ['White', 'Black']:
            ax.text(tx, ty, race, ha='center', va='center', rotation=0)
        elif race == 'Minorities':
            # The bar is too thin for an inline label; annotate with an arrow.
            mx, my = 0.58, -1.1
            ax.annotate(race_minorities, xy=(tx, 0.25 * patch.get_height() + by), xycoords='data', xytext=(mx, my),
                        arrowprops=dict(facecolor='black', arrowstyle="<|-,head_length=0.3,head_width=0.15",
                                        connectionstyle="angle3,angleA=0,angleB=90"),
                        horizontalalignment='left', verticalalignment='center'
                        )
        else:
            ax.text(tx, ty, race, ha='center', va='center', rotation=90)
        #
        cum_percent += percent
    # Ethnicity (kept disabled; turn the string back into code to enable)
    """
    cum_percent = 0
    for ethnicity, row in dfe.iterrows():
        percent = row['%']
        color = row['color']
        b = ax.barh(0, percent, width, color=color, left=cum_percent, edgecolor=edgecolor)
        #
        patch = b.get_children()[0]
        bx, by = patch.get_xy()
        tx, ty = 0.5 * patch.get_width() + bx, 0.45 * patch.get_height() + by
        #
        if ethnicity in ['Hisp/Latin']:
            ax.text(tx, ty, ethnicity, ha='center', va='center', rotation=90)
        else:
            pass
        #
        cum_percent += percent
    """
    #
    xticks = np.linspace(0, 1, 11, endpoint=True)
    xticklabels = ['%.1f' % x for x in xticks]
    ax.set_xticks(xticks)
    ax.set_xticklabels(xticklabels)
    yticks = np.array([0, 1, 2]) # + (width / 2)
    ax.set_yticks(yticks)
    ax.set_yticklabels(['Race', 'Age', 'Gender'])
    ax.set_xlim(0, 1)
    ax.set_ylim(-0.5, 2.5)
    # Save
    plt.subplots_adjust(left=0.10, right=0.97, bottom=0.20, top=0.88, wspace=0, hspace=0)
    wIMGfile = 'images/img-demographics.pdf'
    ensurePathExists(wIMGfile)
    fig.savefig(wIMGfile)
plt.close() | 0 | 0 | 0 |
c843c087d88d5065cf5a63507f064fd8b3d81707 | 5,139 | py | Python | bot/clients/stack_client.py | EJCFox/factorio-discord-bot | 150cb7f49a12edee781b2e57ebb2b5bddffcd851 | [
"MIT"
] | 1 | 2021-02-04T22:54:56.000Z | 2021-02-04T22:54:56.000Z | bot/clients/stack_client.py | EJCFox/factorio-discord-bot | 150cb7f49a12edee781b2e57ebb2b5bddffcd851 | [
"MIT"
] | 30 | 2021-12-17T20:33:06.000Z | 2022-01-02T14:35:31.000Z | bot/clients/stack_client.py | EJCFox/factorio-discord-bot | 150cb7f49a12edee781b2e57ebb2b5bddffcd851 | [
"MIT"
] | null | null | null | import logging
import os
import aiobotocore
from ..helpers.env import getenv
BOT_IP = getenv('BOT_IP')
SSH_KEY_NAME = getenv('SSH_KEY_NAME')
def get_template():
    """Read the CloudFormation template that ships next to this module."""
    with open(
            os.path.join(os.path.dirname(
                os.path.abspath(__file__)), '../template.yaml'),
            encoding="utf8") as template_file:
        return template_file.read()
# get_template() was called below without being defined anywhere in this
# module (NameError at import time); restored above.
_template = get_template()
_session = aiobotocore.get_session()
| 34.489933 | 85 | 0.505351 | import logging
import os
import aiobotocore
from ..helpers.env import getenv
# Deployment parameters pulled from the environment at import time.
BOT_IP = getenv('BOT_IP')
SSH_KEY_NAME = getenv('SSH_KEY_NAME')
def get_template():
    """Return the text of the CloudFormation template bundled with this package."""
    template_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), '../template.yaml')
    with open(template_path, encoding="utf8") as template_file:
        return template_file.read()
# The template body is read once at import time and reused for stack creation.
_template = get_template()
# Shared aiobotocore session; the coroutines below create short-lived clients from it.
_session = aiobotocore.get_session()
async def create_stack(name, version):
    """Create a Factorio CloudFormation stack and wait until it is complete.

    Looks up the account's default VPC and two of its subnets, then creates
    the stack from the bundled template.

    :param name: name for the new CloudFormation stack.
    :param version: Factorio Docker image tag passed as FactorioImageTag.
    """
    logging.info('Creating stack with name %s and version %s',
                 name, version)
    async with _session.create_client('ec2') as ec2_client:
        logging.info('Fetching VPC information')
        # Assume that the account has a single default VPC
        vpc_id = (await ec2_client.describe_vpcs(Filters=[{
            'Name': 'is-default',
            'Values': ['true']
        }]))['Vpcs'][0]['VpcId']
        # The template needs two subnets; use the first two of the VPC.
        subnets = await ec2_client.describe_subnets(Filters=[{
            'Name': 'vpc-id',
            'Values': [vpc_id]}])
        subnet_a = subnets['Subnets'][0]['SubnetId']
        subnet_b = subnets['Subnets'][1]['SubnetId']
    async with _session.create_client('cloudformation') as cloudformation_client:
        logging.info('Creating stack')
        await cloudformation_client.create_stack(
            StackName=name,
            TemplateBody=_template,
            Parameters=[
                {
                    'ParameterKey': 'FactorioImageTag',
                    'ParameterValue': version,
                },
                {
                    'ParameterKey': 'YourIp',
                    'ParameterValue': BOT_IP,
                },
                {
                    'ParameterKey': 'KeyPairName',
                    'ParameterValue': SSH_KEY_NAME,
                },
                {
                    'ParameterKey': 'VpcId',
                    'ParameterValue': vpc_id,
                },
                {
                    'ParameterKey': 'SubnetA',
                    'ParameterValue': subnet_a,
                },
                {
                    'ParameterKey': 'SubnetB',
                    'ParameterValue': subnet_b,
                }
            ],
            Capabilities=['CAPABILITY_IAM']
        )
        # Poll every 15 seconds until the stack reaches CREATE_COMPLETE.
        await cloudformation_client.get_waiter('stack_create_complete').wait(
            StackName=name,
            WaiterConfig={
                'Delay': 15
            })
        logging.info('Created stack %s with version %s', name, version)
async def update_stack(name, server_state_param):
    """Update only the ServerState parameter of an existing stack.

    All other template parameters are kept at their previous values; blocks
    until the update completes.

    :param name: name of the CloudFormation stack to update.
    :param server_state_param: new value for the ServerState parameter
        (used to start/stop the server).
    """
    logging.info('Updating stack %s to %s', name, server_state_param)
    async with _session.create_client('cloudformation') as client:
        await client.update_stack(
            StackName=name,
            UsePreviousTemplate=True,
            Parameters=[
                {
                    'ParameterKey': 'ServerState',
                    'ParameterValue': server_state_param,
                },
                {
                    'ParameterKey': 'FactorioImageTag',
                    'UsePreviousValue': True,
                },
                {
                    'ParameterKey': 'YourIp',
                    'UsePreviousValue': True,
                },
                {
                    'ParameterKey': 'KeyPairName',
                    'UsePreviousValue': True,
                },
                {
                    'ParameterKey': 'VpcId',
                    'UsePreviousValue': True,
                },
                {
                    'ParameterKey': 'SubnetA',
                    'UsePreviousValue': True,
                },
                {
                    'ParameterKey': 'SubnetB',
                    'UsePreviousValue': True,
                }
            ],
            Capabilities=['CAPABILITY_IAM']
        )
        # Poll every 15 seconds until the stack reaches UPDATE_COMPLETE.
        await client.get_waiter('stack_update_complete').wait(
            StackName=name,
            WaiterConfig={
                'Delay': 15
            })
        logging.info('Update for stack %s complete', name)
async def delete_stack(name):
    """Delete the CloudFormation stack *name* and block until deletion finishes."""
    logging.info('Deleting stack %s', name)
    async with _session.create_client('cloudformation') as cf:
        await cf.delete_stack(StackName=name)
        waiter = cf.get_waiter('stack_delete_complete')
        await waiter.wait(StackName=name)
        logging.info('Deleted stack with name %s', name)
async def list_stacks():
    """Return every CloudFormation stack that has not been fully deleted."""
    logging.info('Fetching list of stacks')
    async with _session.create_client('cloudformation') as cf:
        response = await cf.describe_stacks()
        return [stack for stack in response['Stacks']
                if stack['StackStatus'] != 'DELETE_COMPLETE']
async def stack_details(name):
    """Return the CloudFormation description of the stack called *name*."""
    logging.info('Getting stack details for %s', name)
    async with _session.create_client('cloudformation') as cf:
        described = await cf.describe_stacks(StackName=name)
        return described['Stacks'][0]
| 4,786 | 0 | 138 |
33a68569a46d9c6d65161ea6169a049677a6cc88 | 3,264 | py | Python | parallax/parallax/core/python/common/shard.py | snuspl/parallax | 83791254ccd5d7a55213687a8dff4c2e04372694 | [
"Apache-2.0"
] | 126 | 2018-07-26T07:06:56.000Z | 2022-01-25T08:11:25.000Z | parallal/parallax/core/python/common/shard.py | nikkkkhil/auto-parallal-deeplearning | 80e350fd933b8f82721bdc4950bc969ba0c630a4 | [
"Apache-2.0"
] | 20 | 2018-08-07T04:51:16.000Z | 2020-05-15T04:47:20.000Z | parallal/parallax/core/python/common/shard.py | nikkkkhil/auto-parallal-deeplearning | 80e350fd933b8f82721bdc4950bc969ba0c630a4 | [
"Apache-2.0"
] | 30 | 2018-08-01T13:25:43.000Z | 2022-02-28T01:28:34.000Z | # Copyright (C) 2018 Seoul National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
# Keys of the tf.Graph collections used to register the sharding tensors.
NUM_SHARDS = "num_shards"
SHARD_ID = "shard_id"
SHARD_FILTER_PRED = "shard_filter_predicate"
# Positions related to the FilterDataset predicate's captured inputs;
# presumably consumed by Parallax's graph transformation -- they are not
# referenced anywhere in this file.
FILTER_DATASET_NUM_SHARDS_POS = 1
FILTER_DATASET_SHARD_ID_POS = 2
def create_num_shards_and_shard_id():
    """Returns and create the num shards and the shard id tensors.
    Returns:
      The num shards and the shard id tensors.
    Raises:
      ValueError: if the num shards tensor or the shard id tensor is already
        defined.
    """
    # TODO: allow num_shards and shard_id inside a library function
    graph = tf.get_default_graph()
    num_shards_tensors = graph.get_collection(NUM_SHARDS)
    if len(num_shards_tensors) > 0:
        raise ValueError('"num_shards" already exists.')
    shard_id_tensors = graph.get_collection(SHARD_ID)
    if len(shard_id_tensors) > 0:
        raise ValueError('"shard_id" already exists.')
    # Create in proper graph and base name_scope.
    with graph.as_default() as g, g.name_scope(None):
        # Initialize num_shards_tensor=1, and shard_id_tensor=0.
        # parallax updates the value when the graph is transformed
        # for distributed version.
        num_shards_tensor = tf.constant(1, dtype=tf.int64, name="num_shards")
        shard_id_tensor = tf.constant(0, dtype=tf.int64, name="shard_id")
    # Register the tensors in graph collections so later lookups can find them.
    tf.add_to_collection(NUM_SHARDS, num_shards_tensor)
    tf.add_to_collection(SHARD_ID, shard_id_tensor)
    return num_shards_tensor, shard_id_tensor
def shard(ds):
    """Convert a dataset to include shard, it has same effect
    with ds.shard(num_shards, index).
    """
    # TODO: allow dataset shard inside a function or dataset api
    # (e.g., map, parallel_interleave)
    # Fixed: this copy of the module referenced _get_or_create_... and
    # filter_fn, neither of which was defined (NameError). The lookup is
    # inlined and the predicate restored below.
    graph = tf.get_default_graph()
    num_shards_tensors = graph.get_collection(NUM_SHARDS)
    if len(num_shards_tensors) > 0:
        # Reuse the sharding tensors already registered in the graph.
        num_shards = num_shards_tensors[0]
        shard_id = graph.get_collection(SHARD_ID)[0]
    else:
        num_shards, shard_id = create_num_shards_and_shard_id()
    def filter_fn(elem_index, _):
        # Keep elements whose index is congruent to shard_id mod num_shards.
        mod_result = tf.mod(elem_index, num_shards)
        return tf.equal(mod_result, shard_id)
    f = ds._enumerate().filter(filter_fn)
    # The predicate must capture num_shards first and shard_id second.
    assert f._predicate.captured_inputs[0] == num_shards
    assert f._predicate.captured_inputs[1] == shard_id
    tf.add_to_collection(SHARD_FILTER_PRED,
                         f._predicate.name)
    # Drop the enumeration index, yielding the original elements.
    return f.map(lambda _, elem: elem)
| 37.090909 | 80 | 0.706189 | # Copyright (C) 2018 Seoul National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
# Keys of the tf.Graph collections used to register the sharding tensors.
NUM_SHARDS = "num_shards"
SHARD_ID = "shard_id"
SHARD_FILTER_PRED = "shard_filter_predicate"
# Positions related to the FilterDataset predicate's captured inputs;
# presumably consumed by Parallax's graph transformation -- they are not
# referenced anywhere in this file.
FILTER_DATASET_NUM_SHARDS_POS = 1
FILTER_DATASET_SHARD_ID_POS = 2
def create_num_shards_and_shard_id():
    """Returns and create the num shards and the shard id tensors.
    Returns:
      The num shards and the shard id tensors.
    Raises:
      ValueError: if the num shards tensor or the shard id tensor is already
        defined.
    """
    # TODO: allow num_shards and shard_id inside a library function
    graph = tf.get_default_graph()
    num_shards_tensors = graph.get_collection(NUM_SHARDS)
    if len(num_shards_tensors) > 0:
        raise ValueError('"num_shards" already exists.')
    shard_id_tensors = graph.get_collection(SHARD_ID)
    if len(shard_id_tensors) > 0:
        raise ValueError('"shard_id" already exists.')
    # Create in proper graph and base name_scope.
    with graph.as_default() as g, g.name_scope(None):
        # Initialize num_shards_tensor=1, and shard_id_tensor=0.
        # parallax updates the value when the graph is transformed
        # for distributed version.
        num_shards_tensor = tf.constant(1, dtype=tf.int64, name="num_shards")
        shard_id_tensor = tf.constant(0, dtype=tf.int64, name="shard_id")
    # Register the tensors in graph collections so later lookups can find them.
    tf.add_to_collection(NUM_SHARDS, num_shards_tensor)
    tf.add_to_collection(SHARD_ID, shard_id_tensor)
    return num_shards_tensor, shard_id_tensor
def _get_or_create_num_shards_and_shard_id():
    """Return the num_shards/shard_id tensors registered in the default
    graph's collections, creating and registering them if absent."""
    graph = tf.get_default_graph()
    num_shards_tensors = graph.get_collection(NUM_SHARDS)
    if len(num_shards_tensors) > 0:
        num_shards_tensor = num_shards_tensors[0]
        shard_id_tensor = \
            graph.get_collection(SHARD_ID)[0]
    else:
        num_shards_tensor, shard_id_tensor = create_num_shards_and_shard_id()
    return num_shards_tensor, shard_id_tensor
def shard(ds):
    """Convert a dataset to include shard, it has same effect
    with ds.shard(num_shards, index).
    """
    # TODO: allow dataset shard inside a function or dataset api
    # (e.g., map, parallel_interleave)
    num_shards, shard_id = _get_or_create_num_shards_and_shard_id()
    def filter_fn(elem_index, _):
        # Keep elements whose index is congruent to shard_id mod num_shards.
        mod_result = tf.mod(elem_index, num_shards)
        return tf.equal(mod_result, shard_id)
    f = ds._enumerate().filter(filter_fn)
    # The predicate must capture num_shards first and shard_id second;
    # presumably the distributed graph transformation rewrites these
    # captured inputs -- see SHARD_FILTER_PRED below.
    assert f._predicate.captured_inputs[0] == num_shards
    assert f._predicate.captured_inputs[1] == shard_id
    tf.add_to_collection(SHARD_FILTER_PRED,
                         f._predicate.name)
    # Drop the enumeration index, yielding the original elements.
    return f.map(lambda _, elem: elem)
| 517 | 0 | 50 |
1a8e8be81848626887850c0eb27bf0b710e21586 | 2,185 | py | Python | video2images.py | tenghehan/reid_without_id | d1d0ff273b1ef19fc6da8cbbf210527779b37455 | [
"MIT"
] | null | null | null | video2images.py | tenghehan/reid_without_id | d1d0ff273b1ef19fc6da8cbbf210527779b37455 | [
"MIT"
] | null | null | null | video2images.py | tenghehan/reid_without_id | d1d0ff273b1ef19fc6da8cbbf210527779b37455 | [
"MIT"
] | null | null | null | """
@author: tenghehan
将 video 形式的视频数据转化成 image sequence 形式的数据.
"""
import cv2
import os
import argparse
from tqdm import tqdm
from utils.log import get_logger
class VideoFramer(object):
    """Context manager that dumps a video's frames to a JPEG image sequence.

    NOTE(review): this class and parse_args() were referenced by the
    __main__ block but missing from this copy of the module (NameError);
    restored here.
    """
    def __init__(self, args, video_path):
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")
        self.vdo = cv2.VideoCapture()
    def __enter__(self):
        """Open the video, read its metadata and prepare the output folder."""
        assert os.path.isfile(self.video_path), "Path error"
        self.vdo.open(self.video_path)
        self.total_frames = int(cv2.VideoCapture.get(self.vdo, cv2.CAP_PROP_FRAME_COUNT))
        self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
        assert self.vdo.isOpened()
        if self.args.save_path:
            # path of saved image sequence: <save_path>/<video name>/img1
            dirname, _ = os.path.splitext(os.path.basename(self.video_path))
            self.args.save_path = os.path.join(self.args.save_path, dirname, "img1")
            os.makedirs(self.args.save_path, exist_ok=True)
            # logging
            self.logger.info("Save results to {}".format(self.args.save_path))
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        if exc_type:
            print(exc_type, exc_value, exc_traceback)
    def run(self):
        """Walk the video and save every frame_interval-th frame as a JPEG."""
        idx_frame = 0
        # tqdm takes the expected count via the `total` keyword; passing the
        # int positionally would treat it as the iterable.
        pbar = tqdm(total=self.total_frames + 1)
        while self.vdo.grab():
            idx_frame += 1
            if idx_frame % self.args.frame_interval:
                continue
            _, ori_im = self.vdo.retrieve()
            image_save_path = os.path.join(self.args.save_path, f'{str(idx_frame).zfill(6)}.jpg')
            cv2.imwrite(image_save_path, ori_im)
            pbar.update()
        pbar.close()
def parse_args():
    """Parse the command-line arguments for the video framer."""
    parser = argparse.ArgumentParser()
    parser.add_argument("VIDEO_PATH", type=str)
    parser.add_argument("--save_path", type=str, default="./image_sequence/")
    parser.add_argument("--frame_interval", type=int, default=1)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    with VideoFramer(args, video_path=args.VIDEO_PATH) as vdo_frm:
        vdo_frm.run()
vdo_frm.run() | 29.931507 | 97 | 0.642563 | """
@author: tenghehan
将 video 形式的视频数据转化成 image sequence 形式的数据.
"""
import cv2
import os
import argparse
from tqdm import tqdm
from utils.log import get_logger
class VideoFramer(object):
    """Context manager that dumps a video's frames to a JPEG image sequence."""
    def __init__(self, args, video_path):
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")
        self.vdo = cv2.VideoCapture()
    def __enter__(self):
        """Open the video, read its metadata and prepare the output folder."""
        assert os.path.isfile(self.video_path), "Path error"
        self.vdo.open(self.video_path)
        self.total_frames = int(cv2.VideoCapture.get(self.vdo, cv2.CAP_PROP_FRAME_COUNT))
        self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
        assert self.vdo.isOpened()
        if self.args.save_path:
            # path of saved image sequence: <save_path>/<video name>/img1
            dirname, _ = os.path.splitext(os.path.basename(self.video_path))
            self.args.save_path = os.path.join(self.args.save_path, dirname, "img1")
            os.makedirs(self.args.save_path, exist_ok=True)
            # logging
            self.logger.info("Save results to {}".format(self.args.save_path))
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        if exc_type:
            print(exc_type, exc_value, exc_traceback)
    def run(self):
        """Walk the video and save every frame_interval-th frame as a JPEG."""
        idx_frame = 0
        # Fixed: tqdm takes the expected count via the `total` keyword;
        # passing the int positionally treats it as the iterable, so the
        # progress bar had no total.
        pbar = tqdm(total=self.total_frames + 1)
        while self.vdo.grab():
            idx_frame += 1
            if idx_frame % self.args.frame_interval:
                continue
            _, ori_im = self.vdo.retrieve()
            image_save_path = os.path.join(self.args.save_path, f'{str(idx_frame).zfill(6)}.jpg')
            cv2.imwrite(image_save_path, ori_im)
            pbar.update()
        pbar.close()
def parse_args():
    """Build the CLI parser for the video framer and parse sys.argv."""
    cli = argparse.ArgumentParser()
    specs = (
        ("VIDEO_PATH", {"type": str}),
        ("--save_path", {"type": str, "default": "./image_sequence/"}),
        ("--frame_interval", {"type": int, "default": 1}),
    )
    for flag, options in specs:
        cli.add_argument(flag, **options)
    return cli.parse_args()
if __name__ == "__main__":
args = parse_args()
with VideoFramer(args, video_path=args.VIDEO_PATH) as vdo_frm:
vdo_frm.run() | 1,724 | 5 | 153 |
0c8bec83e7b2a1f70caa96c61d46a9eeb3799d06 | 2,731 | py | Python | examples/tag_by_location.py | Trinity-College/py-space-platform | 2ead02c1af8b696cf673eddaac5199c8267f0bf3 | [
"ECL-2.0",
"Apache-2.0"
] | 27 | 2015-04-21T09:01:56.000Z | 2021-08-21T22:39:12.000Z | examples/tag_by_location.py | Trinity-College/py-space-platform | 2ead02c1af8b696cf673eddaac5199c8267f0bf3 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2015-04-16T05:57:18.000Z | 2017-05-03T18:34:16.000Z | examples/tag_by_location.py | davids-uta-edu/py-space-platform | 2ead02c1af8b696cf673eddaac5199c8267f0bf3 | [
"ECL-2.0",
"Apache-2.0"
] | 22 | 2015-03-26T08:41:04.000Z | 2020-09-24T19:50:43.000Z | """
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
Copyright (c) 2015 Juniper Networks, Inc.
All rights reserved.
Use is subject to license terms.
Licensed under the Apache License, Version 2.0 (the ?License?); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations
under the License.
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging.config
import configparser
from jnpr.space import rest, factory
def main(my_space):
    """
    Gets all connected Junos devices from Space. Then gets the snmp/location
    configured on each. If this configuration is present, it creates a public
    tag with this value and assign the tag to the device.
    """
    filters = {'deviceFamily': 'junos', 'connectionStatus': 'up'}
    for device in my_space.device_management.devices.get(filter_=filters):
        print(device.name, device.ipAddr, device.platform)
        # Fetch only the snmp/location node of the device configuration.
        cfg = device.configurations.expanded.post(
            xpaths=['configuration/snmp/location'])
        try:
            tag_device(my_space, device, cfg.configuration.location)
        except AttributeError:
            # Raised when the expanded configuration has no location node.
            print("Device %s does not have location configured" % device.name)
if __name__ == "__main__":
    # Initialize logging
    logging.config.fileConfig('../test/logging.conf')
    # Extract Space URL, userid, password from config file
    # (paths are relative — presumably run from the examples dir; confirm)
    config = configparser.RawConfigParser()
    config.read("../test/test.conf")
    url = config.get('space', 'url')
    user = config.get('space', 'user')
    passwd = config.get('space', 'passwd')
    # Create a Space REST end point and tag every connected Junos device
    space = rest.Space(url, user, passwd)
    main(space)
| 36.413333 | 82 | 0.693885 | """
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
Copyright (c) 2015 Juniper Networks, Inc.
All rights reserved.
Use is subject to license terms.
Licensed under the Apache License, Version 2.0 (the ?License?); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations
under the License.
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging.config
import configparser
from jnpr.space import rest, factory
def main(my_space):
    """
    Gets all connected Junos devices from Space. Then gets the snmp/location
    configured on each. If this configuration is present, it creates a public
    tag with this value and assign the tag to the device.

    Args:
        my_space: a jnpr.space.rest.Space end point to query.
    """
    devices_list = my_space.device_management.devices.\
        get(filter_={'deviceFamily': 'junos',
                     'connectionStatus': 'up'})
    for d in devices_list:
        print(d.name, d.ipAddr, d.platform)
        # Fetch only the snmp/location node of the device configuration.
        c = d.configurations.expanded.post(xpaths=['configuration/snmp/location'])
        try:
            tag_device(my_space, d, c.configuration.location)
        except AttributeError:
            # Raised when the expanded configuration has no location node.
            print("Device %s does not have location configured" % d.name)
def tag_device(spc, device, tag_name):
    """
    Assign a public tag named tag_name to device, creating the tag on the
    Space server first if no tag with that name exists yet.

    Args:
        spc: the Space REST end point to talk to.
        device: the device resource the tag will be assigned to.
        tag_name: the name of the public tag to reuse or create.
    """
    try:
        # Check if a tag exists already with the given name
        tag = spc.tag_management.tags.get(filter_={'name': tag_name})[0]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed. A lookup miss (empty result -> IndexError) or
        # a REST error both fall through to tag creation.
        # Create a new public tag with the given name
        tag = factory.make_resource('tag_management.tag', spc)
        tag.name, tag.type = tag_name, 'public'
        tag = spc.tag_management.tags.post(tag)
    # Create a new target for this tag, pointing to the given device —
    # in other words, assign this tag to this device. (This used to be a
    # stray triple-quoted string expression; a comment is the right tool.)
    target = factory.make_resource('tag_management.target', spc)
    target.href = device.href
    tag.targets.post(target)
if __name__ == "__main__":
    # Initialize logging
    logging.config.fileConfig('../test/logging.conf')
    # Extract Space URL, userid, password from config file
    # (paths are relative — presumably run from the examples dir; confirm)
    config = configparser.RawConfigParser()
    config.read("../test/test.conf")
    url = config.get('space', 'url')
    user = config.get('space', 'user')
    passwd = config.get('space', 'passwd')
    # Create a Space REST end point and tag every connected Junos device
    space = rest.Space(url, user, passwd)
    main(space)
| 644 | 0 | 23 |
ed45d468f0c2248b8d88b97319d7a1360e54055b | 7,743 | py | Python | fluidfoam/processing1d.py | remichassagne/fluidfoam | fa89f4cdd2f159619ed7d128257612e183926c93 | [
"CECILL-B"
] | 64 | 2018-03-08T21:52:48.000Z | 2022-03-31T08:11:17.000Z | fluidfoam/processing1d.py | CyrilleBonamy/fluidfoam | 2befa35722aa7aa13ac7d48884461614d608262b | [
"CECILL-B"
] | 14 | 2019-12-12T10:56:10.000Z | 2022-03-31T15:32:47.000Z | fluidfoam/processing1d.py | CyrilleBonamy/fluidfoam | 2befa35722aa7aa13ac7d48884461614d608262b | [
"CECILL-B"
] | 21 | 2019-09-25T21:34:43.000Z | 2022-03-31T14:49:12.000Z | """Write, Read and Plot 1D input files for swak4foam
==========================================================================
This module allows to read OpenFoam output of one dimensional computation
and then write, plot and read input files for Boundary and Initial
Conditions imposition in 3D computation (via swak4foam):
.. autofunction:: create1dprofil
.. autofunction:: read1dprofil
.. autofunction:: plot1dprofil
"""
#
# ---------------- Module General Import and Declarations ---------------
#
import numpy as np
from fluidfoam.readof import typefield, readmesh, readfield
def create1dprofil_spe(pathw, waxis, var, varname, typevar):
    """
    This function provides way to write specific 1D profil (var array)
    in OpenFoam Format in the 1d_profil folder of pathw
    (for BC imposition in 2D or 3D case for example).
    Args:
        pathw: str\n
        waxis: numpy array\n
        var: numpy array\n
        varname: str\n
        typevar: str ('scalar' or 'vector')\n
    Returns:
        status: 'create 1D profiles: done' if ok
    A way you might use me is:\n
    status = fluidfoam.create1dprofil_spe("pathw", z, epsilon,
                                          "epsilon", "scalar")
    Please note that the 1d_profil directory must be existing in the pathw
    directory
    """
    size1d = waxis.shape[0]
    field = var
    if typevar == "scalar":
        filename1 = pathw + "/1d_profil/" + varname + ".xy"
        # Context manager guarantees the descriptor is released even if a
        # write fails (the previous open()/close() pair leaked on error).
        with open(filename1, "w") as f:
            f.write("(\n")
            if field.shape == (1,):
                # Uniform field: repeat the single value on every level.
                for cell in range(size1d):
                    f.write("(" + str(waxis[cell]) + " " + str(field[0]) + ")\n")
            else:
                for cell in range(size1d):
                    f.write("(" + str(waxis[cell]) + " " + str(field[cell]) + ")\n")
            f.write(")\n")
    elif typevar == "vector":
        # One file per component: <varname>0.xy, <varname>1.xy, <varname>2.xy
        for i in range(3):
            filename1 = pathw + "/1d_profil/" + varname + str(i) + ".xy"
            with open(filename1, "w") as f:
                f.write("(\n")
                if field.shape == (3, 1):
                    for cell in range(size1d):
                        f.write("(" + str(waxis[cell]) + " " + str(field[i, 0]) + ")\n")
                else:
                    for cell in range(size1d):
                        f.write("(" + str(waxis[cell]) + " " + str(field[i, cell]) + ")\n")
                f.write(")\n")
    else:
        print("PROBLEM with input: Good input is for example :")
        print(
            'fluidfoam.create1dprofil_spe("/data/1dcompute/", Y, epsilon, "epsilon", "scalar")\n'
        )
    status = "create 1D profiles: done"
    return status
def create1dprofil(pathr, pathw, timename, axis, varlist):
    """
    This function provides way to read 1D profiles at time timename of pathr
    and write them in OpenFoam Format in the 1d_profil folder of pathw
    (for BC imposition in 2D or 3D case for example).
    Args:
        pathr: str\n
        pathw: str\n
        timename: str\n
        axis: str ('X', 'Y' or 'Z')\n
        varlist: list of str\n
    Returns:
        status: 'create 1D profiles: done' if ok
    Raises:
        ValueError: if axis is not one of 'X', 'Y' or 'Z'
    A way you might use me is:\n
    status = fluidfoam.create1dprofil("path_of_case", "pathw", time, 'Y',
                                      ['Ua', 'Ub'])
    Please note that the 1d_profil directory must be existing in the pathw
    directory
    """
    X, Y, Z = readmesh(pathr)
    size1d = Y.shape[0]
    # The profile coordinate does not depend on the variable: pick it once.
    # (The previous version only printed a warning for an unknown axis and
    # then crashed with a NameError on the undefined waxis; fail fast.)
    try:
        waxis = {"X": X, "Y": Y, "Z": Z}[axis]
    except KeyError:
        raise ValueError("axis does not exist, please check input parameters")
    for var in varlist:
        field = readfield(pathr, timename, var)
        typevar = typefield(pathr, timename, var)
        if typevar == "scalar":
            filename1 = pathw + "/1d_profil/" + var + ".xy"
            # Context manager guarantees the file is closed even on error.
            with open(filename1, "w") as f:
                f.write("(\n")
                if field.shape == (1,):
                    # Uniform field: repeat the single value on every level.
                    for cell in range(size1d):
                        f.write("(" + str(waxis[cell]) + " " + str(field[0]) + ")\n")
                else:
                    for cell in range(size1d):
                        f.write("(" + str(waxis[cell]) + " " + str(field[cell]) + ")\n")
                f.write(")\n")
        elif typevar == "vector":
            # One file per component: <var>0.xy, <var>1.xy, <var>2.xy
            for i in range(3):
                filename1 = pathw + "/1d_profil/" + var + str(i) + ".xy"
                with open(filename1, "w") as f:
                    f.write("(\n")
                    if field.shape == (3, 1):
                        for cell in range(size1d):
                            f.write("(" + str(waxis[cell]) + " " + str(field[i, 0]) + ")\n")
                    else:
                        for cell in range(size1d):
                            f.write("(" + str(waxis[cell]) + " " + str(field[i, cell]) + ")\n")
                    f.write(")\n")
            print("Warning for pyof users : Ua=Ua0, Va=Ua2, Wa=Ua1\n")
        else:
            print("PROBLEM with varlist input: Good input is for example :")
            print(
                'fluidfoam.create1dprofil("/data/1dcompute/", "/data/1dcompute/", "750", "Y",[\'omega\',\'p\'])\n'
            )
    status = "create 1D profiles: done"
    return status
def read1dprofil(file_name):
    """Read a 1D profile file written by create1dprofil.
    Args:
        file_name: str (may be a complete path, e.g. "case/1d_profil/a.xy")
    Returns:
        z: 1d mesh corresponding to 1d profil\n
        field: scalar value of the field specified via filename\n
        size1d: size of the 1d profil
    A way you might use me is:\n
    z, a, size1d = fluidfoam.read1dprofil("path_of_case/1d_profil/a.xy")
    """
    with open(file_name) as handle:
        raw_lines = handle.readlines()
    # First and last lines only hold the surrounding parentheses.
    size1d = len(raw_lines) - 2
    z = np.empty(size1d)
    field = np.empty(size1d)
    for idx, raw in enumerate(raw_lines[1:-1]):
        cols = raw.replace("(", "").replace(")", "").split()
        z[idx] = cols[0]
        field[idx] = cols[1]
    return z, field, size1d
def plot1dprofil(pathr, varlist):
    """This function provides way to plot 1D profiles created by the
    create1dprofil function.
    Args:
        pathr: str (must be the full path of the 1d_profil directory)\n
        varlist: list of str
    A way you might use me is:\n
    fluidfoam.plot1dprofil("path_of_case/1d_profil", ['Ua', 'Ub', 'alpha'])
    """
    # Imported lazily so matplotlib is only required when plotting.
    import matplotlib.pyplot as plt
    # The first profile fixes the shared vertical coordinate and the length.
    z, field, size1d = read1dprofil(pathr + "/" + varlist[0] + ".xy")
    fields = np.empty([len(varlist), size1d])
    fields[0] = field
    for i in range(len(varlist) - 1):
        z, field, size1d = read1dprofil(pathr + "/" + varlist[i + 1] + ".xy")
        fields[i + 1] = field
    # One shared-y subplot per variable.
    # NOTE(review): with a single variable plt.subplots returns a bare Axes,
    # so axarr[i] would fail — confirm varlist always has >= 2 entries.
    dummy, axarr = plt.subplots(1, len(varlist), sharey=True)
    for i, dummy in enumerate(varlist):
        axarr[i].plot(fields[i], z)
        axarr[i].set_title(varlist[i])
    plt.show()
    return
| 32.39749 | 114 | 0.496965 | """Write, Read and Plot 1D input files for swak4foam
==========================================================================
This module allows to read OpenFoam output of one dimensional computation
and then write, plot and read input files for Boundary and Initial
Conditions imposition in 3D computation (via swak4foam):
.. autofunction:: create1dprofil
.. autofunction:: read1dprofil
.. autofunction:: plot1dprofil
"""
#
# ---------------- Module General Import and Declarations ---------------
#
import numpy as np
from fluidfoam.readof import typefield, readmesh, readfield
def create1dprofil_spe(pathw, waxis, var, varname, typevar):
    """
    This function provides way to write specific 1D profil (var array)
    in OpenFoam Format in the 1d_profil folder of pathw
    (for BC imposition in 2D or 3D case for example).
    Args:
        pathw: str\n
        waxis: numpy array\n
        var: numpy array\n
        varname: str\n
        typevar: str ('scalar' or 'vector')\n
    Returns:
        status: 'create 1D profiles: done' if ok
    A way you might use me is:\n
    status = fluidfoam.create1dprofil_spe("pathw", z, epsilon,
                                          "epsilon", "scalar")
    Please note that the 1d_profil directory must be existing in the pathw
    directory
    """
    size1d = waxis.shape[0]
    field = var
    if typevar == "scalar":
        filename1 = pathw + "/1d_profil/" + varname + ".xy"
        # Context manager guarantees the descriptor is released even if a
        # write fails (the previous open()/close() pair leaked on error).
        with open(filename1, "w") as f:
            f.write("(\n")
            if field.shape == (1,):
                # Uniform field: repeat the single value on every level.
                for cell in range(size1d):
                    f.write("(" + str(waxis[cell]) + " " + str(field[0]) + ")\n")
            else:
                for cell in range(size1d):
                    f.write("(" + str(waxis[cell]) + " " + str(field[cell]) + ")\n")
            f.write(")\n")
    elif typevar == "vector":
        # One file per component: <varname>0.xy, <varname>1.xy, <varname>2.xy
        for i in range(3):
            filename1 = pathw + "/1d_profil/" + varname + str(i) + ".xy"
            with open(filename1, "w") as f:
                f.write("(\n")
                if field.shape == (3, 1):
                    for cell in range(size1d):
                        f.write("(" + str(waxis[cell]) + " " + str(field[i, 0]) + ")\n")
                else:
                    for cell in range(size1d):
                        f.write("(" + str(waxis[cell]) + " " + str(field[i, cell]) + ")\n")
                f.write(")\n")
    else:
        print("PROBLEM with input: Good input is for example :")
        print(
            'fluidfoam.create1dprofil_spe("/data/1dcompute/", Y, epsilon, "epsilon", "scalar")\n'
        )
    status = "create 1D profiles: done"
    return status
def create1dprofil(pathr, pathw, timename, axis, varlist):
    """
    This function provides way to read 1D profiles at time timename of pathr
    and write them in OpenFoam Format in the 1d_profil folder of pathw
    (for BC imposition in 2D or 3D case for example).
    Args:
        pathr: str\n
        pathw: str\n
        timename: str\n
        axis: str ('X', 'Y' or 'Z')\n
        varlist: list of str\n
    Returns:
        status: 'create 1D profiles: done' if ok
    Raises:
        ValueError: if axis is not one of 'X', 'Y' or 'Z'
    A way you might use me is:\n
    status = fluidfoam.create1dprofil("path_of_case", "pathw", time, 'Y',
                                      ['Ua', 'Ub'])
    Please note that the 1d_profil directory must be existing in the pathw
    directory
    """
    X, Y, Z = readmesh(pathr)
    size1d = Y.shape[0]
    # The profile coordinate does not depend on the variable: pick it once.
    # (The previous version only printed a warning for an unknown axis and
    # then crashed with a NameError on the undefined waxis; fail fast.)
    try:
        waxis = {"X": X, "Y": Y, "Z": Z}[axis]
    except KeyError:
        raise ValueError("axis does not exist, please check input parameters")
    for var in varlist:
        field = readfield(pathr, timename, var)
        typevar = typefield(pathr, timename, var)
        if typevar == "scalar":
            filename1 = pathw + "/1d_profil/" + var + ".xy"
            # Context manager guarantees the file is closed even on error.
            with open(filename1, "w") as f:
                f.write("(\n")
                if field.shape == (1,):
                    # Uniform field: repeat the single value on every level.
                    for cell in range(size1d):
                        f.write("(" + str(waxis[cell]) + " " + str(field[0]) + ")\n")
                else:
                    for cell in range(size1d):
                        f.write("(" + str(waxis[cell]) + " " + str(field[cell]) + ")\n")
                f.write(")\n")
        elif typevar == "vector":
            # One file per component: <var>0.xy, <var>1.xy, <var>2.xy
            for i in range(3):
                filename1 = pathw + "/1d_profil/" + var + str(i) + ".xy"
                with open(filename1, "w") as f:
                    f.write("(\n")
                    if field.shape == (3, 1):
                        for cell in range(size1d):
                            f.write("(" + str(waxis[cell]) + " " + str(field[i, 0]) + ")\n")
                    else:
                        for cell in range(size1d):
                            f.write("(" + str(waxis[cell]) + " " + str(field[i, cell]) + ")\n")
                    f.write(")\n")
            print("Warning for pyof users : Ua=Ua0, Va=Ua2, Wa=Ua1\n")
        else:
            print("PROBLEM with varlist input: Good input is for example :")
            print(
                'fluidfoam.create1dprofil("/data/1dcompute/", "/data/1dcompute/", "750", "Y",[\'omega\',\'p\'])\n'
            )
    status = "create 1D profiles: done"
    return status
def read1dprofil(file_name):
    """Read a 1D profile file written by create1dprofil.
    Args:
        file_name: str (may be a complete path, e.g. "case/1d_profil/a.xy")
    Returns:
        z: 1d mesh corresponding to 1d profil\n
        field: scalar value of the field specified via filename\n
        size1d: size of the 1d profil
    A way you might use me is:\n
    z, a, size1d = fluidfoam.read1dprofil("path_of_case/1d_profil/a.xy")
    """
    with open(file_name) as handle:
        raw_lines = handle.readlines()
    # First and last lines only hold the surrounding parentheses.
    size1d = len(raw_lines) - 2
    z = np.empty(size1d)
    field = np.empty(size1d)
    for idx, raw in enumerate(raw_lines[1:-1]):
        cols = raw.replace("(", "").replace(")", "").split()
        z[idx] = cols[0]
        field[idx] = cols[1]
    return z, field, size1d
def plot1dprofil(pathr, varlist):
    """This function provides way to plot 1D profiles created by the
    create1dprofil function.
    Args:
        pathr: str (must be the full path of the 1d_profil directory)\n
        varlist: list of str
    A way you might use me is:\n
    fluidfoam.plot1dprofil("path_of_case/1d_profil", ['Ua', 'Ub', 'alpha'])
    """
    # Imported lazily so matplotlib is only required when plotting.
    import matplotlib.pyplot as plt
    # The first profile fixes the shared vertical coordinate and the length.
    z, field, size1d = read1dprofil(pathr + "/" + varlist[0] + ".xy")
    fields = np.empty([len(varlist), size1d])
    fields[0] = field
    for i in range(len(varlist) - 1):
        z, field, size1d = read1dprofil(pathr + "/" + varlist[i + 1] + ".xy")
        fields[i + 1] = field
    # One shared-y subplot per variable.
    # NOTE(review): with a single variable plt.subplots returns a bare Axes,
    # so axarr[i] would fail — confirm varlist always has >= 2 entries.
    dummy, axarr = plt.subplots(1, len(varlist), sharey=True)
    for i, dummy in enumerate(varlist):
        axarr[i].plot(fields[i], z)
        axarr[i].set_title(varlist[i])
    plt.show()
    return
| 0 | 0 | 0 |
963b388baf2e86c3adb9df71da6250a85efe136a | 2,765 | py | Python | energy_usage/main.py | optiz0r/energy-usage | b7b57fb05e46257af96db287ed9eff63cb4c5677 | [
"MIT"
] | 6 | 2020-10-12T22:18:55.000Z | 2021-06-13T09:36:49.000Z | energy_usage/main.py | optiz0r/energy-usage | b7b57fb05e46257af96db287ed9eff63cb4c5677 | [
"MIT"
] | 1 | 2021-06-13T09:56:10.000Z | 2021-06-13T20:16:04.000Z | energy_usage/main.py | optiz0r/energy-usage | b7b57fb05e46257af96db287ed9eff63cb4c5677 | [
"MIT"
] | 1 | 2021-05-09T14:20:59.000Z | 2021-05-09T14:20:59.000Z | import argparse
import logging
import os
import queue
import sys
import confuse
from energy_usage.influx_client import InfluxClient
from energy_usage.mqtt_client import MqttClient
from energy_usage.sep import parse_sep, usage_to_datapoints
if __name__ == '__main__':
    # Script entry point: main() runs the MQTT -> InfluxDB pipeline.
    main()
| 25.136364 | 103 | 0.625316 | import argparse
import logging
import os
import queue
import sys
import confuse
from energy_usage.influx_client import InfluxClient
from energy_usage.mqtt_client import MqttClient
from energy_usage.sep import parse_sep, usage_to_datapoints
def parse_args(arg_str=None):
    """Parse command-line flags for energy-usage.

    arg_str overrides sys.argv[1:] (useful for tests). Returns the
    program name together with the parsed namespace.
    """
    cli_args = sys.argv[1:] if arg_str is None else arg_str
    parser = argparse.ArgumentParser('energy-usage')
    parser.add_argument(
        '-d', '--debug', dest='debug', action='store_true',
        help='Enable debug output')
    parser.add_argument(
        '-n', '--noop', dest='noop', action='store_true',
        help="Don't make any modifications, just show what would be done")
    return parser.prog, parser.parse_args(cli_args)
def load_config(args):
    """Build the layered confuse configuration, overlay the CLI args and
    validate that all mandatory keys are present before returning it."""
    cfg = confuse.Configuration('energy-usage', 'energy_usage')
    cfg.set_args(args)
    validate_config(cfg)
    return cfg
def validate_config(config):
    """Touch every mandatory MQTT setting so confuse raises if one is
    missing or has the wrong type; returns None on success."""
    mandatory = (
        (config['mqtt']['username'], str),
        (config['mqtt']['password'], str),
        (config['mqtt']['topic'], str),
    )
    for view, expected_type in mandatory:
        view.get(expected_type)
def setup_logging(prog, config):
    """Configure the root logger to write to stdout, each line prefixed
    with "prog[pid]". Level is DEBUG when --debug is set, INFO otherwise.
    Returns the root logger."""
    root = logging.getLogger()
    root.setLevel(logging.DEBUG if config['debug'].get(bool) else logging.INFO)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(logging.Formatter(
        '{0}[{1}]: %(levelname)s %(message)s'.format(prog, str(os.getpid()))))
    root.addHandler(stream_handler)
    return root
def main():
    """Entry point: subscribe to the configured MQTT topic and forward
    parsed usage readings to InfluxDB until interrupted with Ctrl-C."""
    prog, args = parse_args()
    config = load_config(args)
    logger = setup_logging(prog, config)
    # Queue decouples the MQTT network thread from the Influx writer loop.
    msg_q = queue.Queue()
    mqtt_client = MqttClient(
        server=config['mqtt']['server'].get(str),
        port=config['mqtt']['port'].get(int),
        username=config['mqtt']['username'].get(str),
        password=config['mqtt']['password'].get(str),
        topic=config['mqtt']['topic'].get(str),
        ca_certs=config['ca_certs'].get(str),
        msg_q=msg_q)
    mqtt_client.connect()
    mqtt_client.run()
    influx_client = InfluxClient(
        server=config['influx']['server'].get(str),
        port=config['influx']['port'].get(int),
        database='default',
    )
    while True:
        try:
            msg = msg_q.get()
            logger.debug(msg.topic + " " + str(msg.payload))
            usage = parse_sep(msg.topic, msg.payload)
            if usage:
                usage_datapoints = usage_to_datapoints(usage)
                # --noop: parse and log but skip the actual Influx write.
                if not config["noop"].get(bool):
                    influx_client.write_points(usage_datapoints)
        except KeyboardInterrupt:
            break
if __name__ == '__main__':
    # Script entry point: main() runs the MQTT -> InfluxDB pipeline.
    main()
| 2,363 | 0 | 115 |
3f9b9d33f3bac053061d7fd7e694eedeae93da99 | 21,296 | py | Python | didatictests/didatictests.py | lmkawakami/didatictests | fd6862cc6d59313d45a784b96048ed3bc42389f5 | [
"MIT"
] | 1 | 2022-01-10T11:58:12.000Z | 2022-01-10T11:58:12.000Z | didatictests/didatictests.py | lmkawakami/didatictests | fd6862cc6d59313d45a784b96048ed3bc42389f5 | [
"MIT"
] | 1 | 2022-01-10T03:51:01.000Z | 2022-01-10T03:51:13.000Z | didatictests/didatictests.py | lmkawakami/didatictests | fd6862cc6d59313d45a784b96048ed3bc42389f5 | [
"MIT"
] | 2 | 2022-01-18T21:02:25.000Z | 2022-02-07T19:13:35.000Z | import builtins
import sys
class Didatic_test:
"""
A class to configure and run simple didatic tests
Didatic_test(Callable=None, args={}, test_name=None, keyboard_inputs=(), \
expected_output=None, expected_prints="", verbose=None, \
run_output_test=None, run_prints_test=None,
)
Parameters
----------
fn: Callable
The function that will be tested
args: dict
The arguments that fn will be tested with. Use parse() to generate args,\
ex.: args = parse('a',5,7, x=1, s='aaa')
test_name: str
An optional identifier that will be printed with the test result
keyboard_inputs: Tuple[str, ...]
A tuple containig all the simulated keyboards inputs that will be used in \
every fn's input()
expected_output: Any
What the fn's return value should be
expected_prints: str
What the fn's internal print()'s concatenation should be \
(including new line character)
verbose: bool
Controls if all the fn's internal print()'s and input()'s prompts are printed
run_output_test: bool
Controls if the fn's return value is checked
run_prints_test: bool
Controls if the fn's internal print()'s are checked
"""
# Função input que inputs copia pro 'inputs' tbm (não retorna pra print original)
@staticmethod
# Função print que copia prints pro 'prints' tbm (não retorna pra print original)
@staticmethod
@staticmethod
# Redefine fn interceptando tudo: args, inputs, prints, output
@staticmethod
@staticmethod
@staticmethod
def generate_test(
fn=None,
args={},
test_name="Test",
verbose=None,
run_output_test=None,
run_prints_test=None,
generator_verbose=None,
):
"""
generate_test(fn, args, test_name="Test", verbose=False, \
run_output_test=True, run_prints_test=False, generator_verbose=False,)
Run the function once using ther given 'args' and intercepts all\
the inpus, prints and outpus
Generate and return the string to create the test with the given configs.\
and the intercepted infos.
ex.: generate_test(fn, Didatic_test.parse_args(1,2,3), "Test-5", True, True)
Parameters
----------
fn: The function that will be tested
args: dict in the format {"pos_inputs": args, "key_inputs": kwargs}
test_name: test name to identify the results and hint the type of test
verbose: controls if the fn's internal inputs and prints will be printed
run_output_test: controls if the output of the test run will be checked \
against the expected output value
run_prints_test: controls if the prints of the test run will be checked \
against the expected prints
generator_verbose: controls if the fn's internal inputs and prints \
will be printed in the fist run (the interception run)
Returns
-------
constructor_str: Return the string with the test constuctor containing\
all the configurations and args predefined, and with the intecepted\
inputs, prints and outputs as the expected values
"""
if fn is None:
fn = Didatic_test.default_fn
if verbose is None:
verbose = Didatic_test.default_verbose or False
if run_output_test is None:
run_output_test = Didatic_test.default_run_output_test or True
if run_prints_test is None:
run_prints_test = Didatic_test.default_run_prints_test or False
if generator_verbose is None:
Didatic_test.default_generator_verbose = generator_verbose or False
interceptions = {}
intercepted_fn = Didatic_test.intercepted_fn(
fn, interceptions, generator_verbose, "[I]: ", "[O]: "
)
pos_args = args.get("pos_inputs", ())
key_args = args.get("key_inputs", {})
output = intercepted_fn(*pos_args, **key_args)
fn_name = fn.__name__
args_str = Didatic_test.__stringify_args(args)
output_str = "'" + output + "'" if type(output) == str else str(output)
prints_str = "".join(interceptions["prints"])
constructor_str = f"Didatic_test({fn_name}, Didatic_test.parse_args\
{args_str}, '{test_name}', {interceptions['inputs']}, {output_str}, \
'{prints_str}', {verbose}, {run_output_test}, {run_prints_test})"
return constructor_str
@staticmethod
def auto_redefine(fn, args={}, verbose=False):
"""
auto_redefine(fn, verbose=False)
Run fn normally once and save all the inputs, then return a\
redefined fn that reuses the same user inputs (simulated)
The args and kwarks continue to work normally
ex.: open_menu = auto_redefine(open_menu)
Parameters
----------
fn: The function that will be called with intercepted inputs
verbose: flag that controls if the inputs primpts will be printed
Returns
-------
auto_redefined: Return a new function that will always use the same\
keyboard inputs as typed on the first run
"""
interceptions = {}
intercepted_fn = Didatic_test.intercepted_fn(
fn, interceptions, verbose, "[I]: ", "[O]: "
)
pos_args = args.get("pos_inputs", ())
key_args = args.get("key_inputs", {})
intercepted_fn(*pos_args, **key_args)
inputs_list = interceptions["inputs"]
auto_redefined = Didatic_test.redefine(fn, inputs_list, verbose)
return auto_redefined
@staticmethod
def redefine(fn, keyboard_inputs, verbose=False):
"""
redefine(fn, keyboard_inputs, verbose=False)
Return a new function that will use the 'keyboard_inputs' tuple\
as simulated inputs, but will work as fn otherwise
ex.: call_menu = redefine(call_menu,('lorem ipsum','25','y','n'))
Parameters
----------
fn: The function that will be copied but will use \
the simulated inputs
keyboard_inputs: The inputs that will be simulated
Returns
-------
refedined_fn: Return a fn copy that will always \
use the 'keyboard_inputs' as input simulation
"""
return refedined_fn
@staticmethod
def parse_args(*args, **kwargs):
"""
parse_args(args, kwargs)
Auxiliar function to pass fn's args and kwargs like in a normal fn call
Just passs the positional args first and then key arguments
ex.: parse_args(1,2,3,x=15,y=[0,0,1],z='aa')
Parameters
----------
args: The positional arguments of fn
kwargs: The key arguments of fn
Returns
-------
values: dict with 2 keys: 'pos_inputs' and 'key_inputs'
"""
return {"pos_inputs": args, "key_inputs": kwargs}
    @staticmethod
    def run_tests(tests):
        """
        run_tests(tests)
        Run all the tests in the 'tests' list, print a summary of the four
        counters, and return the list of per-test result dicts produced by
        Didatic_test.run().
        Parameters
        ----------
        tests: list[Didatic_test]
            A list of tests that you want to execute
        """
        results = []
        number_of_tests = len(tests)
        completed_tests = 0
        aborted_tests = 0
        correct_outputs_tests = 0
        correct_prints_tests = 0
        for index, test in enumerate(tests):
            # NOTE: mutates the caller's test objects — the list position is
            # prepended to each test_name so the report identifies them.
            if test.test_name is None:
                test.test_name = index
            else:
                test.test_name = f"{index} - {test.test_name}"
            result = test.run()
            # Booleans add as 0/1, so these are plain counters.
            correct_outputs_tests += result["output_is_correct"]
            correct_prints_tests += result["print_is_correct"]
            aborted_tests += result["test_failed"]
            completed_tests += result["test_done"]
            results.append(result)
        print(
            f"""
            correct_outputs_tests: {correct_outputs_tests}/{number_of_tests}
            correct_prints_tests: {correct_prints_tests}/{number_of_tests}
            aborted_tests: {aborted_tests}/{number_of_tests}
            completed_tests: {completed_tests}/{number_of_tests}
            """
        )
        return results
fn = None
verbose = False
run_output_test = True
run_prints_test = False
@staticmethod
def set_defaults(fn=None, verbose=None, run_output_test=None, run_prints_test=None):
"""
set_defaults(fn=None, verbose=None, run_output_test=None, run_prints_test None)
Set common default values fot the tests configs to avoid repetition when \
setting them up later
Parameters
----------
fn: Callable
The function that will be tested
verbose: bool
Controls if all the fn's internal print()'s and \
input()'s prompts are printed
run_output_test: bool
Controls if the fn's return value is checked
run_prints_test: bool
Controls if the fn's internal print()'s are checked
"""
if not (fn is None):
Didatic_test.fn = new_fn
if not (verbose is None):
Didatic_test.verbose = verbose
if not (run_output_test is None):
Didatic_test.run_output_test = run_output_test
if not (run_prints_test is None):
Didatic_test.run_prints_test = run_prints_test
@staticmethod
def set_generator_defaults(
fn=None,
verbose=None,
run_output_test=None,
run_prints_test=None,
generator_verbose=None,
):
"""
set_generator_defaults(fn=None, verbose=None, run_output_test=None, \
run_prints_test=None, generator_verbose=None)
Set common default values fot the test generator to avoid unnecessary repetition
Parameters
----------
fn: Callable
The function that will be tested
verbose: bool
Controls if all the fn's internal print()'s and \
input()'s prompts are printed when a test runs
run_output_test: bool
Controls if the fn's return value is tested
run_prints_test: bool
Controls if the fn's internal print()'s are tested
generator_verbose: bool
Controls if all the fn's internal print()'s and\
input()'s prompts are printed on the test\
generator run
"""
if not (fn is None):
Didatic_test.default_fn = fn
if not (verbose is None):
Didatic_test.default_verbose = verbose
if not (run_output_test is None):
Didatic_test.default_run_output_test = run_output_test
if not (run_prints_test is None):
Didatic_test.default_run_prints_test = run_prints_test
if not (generator_verbose is None):
Didatic_test.default_generator_verbose = generator_verbose
    def run(self):
        """
        run()
        Run the configured Didatic_test, print the result and \
        returns a dictionary with the test outcome
        {
            "output_is_correct": bool,
            "print_is_correct": bool,
            "test_failed": bool,
            "test_done": bool,
        }
        """
        # Work on a copy so the configured tuple of inputs stays reusable.
        self.keyboard_inputs_list = list(self.keyboard_inputs)
        self.interceptions = {}
        # Wrap fn twice: first to record prints/output, then to feed the
        # simulated keyboard answers into every input() call.
        fn_temp = Didatic_test.intercepted_fn(
            self.fn, self.interceptions, self.verbose, "[I]: ", "[P]: "
        )
        new_fn = Didatic_test.redefine(fn_temp, self.keyboard_inputs_list, False)
        try:
            new_fn(*self.args, **self.kwargs)
            # Compare intercepted return value / printed text with the
            # expected ones. (self.args / self.expected_* are presumably
            # initialized by __init__, not visible here — TODO confirm.)
            fn_output = self.interceptions["output"]
            self.output_is_correct = fn_output == self.expected_output
            fn_prints = "".join(self.interceptions["prints"])
            self.print_is_correct = fn_prints == self.expected_prints
            self.test_done = True
        except Exception as excpt:
            # Any error raised inside fn aborts this test but not the run.
            self.test_failed = True
            self.test_exception = excpt
        finally:
            print(f"Case: {self.test_name}")
            if self.test_failed:
                self.__print_exception()
            else:
                self.__print_result()
            print("---------------------------------------------------")
        return {
            "output_is_correct": self.output_is_correct,
            "print_is_correct": self.print_is_correct,
            "test_failed": self.test_failed,
            "test_done": self.test_done,
        }
    def just_run(self):
        """
        just_run()
        Run the configured Didatic_test with its simulated keyboard inputs,
        printing the test name first. Unlike run(), nothing is checked and
        no result dictionary is built (the method returns None); exceptions
        raised by fn are printed instead of propagated.
        """
        # Work on a copy so the configured tuple of inputs stays reusable.
        self.keyboard_inputs_list = list(self.keyboard_inputs)
        self.interceptions = {}
        fn_temp = Didatic_test.intercepted_fn(
            self.fn, self.interceptions, self.verbose, "[I]: ", "[P]: "
        )
        new_fn = Didatic_test.redefine(fn_temp, self.keyboard_inputs_list, False)
        print(f"Case: {self.test_name}")
        try:
            new_fn(*self.args, **self.kwargs)
        except Exception as excpt:
            # Keep the exception for inspection and print it (no re-raise).
            self.test_exception = excpt
            self.__print_exception()
| 34.515397 | 88 | 0.579545 | import builtins
import sys
class Didatic_test:
"""
A class to configure and run simple didatic tests
Didatic_test(Callable=None, args={}, test_name=None, keyboard_inputs=(), \
expected_output=None, expected_prints="", verbose=None, \
run_output_test=None, run_prints_test=None,
)
Parameters
----------
fn: Callable
The function that will be tested
args: dict
The arguments that fn will be tested with. Use parse() to generate args,\
ex.: args = parse('a',5,7, x=1, s='aaa')
test_name: str
An optional identifier that will be printed with the test result
keyboard_inputs: Tuple[str, ...]
A tuple containig all the simulated keyboards inputs that will be used in \
every fn's input()
expected_output: Any
What the fn's return value should be
expected_prints: str
What the fn's internal print()'s concatenation should be \
(including new line character)
verbose: bool
Controls if all the fn's internal print()'s and input()'s prompts are printed
run_output_test: bool
Controls if the fn's return value is checked
run_prints_test: bool
Controls if the fn's internal print()'s are checked
"""
    # input() replacement factory: also copies every answer into `inputs`
    # (it does not restore the original input/print itself).
    @staticmethod
    def __intercepted_input_fn(
        inputs: list, verbose: bool = False, identifier: str = ""
    ):
        """Return an input() substitute that records each answer in
        `inputs` and, when verbose, echoes "identifier + prompt + answer"
        through the original print()."""
        input_fn_backup = builtins.input
        print_fn_backup = builtins.print
        def new_input(prompt):
            user_input = input_fn_backup(prompt)
            if verbose:
                print_fn_backup(f"{identifier}{prompt}{user_input}")
            inputs.append(user_input)
            return user_input
        return new_input
    # print() replacement factory: also copies every printed text into
    # `prints` (it only forwards to the original print when verbose).
    @staticmethod
    def __intercepted_print_fn(
        prints: list, verbose: bool = False, identifier: str = ""
    ):
        """Return a print() substitute that appends the formatted text
        (sep/end applied) to `prints`; when verbose it echoes the text
        through the original print(), prefixed with `identifier`.
        The `file` and `flush` parameters are accepted but unused."""
        print_fn_backup = builtins.print
        def new_print(*objects, sep=" ", end="\n", file=sys.stdout, flush=False):
            str_list = [str(obj) for obj in objects]
            print_str = sep.join(str_list) + end
            if verbose:
                print_fn_backup(f"{identifier}{print_str}", sep="", end="")
            prints.append(print_str)
            return
        return new_print
@staticmethod
def __fake_input_fn(fake_inputs, verbose=False):
fake_inputs = list(fake_inputs)
def fake_input_fn(prompt):
try:
fake_input = str(fake_inputs.pop(0))
if verbose:
print(prompt, fake_input)
return fake_input
except Exception as excpt:
if excpt.args[0] == "pop from empty list":
print("⚠️ Error! insufficient simulated inputs")
raise excpt
return fake_input_fn
# Redefine fn interceptando tudo: args, inputs, prints, output
@staticmethod
def intercepted_fn(
fn, interceptions, verbose=False, input_identifier="", print_identifier=""
):
interceptions.setdefault("prints", [])
interceptions.setdefault("inputs", [])
def new_fn(*args, **kwargs):
input_fn_backup = builtins.input
print_fn_backup = builtins.print
builtins.input = Didatic_test.__intercepted_input_fn(
interceptions["inputs"], verbose, input_identifier
)
builtins.print = Didatic_test.__intercepted_print_fn(
interceptions["prints"], verbose, print_identifier
)
interceptions["args"] = args
interceptions["kwargs"] = kwargs
output = fn(*args, **kwargs)
interceptions["output"] = output
builtins.print = print_fn_backup
builtins.input = input_fn_backup
return output
return new_fn
@staticmethod
def __stringify_args(args={}):
quotes = "'"
pos_args = args.get("pos_inputs", ())
key_args = args.get("key_inputs", {})
pos_args_str = str(pos_args).replace("(", "").replace(")", "")
key_args_str = ", ".join(
[
f"{key}={quotes+value+quotes if type(value)==str else value}"
for key, value in key_args.items()
]
)
args_str = ", ".join([pos_args_str, key_args_str]).strip(", ")
return f"({args_str})"
@staticmethod
def generate_test(
fn=None,
args={},
test_name="Test",
verbose=None,
run_output_test=None,
run_prints_test=None,
generator_verbose=None,
):
"""
generate_test(fn, args, test_name="Test", verbose=False, \
run_output_test=True, run_prints_test=False, generator_verbose=False,)
Run the function once using ther given 'args' and intercepts all\
the inpus, prints and outpus
Generate and return the string to create the test with the given configs.\
and the intercepted infos.
ex.: generate_test(fn, Didatic_test.parse_args(1,2,3), "Test-5", True, True)
Parameters
----------
fn: The function that will be tested
args: dict in the format {"pos_inputs": args, "key_inputs": kwargs}
test_name: test name to identify the results and hint the type of test
verbose: controls if the fn's internal inputs and prints will be printed
run_output_test: controls if the output of the test run will be checked \
against the expected output value
run_prints_test: controls if the prints of the test run will be checked \
against the expected prints
generator_verbose: controls if the fn's internal inputs and prints \
will be printed in the fist run (the interception run)
Returns
-------
constructor_str: Return the string with the test constuctor containing\
all the configurations and args predefined, and with the intecepted\
inputs, prints and outputs as the expected values
"""
if fn is None:
fn = Didatic_test.default_fn
if verbose is None:
verbose = Didatic_test.default_verbose or False
if run_output_test is None:
run_output_test = Didatic_test.default_run_output_test or True
if run_prints_test is None:
run_prints_test = Didatic_test.default_run_prints_test or False
if generator_verbose is None:
Didatic_test.default_generator_verbose = generator_verbose or False
interceptions = {}
intercepted_fn = Didatic_test.intercepted_fn(
fn, interceptions, generator_verbose, "[I]: ", "[O]: "
)
pos_args = args.get("pos_inputs", ())
key_args = args.get("key_inputs", {})
output = intercepted_fn(*pos_args, **key_args)
fn_name = fn.__name__
args_str = Didatic_test.__stringify_args(args)
output_str = "'" + output + "'" if type(output) == str else str(output)
prints_str = "".join(interceptions["prints"])
constructor_str = f"Didatic_test({fn_name}, Didatic_test.parse_args\
{args_str}, '{test_name}', {interceptions['inputs']}, {output_str}, \
'{prints_str}', {verbose}, {run_output_test}, {run_prints_test})"
return constructor_str
@staticmethod
def auto_redefine(fn, args={}, verbose=False):
"""
auto_redefine(fn, verbose=False)
Run fn normally once and save all the inputs, then return a\
redefined fn that reuses the same user inputs (simulated)
The args and kwarks continue to work normally
ex.: open_menu = auto_redefine(open_menu)
Parameters
----------
fn: The function that will be called with intercepted inputs
verbose: flag that controls if the inputs primpts will be printed
Returns
-------
auto_redefined: Return a new function that will always use the same\
keyboard inputs as typed on the first run
"""
interceptions = {}
intercepted_fn = Didatic_test.intercepted_fn(
fn, interceptions, verbose, "[I]: ", "[O]: "
)
pos_args = args.get("pos_inputs", ())
key_args = args.get("key_inputs", {})
intercepted_fn(*pos_args, **key_args)
inputs_list = interceptions["inputs"]
auto_redefined = Didatic_test.redefine(fn, inputs_list, verbose)
return auto_redefined
@staticmethod
def redefine(fn, keyboard_inputs, verbose=False):
"""
redefine(fn, keyboard_inputs, verbose=False)
Return a new function that will use the 'keyboard_inputs' tuple\
as simulated inputs, but will work as fn otherwise
ex.: call_menu = redefine(call_menu,('lorem ipsum','25','y','n'))
Parameters
----------
fn: The function that will be copied but will use \
the simulated inputs
keyboard_inputs: The inputs that will be simulated
Returns
-------
refedined_fn: Return a fn copy that will always \
use the 'keyboard_inputs' as input simulation
"""
def refedined_fn(*args, **kwargs):
inputs_list = list(keyboard_inputs)
input_fn_backup = builtins.input
builtins.input = Didatic_test.__fake_input_fn(inputs_list, verbose)
try:
output = fn(*args, **kwargs)
except Exception as excpt:
raise excpt
finally:
builtins.input = input_fn_backup
return output
return refedined_fn
@staticmethod
def parse_args(*args, **kwargs):
"""
parse_args(args, kwargs)
Auxiliar function to pass fn's args and kwargs like in a normal fn call
Just passs the positional args first and then key arguments
ex.: parse_args(1,2,3,x=15,y=[0,0,1],z='aa')
Parameters
----------
args: The positional arguments of fn
kwargs: The key arguments of fn
Returns
-------
values: dict with 2 keys: 'pos_inputs' and 'key_inputs'
"""
return {"pos_inputs": args, "key_inputs": kwargs}
@staticmethod
def run_tests(tests):
"""
run_tests(tests)
Run all the tests in the 'tests' list
Parameters
----------
tests: list[Didatic_test]
A list of tests that you want to execute
"""
results = []
number_of_tests = len(tests)
completed_tests = 0
aborted_tests = 0
correct_outputs_tests = 0
correct_prints_tests = 0
for index, test in enumerate(tests):
if test.test_name is None:
test.test_name = index
else:
test.test_name = f"{index} - {test.test_name}"
result = test.run()
correct_outputs_tests += result["output_is_correct"]
correct_prints_tests += result["print_is_correct"]
aborted_tests += result["test_failed"]
completed_tests += result["test_done"]
results.append(result)
print(
f"""
correct_outputs_tests: {correct_outputs_tests}/{number_of_tests}
correct_prints_tests: {correct_prints_tests}/{number_of_tests}
aborted_tests: {aborted_tests}/{number_of_tests}
completed_tests: {completed_tests}/{number_of_tests}
"""
)
return results
fn = None
verbose = False
run_output_test = True
run_prints_test = False
@staticmethod
def set_defaults(fn=None, verbose=None, run_output_test=None, run_prints_test=None):
"""
set_defaults(fn=None, verbose=None, run_output_test=None, run_prints_test None)
Set common default values fot the tests configs to avoid repetition when \
setting them up later
Parameters
----------
fn: Callable
The function that will be tested
verbose: bool
Controls if all the fn's internal print()'s and \
input()'s prompts are printed
run_output_test: bool
Controls if the fn's return value is checked
run_prints_test: bool
Controls if the fn's internal print()'s are checked
"""
if not (fn is None):
def new_fn(self, *args, **kwargs):
return fn(*args, **kwargs)
Didatic_test.fn = new_fn
if not (verbose is None):
Didatic_test.verbose = verbose
if not (run_output_test is None):
Didatic_test.run_output_test = run_output_test
if not (run_prints_test is None):
Didatic_test.run_prints_test = run_prints_test
@staticmethod
def set_generator_defaults(
fn=None,
verbose=None,
run_output_test=None,
run_prints_test=None,
generator_verbose=None,
):
"""
set_generator_defaults(fn=None, verbose=None, run_output_test=None, \
run_prints_test=None, generator_verbose=None)
Set common default values fot the test generator to avoid unnecessary repetition
Parameters
----------
fn: Callable
The function that will be tested
verbose: bool
Controls if all the fn's internal print()'s and \
input()'s prompts are printed when a test runs
run_output_test: bool
Controls if the fn's return value is tested
run_prints_test: bool
Controls if the fn's internal print()'s are tested
generator_verbose: bool
Controls if all the fn's internal print()'s and\
input()'s prompts are printed on the test\
generator run
"""
if not (fn is None):
Didatic_test.default_fn = fn
if not (verbose is None):
Didatic_test.default_verbose = verbose
if not (run_output_test is None):
Didatic_test.default_run_output_test = run_output_test
if not (run_prints_test is None):
Didatic_test.default_run_prints_test = run_prints_test
if not (generator_verbose is None):
Didatic_test.default_generator_verbose = generator_verbose
def run(self):
"""
run()
Run the configured Didatic_test, print the result and \
returns a dictionary with the test outcome
{
"output_is_correct": bool,
"print_is_correct": bool,
"test_failed": bool,
"test_done": bool,
}
"""
self.keyboard_inputs_list = list(self.keyboard_inputs)
self.interceptions = {}
fn_temp = Didatic_test.intercepted_fn(
self.fn, self.interceptions, self.verbose, "[I]: ", "[P]: "
)
new_fn = Didatic_test.redefine(fn_temp, self.keyboard_inputs_list, False)
try:
new_fn(*self.args, **self.kwargs)
fn_output = self.interceptions["output"]
self.output_is_correct = fn_output == self.expected_output
fn_prints = "".join(self.interceptions["prints"])
self.print_is_correct = fn_prints == self.expected_prints
self.test_done = True
except Exception as excpt:
self.test_failed = True
self.test_exception = excpt
finally:
print(f"Case: {self.test_name}")
if self.test_failed:
self.__print_exception()
else:
self.__print_result()
print("---------------------------------------------------")
return {
"output_is_correct": self.output_is_correct,
"print_is_correct": self.print_is_correct,
"test_failed": self.test_failed,
"test_done": self.test_done,
}
def just_run(self):
"""
just_run()
Run the configured Didatic_test, print the result and \
returns a dictionary with the test outcome
{
"output_is_correct": bool,
"print_is_correct": bool,
"test_failed": bool,
"test_done": bool,
}
"""
self.keyboard_inputs_list = list(self.keyboard_inputs)
self.interceptions = {}
fn_temp = Didatic_test.intercepted_fn(
self.fn, self.interceptions, self.verbose, "[I]: ", "[P]: "
)
new_fn = Didatic_test.redefine(fn_temp, self.keyboard_inputs_list, False)
print(f"Case: {self.test_name}")
try:
new_fn(*self.args, **self.kwargs)
except Exception as excpt:
self.test_exception = excpt
self.__print_exception()
def __init__(
self,
fn=None,
args={},
test_name="Test",
keyboard_inputs=[],
expected_output=None,
expected_prints="",
verbose=None,
run_output_test=None,
run_prints_test=None,
):
if not (fn is None):
self.fn = fn
self.args = args.get("pos_inputs", ())
self.kwargs = args.get("key_inputs", {})
self.test_name = test_name
self.keyboard_inputs = keyboard_inputs
self.expected_output = expected_output
self.expected_prints = expected_prints
if not (verbose is None):
self.verbose = verbose
if not (run_output_test is None):
self.run_output_test = run_output_test
if not (run_prints_test is None):
self.run_prints_test = run_prints_test
self.test_done = False
self.test_failed = False
self.output_is_correct = None
self.print_is_correct = None
self.test_exception = None
self.interceptions = {}
def __repr__(self) -> str:
return f"fn: {self.fn.__name__}/n\
args: {self.args}/n\
kwargs: {self.kwargs}/n\
test_name: {self.test_name}/n\
keyboard_inputs: {self.keyboard_inputs}/n\
expected_output: {self.expected_output}/n\
expected_prints: {self.expected_prints}/n\
verbose: {self.verbose}/n\
run_output_test: {self.run_output_test}/n\
run_prints_test: {self.run_prints_test}/n\
interceptions: {str(self.interceptions)}"
def __print_exception(self):
print("🚨⚠️🚨⚠️🚨 Error! 💀💀💀")
print(type(self.test_exception))
print(self.test_exception.args)
print(self.test_exception)
def __print_result(self):
outputs_check = "✔️" if self.output_is_correct else "❌"
prints_check = "✔️" if self.print_is_correct else "❌"
outputs_check_message = (
f"outputs: {outputs_check} " if self.run_output_test else ""
)
prints_check_message = (
f"prints: {prints_check} " if self.run_prints_test else ""
)
remaining_keyboard_inputs_warning = (
f"⚠️☢️ Warning!!! some inputs were not used: \
{self.keyboard_inputs_list}"
if len(self.keyboard_inputs_list) > len(self.interceptions["inputs"])
else ""
)
print(
f"{outputs_check_message}{prints_check_message}\
{remaining_keyboard_inputs_warning}"
)
if (not self.output_is_correct) or (not self.print_is_correct):
stripped_print_buffer = (
"".join(self.interceptions["prints"]).replace("\n", " | ").rstrip(" | ")
)
stripped_expected_prints = self.expected_prints.replace("\n", " | ").rstrip(
" | "
)
fn_args_line = f" ➖ Function args: {self.args} {self.kwargs}"
keyboard_inputs_line = f"\n ➖ Keyboard inputs: {self.keyboard_inputs}"
output_line = (
f"\n {outputs_check} Function outputs: \
{self.interceptions['output']}"
if self.run_output_test
else ""
)
expected_output_line = (
f"\n ➖ Expected output: {self.expected_output}"
if self.run_output_test
else ""
)
prints_line = (
f"\n {prints_check} fn internal prints: {stripped_print_buffer}"
if self.run_prints_test
else ""
)
expected_prints_line = (
f"\n ➖ Expected prints: {stripped_expected_prints}"
if self.run_prints_test
else ""
)
print(
f"{fn_args_line}{keyboard_inputs_line}{output_line}\
{expected_output_line}{prints_line}{expected_prints_line}"
)
| 7,289 | 0 | 304 |
68447a9b071c1a187cc068eb308d7b403243d6b0 | 6,812 | py | Python | autogl/module/model/encoders/_dgl/_gin.py | dedsec-9/AutoGL | 487f2b2f798b9b1363ad5dc100fb410b12222e06 | [
"MIT"
] | null | null | null | autogl/module/model/encoders/_dgl/_gin.py | dedsec-9/AutoGL | 487f2b2f798b9b1363ad5dc100fb410b12222e06 | [
"MIT"
] | null | null | null | autogl/module/model/encoders/_dgl/_gin.py | dedsec-9/AutoGL | 487f2b2f798b9b1363ad5dc100fb410b12222e06 | [
"MIT"
] | null | null | null | import dgl
import torch.nn.functional
import typing as _typing
from dgl.nn.pytorch.conv import GINConv
from .. import base_encoder, encoder_registry
from ... import _utils
class ApplyNodeFunc(torch.nn.Module):
"""Update the node feature hv with MLP, BN and ReLU."""
class MLP(torch.nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
"""MLP layers construction
Paramters
---------
num_layers: int
The number of linear layers
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
"""
super(MLP, self).__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
self.output_dim = output_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = torch.nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(torch.nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(torch.nn.Linear(hidden_dim, hidden_dim))
self.linears.append(torch.nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
self.batch_norms.append(torch.nn.BatchNorm1d(hidden_dim))
@encoder_registry.EncoderUniversalRegistry.register_encoder('gin')
@encoder_registry.EncoderUniversalRegistry.register_encoder('gin_encoder')
| 34.933333 | 98 | 0.546829 | import dgl
import torch.nn.functional
import typing as _typing
from dgl.nn.pytorch.conv import GINConv
from .. import base_encoder, encoder_registry
from ... import _utils
class ApplyNodeFunc(torch.nn.Module):
    """Node-update function: apply the wrapped MLP, then BatchNorm1d, then ReLU."""

    def __init__(self, mlp):
        """Wrap *mlp* and add a BatchNorm1d sized to ``mlp.output_dim``."""
        super(ApplyNodeFunc, self).__init__()
        self.mlp = mlp
        self.bn = torch.nn.BatchNorm1d(self.mlp.output_dim)

    def forward(self, h):
        """Return relu(bn(mlp(h)))."""
        return torch.nn.functional.relu(self.bn(self.mlp(h)))
class MLP(torch.nn.Module):
    """Multi-layer perceptron whose final layer is linear (no activation)."""

    def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
        """Construct the MLP layers.

        Paramters
        ---------
        num_layers: int
            The number of linear layers
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        """
        super(MLP, self).__init__()
        self.linear_or_not = True  # single-linear model by default
        self.num_layers = num_layers
        self.output_dim = output_dim
        if num_layers < 1:
            raise ValueError("number of layers should be positive!")
        if num_layers == 1:
            # Degenerate case: one plain linear layer.
            self.linear = torch.nn.Linear(input_dim, output_dim)
            return
        # Proper MLP: input -> hidden x (num_layers-1) -> output, with a
        # batch-norm attached to every layer except the last.
        self.linear_or_not = False
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.linears = torch.nn.ModuleList(
            torch.nn.Linear(dims[i], dims[i + 1]) for i in range(num_layers)
        )
        self.batch_norms = torch.nn.ModuleList(
            torch.nn.BatchNorm1d(hidden_dim) for _ in range(num_layers - 1)
        )

    def forward(self, x):
        """Run the MLP; hidden layers use relu(batch_norm(linear(.)))."""
        if self.linear_or_not:
            return self.linear(x)
        h = x
        for linear, bn in zip(self.linears[:-1], self.batch_norms):
            h = torch.nn.functional.relu(bn(linear(h)))
        return self.linears[-1](h)
class _GIN(torch.nn.Module):
    """Stack of GIN convolution layers: GINConv -> BatchNorm1d -> activation."""
    def __init__(
        self, input_dimension: int,
        dimensions: _typing.Sequence[int],
        num_mlp_layers: int,
        act: _typing.Optional[str],
        _eps: str, neighbor_pooling_type: str
    ):
        """Build one GINConv (plus batch-norm) per entry of *dimensions*.

        :param input_dimension: feature size of the input nodes
        :param dimensions: output size of each successive layer
        :param num_mlp_layers: depth of the MLP inside every GINConv
        :param act: activation name resolved via _utils.activation
        :param _eps: "True"/"False" string; whether GINConv's eps is learnable
        :param neighbor_pooling_type: aggregator passed to GINConv
            (presumably "sum"/"mean"/"max" per the maintainer's search space)
        """
        super(_GIN, self).__init__()
        self.__num_layers: int = len(dimensions)
        self._act: _typing.Optional[str] = act
        self.__gin_layers: torch.nn.ModuleList = torch.nn.ModuleList()
        self.__batch_normalizations: torch.nn.ModuleList = torch.nn.ModuleList()
        for layer in range(self.__num_layers):
            # Layer 0 consumes the raw input features; later layers chain on
            # the previous layer's output dimension.
            mlp = MLP(
                num_mlp_layers,
                input_dimension if layer == 0 else dimensions[layer - 1],
                dimensions[layer], dimensions[layer]
            )
            # Positional args look like (apply_func, aggregator_type,
            # init_eps=0, learn_eps) — TODO confirm against the dgl version in use.
            self.__gin_layers.append(
                GINConv(
                    ApplyNodeFunc(mlp), neighbor_pooling_type, 0,
                    _eps.lower() == "true"
                )
            )
            self.__batch_normalizations.append(
                torch.nn.BatchNorm1d(dimensions[layer])
            )
    def forward(self, graph: dgl.DGLGraph, *__args, **__kwargs) -> _typing.Sequence[torch.Tensor]:
        """Return [input_features, layer_1_output, ..., layer_n_output].

        Node features are read from graph.ndata['feat']; extra args ignored.
        """
        x: torch.Tensor = graph.ndata['feat']
        features: _typing.MutableSequence[torch.Tensor] = [x]
        for layer in range(self.__num_layers):
            x: torch.Tensor = self.__gin_layers[layer](graph, x)
            x: torch.Tensor = self.__batch_normalizations[layer](x)
            # Activation applied outside GINConv via the shared helper.
            x: torch.Tensor = _utils.activation.activation_func(x, self._act)
            features.append(x)
        return features
@encoder_registry.EncoderUniversalRegistry.register_encoder('gin')
@encoder_registry.EncoderUniversalRegistry.register_encoder('gin_encoder')
class GINEncoderMaintainer(base_encoder.AutoHomogeneousEncoderMaintainer):
    """Maintainer that builds a _GIN encoder from tunable hyper-parameters.

    Registered under both the 'gin' and 'gin_encoder' names.
    """
    def __init__(
        self,
        input_dimension: _typing.Optional[int] = ...,
        final_dimension: _typing.Optional[int] = ...,
        device: _typing.Union[torch.device, str, int, None] = ...,
        *args, **kwargs
    ):
        """Define the HPO search space and the default hyper-parameters."""
        super(GINEncoderMaintainer, self).__init__(
            input_dimension, final_dimension,
            device, *args, **kwargs
        )
        self.hyper_parameter_space = [
            {
                "parameterName": "num_layers",
                "type": "DISCRETE",
                "feasiblePoints": "2,3,4",
            },
            {
                # 'hidden' is a per-layer list; cutFunc trims it to
                # num_layers - 1 entries (final dim is appended separately).
                "parameterName": "hidden",
                "type": "NUMERICAL_LIST",
                "numericalType": "INTEGER",
                "length": 3,
                "minValue": [8, 8, 8],
                "maxValue": [64, 64, 64],
                "scalingType": "LOG",
                "cutPara": ("num_layers",),
                "cutFunc": lambda x: x[0] - 1,
            },
            {
                "parameterName": "act",
                "type": "CATEGORICAL",
                "feasiblePoints": ["leaky_relu", "relu", "elu", "tanh"],
            },
            {
                # Passed to _GIN as a string and compared to "true".
                "parameterName": "eps",
                "type": "CATEGORICAL",
                "feasiblePoints": ["True", "False"],
            },
            {
                "parameterName": "mlp_layers",
                "type": "DISCRETE",
                "feasiblePoints": "2,3,4",
            },
            {
                "parameterName": "neighbor_pooling_type",
                "type": "CATEGORICAL",
                "feasiblePoints": ["sum", "mean", "max"],
            }
        ]
        self.hyper_parameters = {
            "num_layers": 3,
            "hidden": [64, 64],
            "act": "relu",
            "eps": "False",
            "mlp_layers": 2,
            "neighbor_pooling_type": "sum"
        }
    def _initialize(self) -> _typing.Optional[bool]:
        """Instantiate the _GIN encoder on self.device; True on success."""
        dimensions = list(self.hyper_parameters['hidden'])
        # Append the requested output size unless unset (Ellipsis/None/<=0).
        if (
            self.final_dimension not in (Ellipsis, None) and
            isinstance(self.final_dimension, int) and
            self.final_dimension > 0
        ):
            dimensions.append(self.final_dimension)
        self._encoder = _GIN(
            self.input_dimension, dimensions, self.hyper_parameters["mlp_layers"],
            self.hyper_parameters["act"], self.hyper_parameters["eps"],
            self.hyper_parameters["neighbor_pooling_type"]
        ).to(self.device)
        return True
| 4,620 | 60 | 231 |
eb1669f961f86b086e499126d0d7b3bd559ac0d8 | 26,338 | py | Python | infotrope/datasets/base.py | dwd/Polymer | 656d2b2f159b649fdb5281b82d938fb5453b9047 | [
"MIT"
] | 4 | 2016-04-14T06:52:02.000Z | 2019-03-30T08:43:14.000Z | infotrope/datasets/base.py | dwd/Polymer | 656d2b2f159b649fdb5281b82d938fb5453b9047 | [
"MIT"
] | null | null | null | infotrope/datasets/base.py | dwd/Polymer | 656d2b2f159b649fdb5281b82d938fb5453b9047 | [
"MIT"
] | null | null | null | #
# Copyright 2004,2005 Dave Cridland <dave@cridland.net>
#
# This file forms part of the Infotrope Python Library.
#
# The Infotrope Python Library is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# The Infotrope Python Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infotrope Python Library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from infotrope.weak import weakref
import infotrope.acap
dataset_types = {}
datasets = weakref.WeakValueDictionary()
| 36.030096 | 298 | 0.539601 | #
# Copyright 2004,2005 Dave Cridland <dave@cridland.net>
#
# This file forms part of the Infotrope Python Library.
#
# The Infotrope Python Library is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# The Infotrope Python Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infotrope Python Library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from infotrope.weak import weakref
import infotrope.acap
# Thin "marker" subclasses of infotrope.acap.search: they add no behaviour,
# but give each phase of a dataset synchronisation its own type so callbacks
# and debugging can tell the outstanding searches apart.
class resync_search( infotrope.acap.search ):
    # Not referenced in this module chunk; presumably an older resync marker.
    def __init__( self, **kw ):
        infotrope.acap.search.__init__( self, **kw )
class index_search( infotrope.acap.search ):
    # Sorted search used by send_index_search() to rebuild the name index.
    def __init__( self, **kw ):
        infotrope.acap.search.__init__( self, **kw )
class first_search( infotrope.acap.search ):
    # Initial context-creating search issued by do_search().
    def __init__( self, **kw ):
        infotrope.acap.search.__init__( self, **kw )
class fallback_resync_search( infotrope.acap.search ):
    # Used when DELETEDSINCE fails: re-fetch entries by modtime instead.
    def __init__( self, **kw ):
        infotrope.acap.search.__init__( self, **kw )
class modsince_search( infotrope.acap.search ):
    # Finds entries modified since the cached modtime during a resync.
    def __init__( self, **kw ):
        infotrope.acap.search.__init__( self, **kw )
class dataset_class:
def __init__( self, url, depth=False ):
self._search = None
self._cache = None
import infotrope.serverman
import infotrope.url
import infotrope.cache
self.url = infotrope.url.URL( url )
if self.url.path[-1] != '/':
self.url.path += '/'
self._search = None
self._resync_search = None
self._waited = False
self._sync_active = False
self._subnotify = []
c = infotrope.serverman.get_serverman().get( self.url ) # Async.
self._cache_name = c.cache_root()
self._cache = None
self._delta_cache = infotrope.cache.dummy()
self._delta_index = []
self._index = None
self._modtime = None
if self._cache_name and not depth:
import os
for x in url.path.split('/'):
if len(x):
if x == '~':
x = '%'
self._cache_name = os.path.join( self._cache_name, x )
if not os.path.exists( self._cache_name ):
os.makedirs( self._cache_name )
self._cache_name = os.path.join( self._cache_name, self.__class__.__name__ )
self._cache = infotrope.cache.open( self._cache_name )
self._delta_cache = infotrope.cache.open( self._cache_name + '_delta' )
if self._delta_cache.has_key('INDEX'):
self._delta_index = self._delta_cache['INDEX']
if self._cache.has_key('INDEX'):
self._index = self._cache['INDEX']
self._modtime = self._cache['MODTIME']
if self.url.server == '__DUMMY__':
if self._cache is None:
self._cache = infotrope.cache.dummy()
if not self._cache.has_key('INDEX'):
self._index = []
self._cache['INDEX'] = []
self._modtime = 'whenever'
self._cache['MODTIME'] = 'whenever'
self._cache.sync()
self._delta_cache.sync()
self._waited = True
self._sync_active = True
c.notify_ready( self.connected )
def sync( self ):
if self._sync_active:
if self._cache is not None:
self._cache.sync()
if self._delta_cache is not None:
self._delta_cache.sync()
    def log( self, what ):
        """Log *what*, tagged with this dataset's URL, via the global serverman."""
        import infotrope.serverman
        infotrope.serverman.get_serverman().log( self.url.asString(), '==', what )
def connected( self, c ):
if not c.ready:
return
c.add_resync( self.reconnect )
self.do_search( c )
    def reconnect( self, c ):
        """Resync callback: drop the stale search and redo it once *c* is ready."""
        self._search = None
        c.notify_ready( self.connected )
def real_getitem( self, what ):
if self._cache:
return self._cache['E::'+what]
if self.url.server == '__DUMMY__':
raise KeyError,what
self.do_search()
if what not in self._search:
if not self._waited:
self.log( "Waiting []..." )
self._search.wait()
return self._search[what]
    def raw_getitem( self, what ):
        """Return the raw attribute dict for entry *what*.

        Starts from the cached/server copy (if any) and overlays local,
        not-yet-synced modifications from the delta cache: a delta entry of
        None means the whole entry was deleted locally (raises KeyError); an
        attribute value of None means that attribute was removed locally.
        """
        d = {}
        try:
            q = self.real_getitem( what )
            if q is not None:
                d.update( q )
        except TypeError:
            pass
        except KeyError:
            # Unknown on the server/cache side; it may still exist locally.
            pass
        if what in self._delta_index:
            o = self._delta_cache['E::'+what]
            if o is None:
                raise KeyError, "Entry %s has been deleted locally" % `what`
            for k,a in o.items():
                if a is None:
                    # Attribute deleted locally.
                    if k in d:
                        del d[k]
                else:
                    # Attribute added or modified locally.
                    if k in d:
                        d[k]['value'] = a
                    else:
                        d[k] = {'value':a}
        return d
def __getitem__( self, what ):
if isinstance(what,int):
index = self.get_index()
what = index[what]
entry = self.raw_getitem( what )
return self.base_factory( entry )
return self.base_factory( self.raw_getitem( what ) )
    def __delitem__( self, what ):
        """Delete entry *what* by storing None (recorded as a local deletion)."""
        self[what] = None
def __setitem__( self, what, s ):
if isinstance(what,unicode):
what = what.encode('utf-8')
oldhere = what in self
key = 'E::' + what
if s is not None:
if key in self._delta_cache:
d = self._delta_cache[key]
else:
d = None
if d is None:
d = {}
d.update( s )
else:
d = None
if what not in self._delta_index:
self._delta_index.append( what )
self._delta_cache[key] = d
self._delta_cache['INDEX'] = self._delta_index
self.sync()
sm = infotrope.serverman.get_serverman()
c = sm.get( self.url )
if c.ready:
self.do_pending_stores( c )
self.do_search( c )
else:
newhere = what in self
if oldhere:
if newhere:
self.send_notify_change( what )
else:
self.send_notify_removefrom( what )
else:
self.send_notify_addto( what )
def __contains__( self, what ):
if what in self._delta_index:
if self._delta_cache['E::'+what] is None:
return False
else:
return True
try:
self.index( what )
return True
except ValueError:
return False
def get_index( self ):
if self._index is not None:
if not self._delta_index:
return self._index
index = self._index[:]
for x in self._delta_index:
if x in index:
if self._delta_cache['E::'+x] is None:
index.remove(x)
else:
if self._delta_cache['E::'+x] is not None:
index.append(x)
return index
self.do_search()
if self._search is None:
return []
if not self._waited:
self._search.wait()
return self._search.entries()
def index( self, what ):
if self._cache:
return self.get_index().index(what)
self.do_search()
if not self._search:
raise ValueError, "No search"
if what not in self._search.entries():
if not self._waited:
self._search.wait()
return self._search.entries().index(what)
def entries( self ):
if self._cache:
self.log( "Producing cache index." )
return self.get_index()
self.do_search()
if self._search is None:
return []
if not self._waited:
self.log( "Waiting for entries" )
self._search.wait()
self.log( "Using search entries" )
return self._search.entries()
def __len__( self ):
self.log( "len() requested" )
if self._cache:
self.log( "Using cache length" )
return len(self.get_index())
self.log( "Using search length" )
self.do_search()
if not self._waited:
self.log( "Waiting len()" )
self._search.wait()
if self._search is None:
self.log( "Search dead, zero length" )
return 0
return len(self._search)
def empty( self ):
return len(self)==0
def do_search( self, conn=None ):
import infotrope.acap
if self._search is not None:
self.do_pending_stores( conn )
return
if conn is None:
import infotrope.serverman
conn = infotrope.serverman.get_serverman()[self.url]
return
limit = None
self._search_mode = 'FIRST'
self._modtime_resync = self._modtime
if self._cache and self._modtime: # We have a cache with a modtime, so we need a resync.
limit = 0
self._search_mode = 'RESYNC'
self.log( "Using resync search" )
sort = self.get_search_sort()
self._search = first_search( connection=conn, context=self.__class__.__name__ + self.url.path, base=self.url.path, criteria=self.get_search_criteria(), ret=self.get_search_return(), sort=sort, notify=self, enum=sort is not None, limit=limit, notify_complete=self.recreate_context_complete )
self._search.send()
def recreate_context_complete( self, search, result ):
    """Completion callback for the search that recreates the server context.

    On failure, drop the search and index, flush pending stores and tell
    subscribers.  On success, either launch a modtime-based resync search
    (RESYNC mode) or adopt the fresh entry list directly.

    NOTE(review): indentation reconstructed from syntax; nesting assumed.
    """
    self._waited = True
    if result.lower()!='ok':
        # Context recreation failed: fall back to an empty index so any
        # queued stores can still be flushed and subscribers notified.
        self._search = None
        self._index = []
        self.do_pending_stores()
        self.send_notify_complete()
        return
    if self._search_mode == 'RESYNC':
        self.log( "Sending modsince search" )
        # Ask only for entries modified since the last known modtime.
        self._modsearch = modsince_search( connection=self._search.connection(), base=self.__class__.__name__ + self.url.path, ret=self.get_search_return(), notify_complete=self.modsince_search_complete, criteria='COMPARESTRICT "modtime" "i;octet" "%s"' % self._modtime_resync )
        self._modsearch.send()
    else:
        self._index = self._search.entries()
        self.update_cache()
        self.do_pending_stores()
        self.send_notify_complete()

def modsince_search_complete( self, *args ):
    """Merge modified-since search results into the cached index.

    Compares entry counts to decide whether deletions occurred; if they
    did, finds them via an index search (sorted contexts) or a
    DELETEDSINCE command (unsorted contexts).
    """
    ls = len(self._search)
    li = len(self._index)
    lm = len(self._modsearch)
    if self.get_search_sort() is not None:
        self.log( "Sorted search" )
        if ls==li and lm==0:
            self.log( "Length unchanged and no modifications." )
            self._modsearch = None
            self.sync_complete()
            return # No modifications, context same size => no deletions.
        self.log( "Merging modifications" )
        for x in self._modsearch.entries():
            if x in self._index:
                self.notify_change( x, 0, 0, self._modsearch[x] )
            else:
                self.notify_addto( x, 0, self._modsearch[x] )
        self.send_index_search()
    else:
        self.log( "Unsorted search, merging modifications." )
        adds = 0
        for x in self._modsearch.entries():
            if x in self._index:
                self.notify_change( x, 0, 0, self._modsearch[x] )
            else:
                adds += 1
                self.notify_addto( x, 0, self._modsearch[x] )
        if ls-li == adds:
            self.log( "Difference in length equal to adds, no deletions." )
            self._modsearch = None
            self.sync_complete()
            return # No modifications, context same size => no deletions.
        self.log( "Finding deletions via DELETEDSINCE." )
        conn = self._search.connection()
        t,r,s = conn.send( 'DELETEDSINCE', infotrope.acap.string( self.url.path ), infotrope.acap.string( self._modtime_resync ) )
        conn.register( t, self )
        t.notifier = self.deletedsince
        self._resync_command = t

def send_index_search( self ):
    """Launch a names-only search over the context to detect deletions."""
    self.log( "Sending index search" )
    self._modsearch = None # Done with this now.
    self._indexsearch = index_search( connection=self._search.connection(), base=self.__class__.__name__ + self.url.path, ret=['modtime'], sort=self.get_search_sort(), notify_complete=self.index_search_complete )
    self._indexsearch.send()
def index_search_complete( self, *args ):
    """Adopt the index-search result: drop vanished entries, copy the index."""
    self.log( "Index search complete, removing non-existent entries" )
    for x in self._index:
        if x not in self._indexsearch:
            self.notify_removefrom( x, 0 )
    self.log( "Copying index" )
    self._index = self._indexsearch.entries()
    self._modtime = self._indexsearch.modtime()
    self.update_cache( False )
    self._indexsearch = None
    self.sync_complete()

def _handle_deleted( self, t, r, s ):
    """Remove a single entry named *s* that the server reported deleted."""
    self.log( "Removing deleted entry" )
    self.notify_removefrom( s, 0 )

def get_search_sort( self ):
    """Sort specification for the context search; None means unsorted."""
    return None

def deletedsince( self, cmd, t, r, s ):
    """Handle completion of the DELETEDSINCE command.

    On failure (common in practice), fall back to a full resync search
    restricted by modtime.  On success, compare sizes to decide whether
    an index search is still needed to separate moves from removals.
    """
    self._resync_command.notifier = None # Break loop.
    self._resync_command = None
    if r.lower()!='ok': # In theory, this is rare. In practise, it's actually normal.
        self.log( "DELETEDSINCE failed, using fallback resync search" )
        conn = infotrope.serverman.get_serverman()[self.url]
        criteria = self.get_search_criteria()
        criteria = 'AND COMPARE "modtime" "-i;octet" "%s" %s' % ( self._modtime_resync, criteria )
        self._resync_search = fallback_resync_search( connection=conn, base=self.__class__.__name__ + self.url.path, criteria=criteria, ret=['entry'], notify_complete=self.resync_search_complete )
        self._resync_search.send()
    else:
        self.log( "DELETEDSINCE success!" )
        if len(self._search) == len(self._index):
            self.log( "Found all removals." )
            self._modsearch = None
            self.sync_complete()
        else:
            self.log( "Some removals are modifications, annoying." )
            self.send_index_search()

def base_factory( self, entry ):
    """Wrap a raw server entry dict in the right entry class.

    Entries whose 'entry' attribute value is the empty string become
    empty_entry; everything else is dispatched through self.factory().
    """
    if 'entry' in entry and 'value' in entry['entry']:
        if entry['entry']['value'] == '':
            return empty_entry( entry, self.url )
    return self.factory( entry )( entry, self.url )

def new( self, t=None ):
    """Create a new entry; must be overridden by concrete dataset classes."""
    # Python 2 old-style raise syntax; this is a deliberately abstract method.
    raise TypeError, "Abstract method called."

def update_cache( self, normal=True ):
    """Persist the current modtime and index into the on-disk cache.

    When *normal* is true, skips the write if the search's modtime is
    unchanged since the last update.
    """
    if self._index is None:
        self._index = []
    if normal:
        if self._modtime == self._search.modtime():
            return
        self._modtime = self._search.modtime()
    if self._cache is not None:
        self._cache['MODTIME'] = self._modtime
        self._cache['INDEX'] = self._index
        self.sync()
def send_notify_addto( self, entry ):
    """Tell every live subscriber that *entry* was added."""
    for subscriber in [ ref() for ref in self._subnotify ]:
        if subscriber is not None:
            subscriber.notify_addto( entry )

def send_notify_change( self, entry ):
    """Tell every live subscriber that *entry* changed."""
    for subscriber in [ ref() for ref in self._subnotify ]:
        if subscriber is not None:
            subscriber.notify_change( entry )

def send_notify_removefrom( self, entry ):
    """Tell every live subscriber that *entry* was removed."""
    for subscriber in [ ref() for ref in self._subnotify ]:
        if subscriber is not None:
            subscriber.notify_removefrom( entry )

def send_notify_complete( self ):
    """Tell every live subscriber that the current operation finished OK."""
    for subscriber in [ ref() for ref in self._subnotify ]:
        if subscriber is not None:
            subscriber.notify_complete( 'ok' )
def notify_addto( self, entry, pos, e=None ):
    """Handle a server-side addition of *entry* at position *pos*.

    Updates the cache and index, then notifies subscribers.  If the entry
    was already cached, the addition is downgraded to a change event.
    """
    self.log( "Add: " + entry )
    # BUG FIX: `incache` was only assigned inside the `self._cache is not
    # None` branch but read unconditionally below, raising NameError for
    # cacheless datasets.  Default it to False.
    incache = False
    if self._cache is not None:
        incache = 'E::'+entry in self._cache
        self._cache['E::'+entry] = e or self._search[entry]
    if self._index is not None:
        if entry not in self._index:
            self._index.insert( pos, entry )
    else:
        self._index = [ entry ]
    # repr() replaces the Python-2-only backtick syntax; identical output.
    self.log( "Post index insert: " + repr(self._index) + " incache: " + repr(incache) )
    self.update_cache()
    if incache:
        # Already known locally: a re-add is really a modification.
        self.send_notify_change( entry )
        return
    self.send_notify_addto( entry )
def notify_removefrom( self, entry, pos ):
    """Handle a server-side removal of *entry* (at reported position *pos*).

    Drops the entry from cache and index; subscribers are only notified
    when the entry was actually cached (matching the original intent).
    """
    self.log( "Remove: " + entry )
    # BUG FIX: `incache` was only assigned when `self._cache is not None`
    # but read unconditionally below, raising NameError for cacheless
    # datasets.  Default it to False.
    incache = False
    if self._cache is not None:
        incache = 'E::'+entry in self._cache
        # Robustness: only delete when present; the unconditional del
        # raised KeyError for entries never cached.
        if incache:
            del self._cache['E::'+entry]
    # Robustness: tolerate removal notifications for unknown entries.
    if entry in self._index:
        self._index.remove( entry )
    self.log( "Post index removal: " + repr(self._index) )
    self.update_cache()
    if not incache:
        return
    self.send_notify_removefrom( entry )
def notify_change( self, entry, oldpos, newpos, e=None ):
    """Handle a server-side modification of *entry*, possibly moving it
    from *oldpos* to *newpos* within the sorted index."""
    self.log( "Change: " + entry )
    if self._cache is not None:
        self._cache['E::'+entry] = e or self._search[entry]
    self.log( "Pre index removal: " + `self._index` )
    self._index.remove( entry )
    self.log( "Post index removal: " + `self._index` )
    self._index.insert( newpos, entry )
    self.log( "Post index insert: " + `self._index` )
    self.update_cache()
    self.send_notify_change( entry )

def sync_complete( self ):
    """Finish a resynchronization: push the index to the search object,
    persist, flush queued stores, and notify subscribers."""
    self.log( "Sync complete, setting index." )
    self._search.set_index( self._index )
    self.update_cache()
    self.log( "Fully resynchronized" )
    self.do_pending_stores()
    self.send_notify_complete()

def do_pending_stores( self, conn=None ):
    """Flush locally buffered entry modifications (the delta cache) to the
    server, then refresh or restart the context.

    NOTE(review): indentation reconstructed from syntax; the placement of
    the trailing `_sync_active`/`sync()` statements is an assumption.
    """
    if self._delta_index:
        if conn is None:
            conn = infotrope.serverman.get_serverman()[ self.url ]
        dl = []
        for k,d in self._delta_cache.items():
            if k=='INDEX':
                continue
            if d is None:
                # A None delta means the entry itself was deleted.
                d = {'entry':None}
            conn.store( self.url.path + k[3:], d, True )
            dl.append( k )
        for k in dl:
            del self._delta_cache[k]
        self._delta_index = []
        self._delta_cache['INDEX'] = self._delta_index
        conn.store_flush()
        self.sync()
        if self._search:
            self._search.updatecontext()
        else:
            self._waited = False
            self.do_search()
            self._sync_active = True
            self.sync()

def resync_search_complete( self, srch, r ):
    """Completion callback for the fallback resync search."""
    self.test_both_done()

def notify_complete( self, state ):
    """Handle overall search completion and fan the state out to subscribers.

    NOTE(review): indentation reconstructed from syntax; nesting of the
    update_cache/test_both_done calls is an assumption.
    """
    if state.lower()!='ok':
        self._search = None
        self._waited = False
    else:
        self._waited = True
        if self._cache is not None:
            if self._search_mode == 'FIRST':
                if self._index is None:
                    self._index = []
                for e in self._index:
                    self._cache['E::' + e] = self._search[e]
            self.update_cache()
        self.test_both_done()
    for x in self._subnotify:
        z = x()
        if z is not None:
            z.notify_complete( state.lower() )
def test_both_done( self ):
# Update cache after completed fallback search for failed DELETEDSINCE.
if self._search_mode == 'DELETEDSINCE':
if self._resync_search is None:
return
if self._resync_search.complete() and self._search.complete():
for x in self._cache.keys():
if x.find('E::'):
entry = x[3:]
if entry not in self._resync_search and entry not in self._search:
self.notify_removefrom( entry )
self.update_cache()
self._resync_search = None
def add_notify( self, what ):
    """Subscribe *what* (held via weakref) to change notifications,
    replaying the current contents so it starts in sync."""
    self._subnotify.append( weakref.ref(what) )
    if self._cache and self._cache.has_key('INDEX'):
        # Replay from the persisted index when a cache exists.
        for entry in self.get_index():
            what.notify_addto( entry )
    elif self._search is not None:
        # Otherwise replay from the live search results.
        for entry in self._search.entries():
            what.notify_addto( entry )
    if self._waited:
        what.notify_complete( 'ok' )

def shutdown(self):
    """Release the server-side context; the cache is persisted afterwards
    in post_shutdown()."""
    if self._search is not None:
        #print "freecontext"
        self._search.freecontext(self.post_shutdown)

def post_shutdown(self,*args):
    """Callback run once the context is freed: persist the cache."""
    #print "post shutdown"
    self.update_cache()

def __del__( self ):
    """Close the on-disk cache when the dataset object is collected."""
    #print "(base dataset del)"
    if self._cache is not None:
        self._cache.close()
class entry:
    """A single ACAP dataset entry.

    Wraps the raw attribute dict received from the server and a layer of
    pending local modifications (self._setting) on top of it.  Subclasses
    are expected to provide decode()/encode() to translate between wire
    values and Python objects.  (Python 2 code: uses `unicode`.)
    """
    def __init__( self, entry, url ):
        # entry: raw attribute dict from the server; url: containing dataset URL.
        self._raw = entry
        self.cont_url = url
        self._setting = {}

    def __getitem__( self, attra ):
        """Return the decoded value of attribute *attra*, preferring any
        pending local modification; None when absent or undecodable."""
        attr = ''
        if isinstance( attra, str ):
            attr = attra
        else:
            attr = attra.encode('utf-8')
        raw = None
        if attr in self._setting:
            raw = self._setting[attr]
        if raw is None:
            if attr in self._raw:
                if 'value' in self._raw[attr]:
                    raw = self._raw[attr]['value']
        if raw is None:
            return None
        if attr=='entry':
            # The entry name is always treated as UTF-8 text.
            return raw.decode( 'utf-8' )
        elif attr=='subdataset':
            # Subdataset values are kept raw (a list of location strings).
            return raw
        else:
            try:
                t = self.decode( attr, raw )
            except:
                return None # Attributes which fail to get decoded right are treated as if they don't exist.
            if t is not None:
                return t
            return raw

    def __setitem__( self, attr, polish ):
        """Stage a local modification of *attr* to the decoded value *polish*.

        None clears the attribute (only when currently set); otherwise the
        value is encoded and normalized to byte strings before staging.
        Writes that match the server-side value are dropped.
        """
        if polish is None:
            if self[attr] is not None:
                self._setting[attr] = None
            return
        raw = self.encode( attr, polish )
        if raw is None:
            raw = polish
        if isinstance(raw,unicode):
            raw = raw.encode('utf-8')
        elif isinstance(raw,tuple) or isinstance(raw,list):
            # Normalize every element of a multi-value to a byte string.
            nraw = []
            for x in raw:
                if isinstance(x,unicode):
                    nraw.append(x.encode('utf-8'))
                elif isinstance(x,str):
                    nraw.append(x)
                else:
                    nraw.append(str(x))
            raw = nraw
        elif not isinstance(raw,str):
            raw = str(raw)
        if attr in self._raw:
            if 'value' in self._raw[attr]:
                if raw==self._raw[attr]['value']:
                    # No change relative to the server: skip the store.
                    return
        self._setting[attr] = raw

    def entry_url( self ):
        """Full URL of this entry within its containing dataset."""
        u = self.cont_url.add_relative( self['entry'] )
        return u

    def save( self, force=False ):
        """Push staged modifications to the containing dataset.

        With *force*, all existing server values are re-staged as well,
        effectively rewriting the whole entry.
        """
        if force:
            for name,attr in self._raw.items():
                if name not in self._setting:
                    if attr['value'] is not None:
                        self._setting[name] = attr['value']
        if 0==len(self._setting):
            return
        u = self.entry_url()
        d = get_dataset( self.cont_url )
        en = u.path
        if '/' in en:
            en = en[en.rfind('/')+1:]
        d[en] = self._setting
        #infotrope.serverman.get_serverman()[ u ].store( u.path, self._setting )

    def subdataset_url( self ):
        """Resolve the 'subdataset' attribute to a URL, or None.

        Prefers the immediate subdataset ("."), then any local relative
        location, then the first remote location.
        """
        sd = self['subdataset']
        if sd is None:
            return None
        if '.' in sd:
            return self.entry_url() # Shortcut, prefer immediate.
        # Scan through twice - once for local, once for remote.
        for s in sd:
            if s[0:5]=='acap:' or s[0:2]=='//':
                # Remote.
                pass
            else:
                return self.entry_url().add_relative( s )
        if sd[0][0:5]=='acap:':
            return infotrope.url.URL( sd[0] )
        return infotrope.url.URL( 'acap:'+sd[0] )

    def subdataset( self ):
        """Return the dataset object for this entry's subdataset, or None."""
        u = self.subdataset_url()
        if u is not None:
            return get_dataset( u )
        return None

    def __contains__( self, what ):
        """True when the raw server entry has attribute *what*."""
        return what in self._raw
class empty_entry(entry):
    """Entry whose 'entry' attribute is the empty string (the dataset root).

    Knows how to decode 'dataset.acl*' attributes, which arrive as a list
    of tab-separated "identifier<TAB>rights" strings.
    """
    def __init__( self, e, url ):
        entry.__init__( self, e, url )
    def decode( self, attr, raw ):
        if attr.find( 'dataset.acl' ) == 0:
            # Turn each "who<TAB>rights" string into a dict item.
            return dict([ x.split('\t') for x in raw ])
        else:
            return raw
# Registry mapping ACAP dataset class names to their handler classes.
dataset_types = {}
# Cache of live dataset objects keyed by URL string; entries disappear
# automatically once no caller holds a reference.
datasets = weakref.WeakValueDictionary()

def register_dataset_type( dataset, c ):
    """Register class *c* as the handler for ACAP dataset class *dataset*."""
    global dataset_types
    dataset_types[dataset] = c
def get_dataset_type( url ):
    """Instantiate the registered dataset handler class for *url*.

    :raises TypeError: if the URL is not an ACAP URL.
    :raises ValueError: if the URL carries no dataset class.
    :raises KeyError: if no handler is registered for the dataset class.
    """
    import infotrope.url
    global dataset_types
    u = infotrope.url.URL( url )
    # BUG FIX: the original raised bare strings (e.g. raise "Erm. Dunno."),
    # which is itself a TypeError at raise time on any Python >= 2.6.
    # Raise real exception objects instead.  repr() replaces the old
    # backtick syntax with identical output.
    if u.scheme!='acap':
        raise TypeError( "Cannot handle non-ACAP URLs" )
    if u.dataset_class is None:
        raise ValueError( "URL carries no dataset class: %s" % repr(u) )
    if u.dataset_class not in dataset_types:
        raise KeyError( "No dataset type registered for %s from %s" % ( repr(u.dataset_class), repr(u) ) )
    return dataset_types[u.dataset_class]( u )
def get_dataset( url ):
    """Return the dataset object for *url*, creating and caching it on
    first use (the cache holds objects only weakly)."""
    import infotrope.url
    global datasets
    u = infotrope.url.URL( str(url) )
    us = u.asString()
    srv = None
    if us not in datasets:
        srv = get_dataset_type( u )
        datasets[us] = srv
    else:
        srv = datasets[us]
    return srv
def cleanup():
    """Drop every cached dataset by replacing the registry wholesale."""
    global datasets
    datasets = weakref.WeakValueDictionary()
| 23,302 | 123 | 1,913 |
e39cc9e52f76f774f895e55eda47eb10730e2507 | 2,325 | py | Python | examples/publications/b_ATB_solvationFreeEnergies/sets/build_gromos_singleligand_files.py | SalomeRonja/restraintmaker | ecb4c524cb11ffffa2ee3a3665056412bef84999 | [
"MIT"
] | 1 | 2021-11-15T14:01:28.000Z | 2021-11-15T14:01:28.000Z | examples/publications/b_ATB_solvationFreeEnergies/sets/build_gromos_singleligand_files.py | SalomeRonja/restraintmaker | ecb4c524cb11ffffa2ee3a3665056412bef84999 | [
"MIT"
] | 3 | 2021-10-01T18:35:59.000Z | 2022-01-28T14:59:34.000Z | examples/publications/b_ATB_solvationFreeEnergies/sets/build_gromos_singleligand_files.py | SalomeRonja/restraintmaker | ecb4c524cb11ffffa2ee3a3665056412bef84999 | [
"MIT"
] | 3 | 2020-12-27T12:27:15.000Z | 2021-11-01T13:45:14.000Z | #%%
# Script: build single-ligand GROMOS topology files for the ATB solvation
# free-energy test set.  (Indentation reconstructed; review before running.)
import os, sys, glob
from collections import defaultdict
from pygromos.gromos.gromosPP import GromosPP
from pygromos.gromos.gromosXX import GromosXX
from pygromos.files import imd
import restraintmaker

#CHANGE HERE
# Machine-specific paths and feature toggles.
gromos_bin_path = "/home/bschroed/Documents/code/gromosPP/installed/bin"
restraintmaker_path = os.path.abspath(os.path.dirname(restraintmaker.__file__)+"/..")
control_dict = {
    "gen_resn_lib": False,
    "gen_single_tops": False, #Buggy!
}

#%%
gromPP = GromosPP(gromos_bin_path)
atb_dirs = restraintmaker_path+"/devtools/otherScripts/b_ATB_solvationFreeEnergies/ATB_molecules"
sets_dir = restraintmaker_path+"/devtools/otherScripts/b_ATB_solvationFreeEnergies/sets"
mstate_dir = sets_dir+"/multistate"
pairwise_dir = sets_dir+"/pairwise"
# Make sure the output directories exist.
if(not os.path.exists(mstate_dir)):
    os.mkdir(mstate_dir)
if (not os.path.exists(pairwise_dir)):
    os.mkdir(pairwise_dir)
sys.path.append(atb_dirs+"/..")
import utils_test_set_ATB as util

#RESNlib
# Residue-name translation library mapping short names to original names.
resn_lib_path = sets_dir+"/resn_lib.lib"
if(control_dict['gen_resn_lib']):
    #translation lib
    from pygromos.files.otherfiles import residue_library
    long_short = {}
    mol_names = util.translate
    for ID in mol_names:
        long_short.update({mol_names[ID]["short"]:[mol_names[ID]["orig_name"]]})
    resn_lib = residue_library.residue_library()
    resn_lib.RESIDUENAMELIB.pdb_top.update(long_short)
    resn_lib.write(resn_lib_path)

#Generate TOPOS - THIS STEP IS BUGGY don't use, rather manual?
if (control_dict['gen_single_tops']):
    for mol_dir in os.listdir(atb_dirs):
        print(mol_dir)
        mtb_path = glob.glob(atb_dirs+"/"+mol_dir+"/*.mtb")[0]
        ifp_path = glob.glob(atb_dirs+"/"+mol_dir+"/*.ifp")[0]
        # Derive the molecule name from the .mtb file name.
        mol_name = "_"+os.path.basename(mtb_path).split("_")[1] if(not mtb_path.startswith("_")) else os.path.basename(mtb_path).split("_")[0]
        top_path = atb_dirs+"/"+mol_dir+"/"+mol_name+".top"
        gromPP.make_top(out_top_path=top_path, in_sequence=mol_name, in_solvent="H2O",
                        in_building_block_lib_path=mtb_path, in_parameter_lib_path=ifp_path, use_argfile=False)

#Systems
##get all_single file_tops:
all_tops = glob.glob(atb_dirs+"/*/*top")
state_all_tops={os.path.basename(value).split(".")[0]: value for value in all_tops}
all_mstate_sys = glob.glob(mstate_dir+"/*")
| 33.695652 | 142 | 0.732903 | #%%
import os, sys, glob
from collections import defaultdict
from pygromos.gromos.gromosPP import GromosPP
from pygromos.gromos.gromosXX import GromosXX
from pygromos.files import imd
import restraintmaker
#CHANGE HERE
gromos_bin_path = "/home/bschroed/Documents/code/gromosPP/installed/bin"
restraintmaker_path = os.path.abspath(os.path.dirname(restraintmaker.__file__)+"/..")
control_dict = {
"gen_resn_lib": False,
"gen_single_tops": False, #Buggy!
}
#%%
gromPP = GromosPP(gromos_bin_path)
atb_dirs = restraintmaker_path+"/devtools/otherScripts/b_ATB_solvationFreeEnergies/ATB_molecules"
sets_dir = restraintmaker_path+"/devtools/otherScripts/b_ATB_solvationFreeEnergies/sets"
mstate_dir = sets_dir+"/multistate"
pairwise_dir = sets_dir+"/pairwise"
if(not os.path.exists(mstate_dir)):
os.mkdir(mstate_dir)
if (not os.path.exists(pairwise_dir)):
os.mkdir(pairwise_dir)
sys.path.append(atb_dirs+"/..")
import utils_test_set_ATB as util
#RESNlib
resn_lib_path = sets_dir+"/resn_lib.lib"
if(control_dict['gen_resn_lib']):
#translation lib
from pygromos.files.otherfiles import residue_library
long_short = {}
mol_names = util.translate
for ID in mol_names:
long_short.update({mol_names[ID]["short"]:[mol_names[ID]["orig_name"]]})
resn_lib = residue_library.residue_library()
resn_lib.RESIDUENAMELIB.pdb_top.update(long_short)
resn_lib.write(resn_lib_path)
#Generate TOPOS - THIS STEP IS BUGGY don't use, rather manual?
if (control_dict['gen_single_tops']):
for mol_dir in os.listdir(atb_dirs):
print(mol_dir)
mtb_path = glob.glob(atb_dirs+"/"+mol_dir+"/*.mtb")[0]
ifp_path = glob.glob(atb_dirs+"/"+mol_dir+"/*.ifp")[0]
mol_name = "_"+os.path.basename(mtb_path).split("_")[1] if(not mtb_path.startswith("_")) else os.path.basename(mtb_path).split("_")[0]
top_path = atb_dirs+"/"+mol_dir+"/"+mol_name+".top"
gromPP.make_top(out_top_path=top_path, in_sequence=mol_name, in_solvent="H2O",
in_building_block_lib_path=mtb_path, in_parameter_lib_path=ifp_path, use_argfile=False)
#Systems
##get all_single file_tops:
all_tops = glob.glob(atb_dirs+"/*/*top")
state_all_tops={os.path.basename(value).split(".")[0]: value for value in all_tops}
all_mstate_sys = glob.glob(mstate_dir+"/*")
| 0 | 0 | 0 |
7c4114e3fdb667259e1827035838bb4fd2294646 | 1,228 | py | Python | examples/graph/football.py | theaverageguy/networkx | b2b74b3ba028ef3788f796aa64b037c8ea446539 | [
"BSD-3-Clause"
] | null | null | null | examples/graph/football.py | theaverageguy/networkx | b2b74b3ba028ef3788f796aa64b037c8ea446539 | [
"BSD-3-Clause"
] | null | null | null | examples/graph/football.py | theaverageguy/networkx | b2b74b3ba028ef3788f796aa64b037c8ea446539 | [
"BSD-3-Clause"
] | 2 | 2016-09-04T10:59:12.000Z | 2020-02-17T07:43:04.000Z | #!/usr/bin/env python
"""
Load football network in GML format and compute some network statistcs.

Shows how to download GML graph in a zipped file, unpack it, and load
into a NetworkX graph.

Requires Internet connection to download the URL
http://www-personal.umich.edu/~mejn/netdata/football.zip
"""
# Author: Aric Hagberg (hagberg@lanl.gov)

#    Copyright (C) 2007-2016 by
#    Aric Hagberg <hagberg@lanl.gov>
#    Dan Schult <dschult@colgate.edu>
#    Pieter Swart <swart@lanl.gov>
#    All rights reserved.
#    BSD license.

from networkx import *

url="http://www-personal.umich.edu/~mejn/netdata/football.zip"

try: # Python 3.x
    import urllib.request as urllib
except ImportError: # Python 2.x
    import urllib
import io
import zipfile

sock = urllib.urlopen(url)  # open URL
s=io.BytesIO(sock.read()) # read into BytesIO "file"
sock.close()

zf = zipfile.ZipFile(s) # zipfile object
txt=zf.read('football.txt').decode() # read info file
gml=zf.read('football.gml').decode() # read gml data
# throw away bogus first line with # from mejn files
gml=gml.split('\n')[1:]
G=parse_gml(gml) # parse gml data

print(txt)
# print degree for each team - number of games
for n,d in G.degree():
    print('%s %d' % (n, d))
| 26.12766 | 71 | 0.708469 | #!/usr/bin/env python
"""
Load football network in GML format and compute some network statistcs.
Shows how to download GML graph in a zipped file, unpack it, and load
into a NetworkX graph.
Requires Internet connection to download the URL
http://www-personal.umich.edu/~mejn/netdata/football.zip
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
# Copyright (C) 2007-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from networkx import *
url="http://www-personal.umich.edu/~mejn/netdata/football.zip"
try: # Python 3.x
import urllib.request as urllib
except ImportError: # Python 2.x
import urllib
import io
import zipfile
sock = urllib.urlopen(url) # open URL
s=io.BytesIO(sock.read()) # read into BytesIO "file"
sock.close()
zf = zipfile.ZipFile(s) # zipfile object
txt=zf.read('football.txt').decode() # read info file
gml=zf.read('football.gml').decode() # read gml data
# throw away bogus first line with # from mejn files
gml=gml.split('\n')[1:]
G=parse_gml(gml) # parse gml data
print(txt)
# print degree for each team - number of games
for n,d in G.degree():
print('%s %d' % (n, d))
| 0 | 0 | 0 |
ab45ba28ece2b6f08898955e44b4f746e4747d64 | 2,390 | py | Python | locations/spiders/goodwill.py | davidchiles/alltheplaces | 6f35f6cd652e7462107ead0a77f322caff198653 | [
"MIT"
] | 297 | 2017-12-07T01:29:14.000Z | 2022-03-29T06:58:01.000Z | locations/spiders/goodwill.py | davidchiles/alltheplaces | 6f35f6cd652e7462107ead0a77f322caff198653 | [
"MIT"
] | 2,770 | 2017-11-28T04:20:21.000Z | 2022-03-31T11:29:16.000Z | locations/spiders/goodwill.py | davidchiles/alltheplaces | 6f35f6cd652e7462107ead0a77f322caff198653 | [
"MIT"
] | 111 | 2017-11-27T21:40:02.000Z | 2022-01-22T01:21:52.000Z | # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
# Maps the Goodwill locations API's numeric category codes to readable labels.
CATEGORY_MAPPING = {
    '1': 'Donation Site',
    '2': 'Outlet',
    '3': 'Retail Store',
    '4': 'Job & Career Support',
    '5': 'Headquarters'
}
| 33.661972 | 106 | 0.516736 | # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
CATEGORY_MAPPING = {
'1': 'Donation Site',
'2': 'Outlet',
'3': 'Retail Store',
'4': 'Job & Career Support',
'5': 'Headquarters'
}
class GoodwillSpider(scrapy.Spider):
    """Scrapy spider that queries the Goodwill store-locator endpoint around
    a grid of US centroid points and yields one GeojsonPointItem per store."""
    name = "goodwill"
    item_attributes = { 'brand': "Goodwill" }
    allowed_domains = ['www.goodwill.org']
    download_delay = 0.2

    def start_requests(self):
        """POST one locator query per 25-mile-radius US centroid point."""
        url = 'https://www.goodwill.org/getLocations.php'
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:
            next(points)  # Ignore the header
            for point in points:
                _, lat, lon = point.strip().split(',')
                # Unable to find a way to specify a search radius
                # Appears to use a set search radius somewhere > 25mi, using 25mi to be safe
                form_data = {
                    'lat': '{}'.format(lat),
                    'lng': '{}'.format(lon),
                    'cats': '3,1,2,4,5'  # Includes donation sites
                }
                yield scrapy.http.FormRequest(url=url, method='POST', formdata=form_data, headers=headers,
                                              callback=self.parse)

    def parse(self, response):
        """Translate the JSON store list into GeojsonPointItem objects."""
        data = json.loads(response.body_as_unicode())
        for store in data:
            service_codes = store.get("services")
            store_categories = []
            for code in service_codes:
                store_categories.append(CATEGORY_MAPPING[code])
            properties = {
                'name': store["name"],
                'ref': store["id"],
                'addr_full': store["address1"],
                'city': store["city"],
                'state': store["state"],
                'postcode': store["postal_code"],
                'country': store["country"],
                'phone': store.get("phone"),
                'website': store.get("website") or response.url,
                'lat': store.get("lat"),
                'lon': store.get("lng"),
                'extras': {
                    'service_codes': service_codes,
                    'store_categories': store_categories
                }
            }
            yield GeojsonPointItem(**properties)
| 1,915 | 205 | 23 |
2ea619d0142a49aa8f384ca7d65a3b1adde4ebbc | 2,170 | py | Python | apps/core/manager.py | zjjott/html | 68429832d8b022602915a267a62051f4869f430f | [
"MIT"
] | null | null | null | apps/core/manager.py | zjjott/html | 68429832d8b022602915a267a62051f4869f430f | [
"MIT"
] | null | null | null | apps/core/manager.py | zjjott/html | 68429832d8b022602915a267a62051f4869f430f | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import unicode_literals
import sys
from IPython import start_ipython
import argparse
from apps.core.models.client import Client
import apps.conf # flake8: noqa
from tornado.options import options
import subprocess
managers = [ShellManage, DBShellManage, # flake8: noqa
RedisManage,
]
| 26.790123 | 69 | 0.621659 | # coding=utf-8
from __future__ import unicode_literals
import sys
from IPython import start_ipython
import argparse
from apps.core.models.client import Client
import apps.conf # flake8: noqa
from tornado.options import options
import subprocess
class BaseManage(object):
    """Base class for management CLI subcommands.

    Each subclass registers itself as an argparse subparser and binds the
    parsed namespace's `func` default to its own start() method.
    """
    name = None  # subcommand name used on the command line
    doc = ""  # used as the subcommand's help text

    def __init__(self, subparsers):
        self.parser = subparsers.add_parser(self.name, help=self.doc)
        self.add_arguments()
        self.parser.set_defaults(func=self.start)

    def add_arguments(self):
        """Add command-line arguments here, or leave empty."""
        pass

    def add_argument(self, *args, **kwargs):
        """Proxy to this subcommand's parser.add_argument()."""
        self.parser.add_argument(*args, **kwargs)

    def start(self, args):
        """Run the subcommand; must be overridden by subclasses."""
        raise NotImplementedError(
            "manage need implement start func to start command")
class ShellManage(BaseManage):
    """Subcommand: open an IPython shell with the Tornado app environment."""
    name = "shell"
    doc = "ipython shell with tornado env"

    def add_arguments(self):
        """Forward any remaining CLI arguments straight to IPython."""
        self.add_argument('params', nargs=argparse.REMAINDER,
                          help='other params to ipython')

    def start(self, args):
        # Exit with IPython's own exit status.
        sys.exit(start_ipython(args.params))
class DBShellManage(BaseManage):
    """Subcommand: open the configured database's interactive shell client."""
    name = "dbshell"
    doc = "database shell client"

    def add_arguments(self):
        """Forward any remaining CLI arguments to the database client."""
        self.add_argument('params', nargs=argparse.REMAINDER,
                          help='other params to subprocess')

    def start(self, args):
        Client.runshell(args.params)
class RedisManage(BaseManage):
    """Subcommand: launch redis-cli against the configured cache backend."""
    name = "redis"
    doc = "start redis-cli"

    def start(self, args):
        """Build and run a redis-cli command from Tornado's cache options.

        :param args: parsed argparse namespace (unused here).
        """
        # Improvement: the original immediately rebound the parameter
        # `args` to the command list, shadowing the argparse namespace;
        # use a distinct local name instead.
        cmd = ['redis-cli']
        redis_options = options.cache_options
        if "host" in redis_options:
            cmd.append("-h")
            cmd.append("%s" % redis_options['host'])
        if "port" in redis_options:
            cmd.append("-p")
            cmd.append("%s" % redis_options['port'])
        if "selected_db" in redis_options:
            cmd.append("-n")
            cmd.append("%s" % redis_options['selected_db'])
        subprocess.call(cmd)
# Registry of subcommand classes picked up by the management CLI entry point.
managers = [ShellManage, DBShellManage, # flake8: noqa
            RedisManage,
            ]
| 893 | 940 | 92 |
4207937d1c51c6694182c6a70926068e88bb2a6f | 8,041 | py | Python | direct/src/task/FrameProfiler.py | cmarshall108/panda3d-python3 | 8bea2c0c120b03ec1c9fd179701fdeb7510bb97b | [
"PHP-3.0",
"PHP-3.01"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | direct/src/task/FrameProfiler.py | cmarshall108/panda3d-python3 | 8bea2c0c120b03ec1c9fd179701fdeb7510bb97b | [
"PHP-3.0",
"PHP-3.01"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/direct/task/FrameProfiler.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 1 | 2021-04-09T00:02:59.000Z | 2021-04-09T00:02:59.000Z | from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm.StatePush import FunctionCall
from direct.showbase.PythonUtil import formatTimeExact, normalDistrib
from direct.task import Task
| 43 | 97 | 0.57667 | from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm.StatePush import FunctionCall
from direct.showbase.PythonUtil import formatTimeExact, normalDistrib
from direct.task import Task
class FrameProfiler:
    """Periodically profiles single frames and aggregates the results into
    exponentially longer log periods (hour, 4h, 12h, day, 2 days, ...).

    NOTE(review): indentation reconstructed from syntax.
    """
    notify = directNotify.newCategory('FrameProfiler')
    # because of precision requirements, all times related to the profile/log
    # schedule are stored as integers
    Minute = 60
    Hour = 60 * Minute
    Day = 24 * Hour

    def __init__(self):
        Hour = FrameProfiler.Hour
        # how long to wait between frame profiles
        self._period = 2 * FrameProfiler.Minute
        if config.GetBool('frequent-frame-profiles', 0):
            self._period = 1 * FrameProfiler.Minute
        # used to prevent profile from being taken exactly every 'period' seconds
        self._jitterMagnitude = self._period * .75
        # when to log output
        # each entry must be an integer multiple of all previous entries
        # as well as an integer multiple of the period
        self._logSchedule = [ 1 * FrameProfiler.Hour,
                              4 * FrameProfiler.Hour,
                              12 * FrameProfiler.Hour,
                              1 * FrameProfiler.Day,
                              ] # day schedule proceeds as 1, 2, 4, 8 days, etc.
        if config.GetBool('frequent-frame-profiles', 0):
            self._logSchedule = [ 1 * FrameProfiler.Minute,
                                  4 * FrameProfiler.Minute,
                                  12 * FrameProfiler.Minute,
                                  24 * FrameProfiler.Minute,
                                  ]
        for t in self._logSchedule:
            #assert isInteger(t)
            # make sure the period is evenly divisible into each element of the log schedule
            assert (t % self._period) == 0
        # make sure each element of the schedule is evenly divisible into each subsequent element
        for i in range(len(self._logSchedule)):
            e = self._logSchedule[i]
            for j in range(i, len(self._logSchedule)):
                assert (self._logSchedule[j] % e) == 0
        #assert isInteger(self._period)
        # Track the profile-frames state variable; toggles _setEnabled.
        self._enableFC = FunctionCall(self._setEnabled, taskMgr.getProfileFramesSV())
        self._enableFC.pushCurrentState()

    def destroy(self):
        """Disable profiling and release the state-tracking hook."""
        self._enableFC.set(False)
        self._enableFC.destroy()

    def _setEnabled(self, enabled):
        """Start or stop the profiling loop, creating/releasing all state."""
        if enabled:
            self.notify.info('frame profiler started')
            self._startTime = globalClock.getFrameTime()
            self._profileCounter = 0
            self._jitter = None
            self._period2aggregateProfile = {}
            self._id2session = {}
            self._id2task = {}
            # don't profile process startup
            self._task = taskMgr.doMethodLater(self._period, self._scheduleNextProfileDoLater,
                                               'FrameProfilerStart-%s' % serialNum())
        else:
            self._task.remove()
            del self._task
            for session in self._period2aggregateProfile.values():
                session.release()
            del self._period2aggregateProfile
            for task in self._id2task.values():
                task.remove()
            del self._id2task
            for session in self._id2session.values():
                session.release()
            del self._id2session
            self.notify.info('frame profiler stopped')

    def _scheduleNextProfileDoLater(self, task):
        """Task shim: schedule the next profile, then finish this task."""
        self._scheduleNextProfile()
        return task.done

    def _scheduleNextProfile(self):
        """Kick off a one-frame profile session and schedule the one after it."""
        self._profileCounter += 1
        self._timeElapsed = self._profileCounter * self._period
        #assert isInteger(self._timeElapsed)
        time = self._startTime + self._timeElapsed
        # vary the actual delay between profiles by a random amount to prevent interaction
        # with periodic events
        jitter = self._jitter
        if jitter is None:
            jitter = normalDistrib(-self._jitterMagnitude, self._jitterMagnitude)
            time += jitter
        else:
            time -= jitter
            jitter = None
        self._jitter = jitter
        sessionId = serialNum()
        session = taskMgr.getProfileSession('FrameProfile-%s' % sessionId)
        self._id2session[sessionId] = session
        taskMgr.profileFrames(num=1, session=session, callback=Functor(
            self._analyzeResults, sessionId))
        # schedule the next profile
        delay = max(time - globalClock.getFrameTime(), 0.)
        self._task = taskMgr.doMethodLater(delay, self._scheduleNextProfileDoLater,
                                           'FrameProfiler-%s' % serialNum())

    def _analyzeResults(self, sessionId):
        # do the analysis in a task 1) to separate the processing from the profiled frame,
        # and 2) to get the processing to show up in a named task instead of in the taskMgr
        self._id2task[sessionId] = taskMgr.add(
            Functor(self._doAnalysis, sessionId), 'FrameProfilerAnalysis-%s' % sessionId)

    def _doAnalysis(self, sessionId, task):
        """Task shim: drive the analysis generator, resuming across frames."""
        if hasattr(task, '_generator'):
            gen = task._generator
        else:
            gen = self._doAnalysisGen(sessionId)
            task._generator = gen
        result = next(gen)
        if result == Task.done:
            del task._generator
        return result

    def _doAnalysisGen(self, sessionId):
        """Aggregate one finished session into the log-period hierarchy,
        emitting log output for any periods that are due."""
        # generator to limit max number of profile loggings per frame
        p2ap = self._period2aggregateProfile
        self._id2task.pop(sessionId)
        session = self._id2session.pop(sessionId)
        if session.profileSucceeded():
            # always add this profile to the first aggregated profile
            period = self._logSchedule[0]
            if period not in self._period2aggregateProfile:
                p2ap[period] = session.getReference()
            else:
                p2ap[period].aggregate(session)
        else:
            self.notify.warning('frame profile did not succeed')
        session.release()
        session = None
        counter = 0
        # log profiles when it's time, and aggregate them upwards into the
        # next-longer profile
        for pi in range(len(self._logSchedule)):
            period = self._logSchedule[pi]
            if (self._timeElapsed % period) == 0:
                if period in p2ap:
                    # delay until the next frame if we've already processed N profiles this frame
                    if counter >= 3:
                        counter = 0
                        yield Task.cont
                    self.notify.info('aggregate profile of sampled frames over last %s\n%s' %
                                     (formatTimeExact(period), p2ap[period].getResults()))
                    counter += 1
                    # aggregate this profile into the next larger profile
                    nextIndex = pi + 1
                    if nextIndex >= len(self._logSchedule):
                        # if we're adding a new period to the end of the log period table,
                        # set it to double the duration of the current longest period
                        nextPeriod = period * 2
                        self._logSchedule.append(nextPeriod)
                    else:
                        nextPeriod = self._logSchedule[nextIndex]
                    if nextPeriod not in p2ap:
                        p2ap[nextPeriod] = p2ap[period].getReference()
                    else:
                        p2ap[nextPeriod].aggregate(p2ap[period])
                    # this profile is now represented in the next larger profile
                    # throw it out
                    p2ap[period].release()
                    del p2ap[period]
            else:
                # current time is not divisible evenly into selected period, and all higher
                # periods are multiples of this one
                break
        yield Task.done
44878be67aae6a9ac74d9c0d731b8c221c71e809 | 2,329 | py | Python | src/tools/visum-transformer/read_infrastructure/src/read_infrastructure/util/lines.py | railtoolkit/OpenLinTim | 27eba8b6038946ce162e9f7bbc0bd23045029d51 | [
"MIT"
] | null | null | null | src/tools/visum-transformer/read_infrastructure/src/read_infrastructure/util/lines.py | railtoolkit/OpenLinTim | 27eba8b6038946ce162e9f7bbc0bd23045029d51 | [
"MIT"
] | null | null | null | src/tools/visum-transformer/read_infrastructure/src/read_infrastructure/util/lines.py | railtoolkit/OpenLinTim | 27eba8b6038946ce162e9f7bbc0bd23045029d51 | [
"MIT"
] | null | null | null | import logging
from core.model.graph import Graph
from core.model.lines import LinePool, Line
from core.model.ptn import Link, Stop
from common.model.net import Net
from common.util.constants import LINE_SECTION_HEADER, LINE_ROUTE_ITEMS_SECTION_HEADER, \
LINE_ROUTE_ITEMS_DIRECTION_HEADER, LINE_ROUTE_ITEMS_LINE_NAME_HEADER, LINE_ROUTE_ITEMS_NODE_HEADER
logger = logging.getLogger(__name__)
| 48.520833 | 124 | 0.719193 | import logging
from core.model.graph import Graph
from core.model.lines import LinePool, Line
from core.model.ptn import Link, Stop
from common.model.net import Net
from common.util.constants import LINE_SECTION_HEADER, LINE_ROUTE_ITEMS_SECTION_HEADER, \
LINE_ROUTE_ITEMS_DIRECTION_HEADER, LINE_ROUTE_ITEMS_LINE_NAME_HEADER, LINE_ROUTE_ITEMS_NODE_HEADER
logger = logging.getLogger(__name__)
def find_lines(net: Net, ptn: Graph[Stop, Link], cost_factor_line: float, cost_factor_length: float,
               cost_factor_edge: float) -> LinePool:
    """Build the line pool from a VISUM net file.

    Creates one Line per row of the line section, then walks the
    forward-direction line-route items, mapping each consecutive stop pair
    onto a PTN link.

    :param net: parsed VISUM net file
    :param ptn: the public transportation network providing stops and links
    :param cost_factor_line: fixed cost added per line
    :param cost_factor_length: cost per unit length of each used link
    :param cost_factor_edge: cost per used link
    :return: the assembled line pool
    """
    stops_per_line = {}  # Dict[str, List[Stop]]
    stops_by_name = {}  # Dict[str, Stop]
    lines = {}  # Dict[str, Line]
    for stop in ptn.getNodes():
        stops_by_name[stop.getShortName()] = stop
    index = 1
    for row in net.get_section(LINE_SECTION_HEADER).get_rows():
        name = row[0]
        stops_per_line[name] = []
        lines[name] = Line(index, False, cost=cost_factor_line)
        index += 1
    line_route_section = net.get_section(LINE_ROUTE_ITEMS_SECTION_HEADER)
    for row in line_route_section.get_rows():
        # BUG FIX: the original used `is "<"`, comparing object identity to
        # a string literal (a SyntaxWarning on Python >= 3.8).  A freshly
        # parsed string is generally a different object, so backward
        # direction rows were never reliably skipped; compare by equality.
        if line_route_section.get_entry_from_row(row, LINE_ROUTE_ITEMS_DIRECTION_HEADER) == "<":
            continue
        name = line_route_section.get_entry_from_row(row, LINE_ROUTE_ITEMS_LINE_NAME_HEADER)
        stop_name = line_route_section.get_entry_from_row(row, LINE_ROUTE_ITEMS_NODE_HEADER)
        stops_per_line[name].append(stops_by_name[stop_name])
    for line_name, stop_names in stops_per_line.items():
        logger.debug("Found line {} with stops {}".format(line_name, stop_names))
        for left_stop, right_stop in zip(stop_names, stop_names[1:]):
            link = get_link(ptn, left_stop, right_stop)
            lines[line_name].addLink(link, True, cost_factor_length, cost_factor_edge)
    result = LinePool()
    for line in lines.values():
        result.addLine(line)
    return result
def get_link(ptn: Graph[Stop, Link], left_stop: Stop, right_stop: Stop) -> Link:
    """Find the PTN link leading from ``left_stop`` to ``right_stop``.

    Undirected links count in both directions.

    :raises RuntimeError: if no such link exists in the network.
    """
    for edge in ptn.getOutgoingEdges(left_stop):
        reaches_forward = edge.getRightNode() == right_stop
        reaches_backward = not edge.isDirected() and edge.getLeftNode() == right_stop
        if reaches_forward or reaches_backward:
            return edge
    raise RuntimeError("Could not find link between {} and {}".format(left_stop, right_stop))
bacc25571619ca1a153b7a8436183e857481c80b | 1,334 | py | Python | venv/Lib/site-packages/mcipc/rcon/je/commands/teleport.py | Svesnav2/Discord-Bot-Minecraft-server-status | ee34948e741930567a3adb557197523f9d32ace1 | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/mcipc/rcon/je/commands/teleport.py | Svesnav2/Discord-Bot-Minecraft-server-status | ee34948e741930567a3adb557197523f9d32ace1 | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/mcipc/rcon/je/commands/teleport.py | Svesnav2/Discord-Bot-Minecraft-server-status | ee34948e741930567a3adb557197523f9d32ace1 | [
"Unlicense"
] | null | null | null | """Implementation of the teleport command."""
from mcipc.rcon.client import Client
from mcipc.rcon.types import Anchor, Rotation, Vec3
__all__ = ['teleport']
def teleport(self: Client, *, destination: str = None, location: Vec3 = None,
             targets: str = None, rotation: Rotation = None,
             facing_location: Vec3 = None, facing_entity: str = None,
             facing_anchor: Anchor = None) -> str:
    """Teleports entities via the Minecraft ``/teleport`` command.

    Exactly one of the following argument combinations must be used:

    * ``destination`` or ``location`` alone — teleport the command's
      executor to that entity or position.
    * ``targets`` + ``destination`` — teleport the selected entities to
      another entity.
    * ``targets`` + ``location`` — teleport the selected entities to a
      position, optionally oriented by ``rotation``, or by a ``facing``
      clause built from ``facing_location`` or
      ``facing_entity``/``facing_anchor``.

    :raises TypeError: if no valid combination of arguments is given.
    :return: the server's response text.
    """
    command = ['teleport']
    if targets is not None:
        command.append(targets)
        if location is not None:
            command.append(location)
            # Orientation is optional; the three variants are mutually
            # exclusive and checked in priority order.
            if rotation is not None:
                command.append(rotation)
            elif facing_location is not None:
                command += ['facing', facing_location]
            elif facing_entity is not None:
                # NOTE(review): facing_anchor is appended even when it is
                # None — assumes callers always pass it together with
                # facing_entity; confirm against the /teleport syntax.
                command += ['facing', 'entity', facing_entity, facing_anchor]
        elif destination is not None:
            command.append(destination)
        else:
            raise TypeError('Must specify either destination or location.')
    elif destination is not None:
        command.append(destination)
    elif location is not None:
        command.append(location)
    else:
        raise TypeError('Must specify destination, location or targets.')
    return self.run(*command)
| 31.761905 | 77 | 0.621439 | """Implementation of the teleport command."""
from mcipc.rcon.client import Client
from mcipc.rcon.types import Anchor, Rotation, Vec3
__all__ = ['teleport']
def teleport(self: Client, *, destination: str = None, location: Vec3 = None,
             targets: str = None, rotation: Rotation = None,
             facing_location: Vec3 = None, facing_entity: str = None,
             facing_anchor: Anchor = None) -> str:
    """Teleports the player.

    Builds a ``/teleport`` command from the given keyword arguments and
    runs it on the server, returning the server's response.
    """
    args = ['teleport']
    if targets is None:
        # No explicit targets: teleport the executor to an entity or a spot.
        if destination is not None:
            args.append(destination)
        elif location is not None:
            args.append(location)
        else:
            raise TypeError('Must specify destination, location or targets.')
        return self.run(*args)
    args.append(targets)
    if location is not None:
        args.append(location)
        # Optional orientation, one of three mutually exclusive variants.
        if rotation is not None:
            args.append(rotation)
        elif facing_location is not None:
            args.extend(['facing', facing_location])
        elif facing_entity is not None:
            args.extend(['facing', 'entity', facing_entity, facing_anchor])
    elif destination is not None:
        args.append(destination)
    else:
        raise TypeError('Must specify either destination or location.')
    return self.run(*args)
| 0 | 0 | 0 |
36dd5b81f7f62cd69ec877d923d43b280523fb8d | 4,046 | py | Python | scripts/function_puller.py | alcuadrado/eth2.0-specs | f55499c0baad5d22a6f7749a9153374c7edf9267 | [
"CC0-1.0"
] | null | null | null | scripts/function_puller.py | alcuadrado/eth2.0-specs | f55499c0baad5d22a6f7749a9153374c7edf9267 | [
"CC0-1.0"
] | null | null | null | scripts/function_puller.py | alcuadrado/eth2.0-specs | f55499c0baad5d22a6f7749a9153374c7edf9267 | [
"CC0-1.0"
] | null | null | null | import re
from typing import Dict, Tuple, NewType
FUNCTION_REGEX = r'^def [\w_]*'
BEGIN_INSERT_REGEX = r'# begin insert '
END_INSERT_REGEX = r'# end insert'

SpecObject = NewType('SpecObjects', Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]])


def get_spec(file_name: str) -> SpecObject:
    """
    Takes in the file name of a spec.md file, opens it and returns the following objects:
    functions = {function_name: function_code}
    custom_types = {type_name: type_definition}
    constants = {constant_name: constant_code}
    ssz_objects = {object_name: object}
    inserts = {insert_tag: code to be inserted}

    Note: This function makes heavy use of the inherent ordering of dicts,
    if this is not supported by your python version, it will not work.
    """
    pulling_from = None  # line number of start of latest object
    current_name = None  # most recent section title
    insert_name = None  # stores the label of the current insert object
    functions = {}
    constants = {}
    ssz_objects = {}
    inserts = {}
    function_matcher = re.compile(FUNCTION_REGEX)
    inserts_matcher = re.compile(BEGIN_INSERT_REGEX)
    is_ssz = False
    custom_types = {}
    # BUG FIX: the original `open(file_name).readlines()` leaked the file
    # handle until garbage collection; close it deterministically instead.
    with open(file_name) as spec_file:
        spec_lines = spec_file.readlines()
    for linenum, line in enumerate(spec_lines):
        line = line.rstrip()
        # A markdown header ending in a backticked name (e.g. "#### `foo`")
        # names the next object.
        if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
            current_name = line[line[:-1].rfind('`') + 1: -1]
        if line[:9] == '```python':
            assert pulling_from is None
            pulling_from = linenum + 1
        elif line[:3] == '```':
            pulling_from = None
        elif inserts_matcher.match(line) is not None:
            # Find @insert names
            insert_name = re.search(r'@[\w]*', line).group(0)
        elif insert_name is not None:
            # In insert mode, either the next line is more code, or the end of the insert
            if re.match(END_INSERT_REGEX, line) is not None:
                insert_name = None
            else:
                inserts[insert_name] = inserts.get(insert_name, '') + line + '\n'
        else:
            # Handle function definitions & ssz_objects
            if pulling_from is not None:
                # SSZ Object
                if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':
                    name = line[6:-12]
                    # Check consistency with markdown header
                    assert name == current_name
                    is_ssz = True
                # function definition
                elif function_matcher.match(line) is not None:
                    current_name = function_matcher.match(line).group(0)
                    is_ssz = False
                if is_ssz:
                    ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n'
                else:
                    functions[current_name] = functions.get(current_name, '') + line + '\n'
            # Handle constant and custom types table entries
            elif pulling_from is None and len(line) > 0 and line[0] == '|':
                row = line[1:].split('|')
                if len(row) >= 2:
                    for i in range(2):
                        row[i] = row[i].strip().strip('`')
                        if '`' in row[i]:
                            row[i] = row[i][:row[i].find('`')]
                    if row[1].startswith('uint') or row[1].startswith('Bytes'):
                        custom_types[row[0]] = row[1]
                    else:
                        # Constants must look like ALL_CAPS identifiers.
                        eligible = True
                        if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
                            eligible = False
                        for c in row[0]:
                            if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
                                eligible = False
                        if eligible:
                            constants[row[0]] = row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')
    return functions, custom_types, constants, ssz_objects, inserts
| 45.460674 | 119 | 0.542511 | import re
from typing import Dict, Tuple, NewType
FUNCTION_REGEX = r'^def [\w_]*'
BEGIN_INSERT_REGEX = r'# begin insert '
END_INSERT_REGEX = r'# end insert'

SpecObject = NewType('SpecObjects', Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]])


def get_spec(file_name: str) -> SpecObject:
    """
    Takes in the file name of a spec.md file, opens it and returns the following objects:
    functions = {function_name: function_code}
    custom_types = {type_name: type_definition}
    constants = {constant_name: constant_code}
    ssz_objects = {object_name: object}
    inserts = {insert_tag: code to be inserted}

    Note: This function makes heavy use of the inherent ordering of dicts,
    if this is not supported by your python version, it will not work.
    """
    pulling_from = None  # line number of start of latest object
    current_name = None  # most recent section title
    insert_name = None  # stores the label of the current insert object
    functions = {}
    constants = {}
    ssz_objects = {}
    inserts = {}
    function_matcher = re.compile(FUNCTION_REGEX)
    inserts_matcher = re.compile(BEGIN_INSERT_REGEX)
    is_ssz = False
    custom_types = {}
    # BUG FIX: the original `open(file_name).readlines()` leaked the file
    # handle until garbage collection; close it deterministically instead.
    with open(file_name) as spec_file:
        spec_lines = spec_file.readlines()
    for linenum, line in enumerate(spec_lines):
        line = line.rstrip()
        # A markdown header ending in a backticked name (e.g. "#### `foo`")
        # names the next object.
        if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
            current_name = line[line[:-1].rfind('`') + 1: -1]
        if line[:9] == '```python':
            assert pulling_from is None
            pulling_from = linenum + 1
        elif line[:3] == '```':
            pulling_from = None
        elif inserts_matcher.match(line) is not None:
            # Find @insert names
            insert_name = re.search(r'@[\w]*', line).group(0)
        elif insert_name is not None:
            # In insert mode, either the next line is more code, or the end of the insert
            if re.match(END_INSERT_REGEX, line) is not None:
                insert_name = None
            else:
                inserts[insert_name] = inserts.get(insert_name, '') + line + '\n'
        else:
            # Handle function definitions & ssz_objects
            if pulling_from is not None:
                # SSZ Object
                if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':
                    name = line[6:-12]
                    # Check consistency with markdown header
                    assert name == current_name
                    is_ssz = True
                # function definition
                elif function_matcher.match(line) is not None:
                    current_name = function_matcher.match(line).group(0)
                    is_ssz = False
                if is_ssz:
                    ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n'
                else:
                    functions[current_name] = functions.get(current_name, '') + line + '\n'
            # Handle constant and custom types table entries
            elif pulling_from is None and len(line) > 0 and line[0] == '|':
                row = line[1:].split('|')
                if len(row) >= 2:
                    for i in range(2):
                        row[i] = row[i].strip().strip('`')
                        if '`' in row[i]:
                            row[i] = row[i][:row[i].find('`')]
                    if row[1].startswith('uint') or row[1].startswith('Bytes'):
                        custom_types[row[0]] = row[1]
                    else:
                        # Constants must look like ALL_CAPS identifiers.
                        eligible = True
                        if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
                            eligible = False
                        for c in row[0]:
                            if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
                                eligible = False
                        if eligible:
                            constants[row[0]] = row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')
    return functions, custom_types, constants, ssz_objects, inserts
| 0 | 0 | 0 |
d8bf094e28fe8390c3d84fdfd1c20be429c49d9e | 98 | py | Python | enthought/envisage/ui/action/action_set.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/envisage/ui/action/action_set.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/envisage/ui/action/action_set.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | # proxy module
from __future__ import absolute_import
from envisage.ui.action.action_set import *
| 24.5 | 43 | 0.836735 | # proxy module
from __future__ import absolute_import
from envisage.ui.action.action_set import *
| 0 | 0 | 0 |
f048d9e1dc62c096862cd11bad90b0b911d6cdc4 | 8,176 | py | Python | optimizers/tensorflow/LAMB.py | AlanFokCo/compensation-tools | e3fbf2f583ff370d32ffa0e2b6a0c57c20ca9eb0 | [
"Apache-2.0"
] | null | null | null | optimizers/tensorflow/LAMB.py | AlanFokCo/compensation-tools | e3fbf2f583ff370d32ffa0e2b6a0c57c20ca9eb0 | [
"Apache-2.0"
] | null | null | null | optimizers/tensorflow/LAMB.py | AlanFokCo/compensation-tools | e3fbf2f583ff370d32ffa0e2b6a0c57c20ca9eb0 | [
"Apache-2.0"
] | null | null | null | import warnings
from typing import Optional, Union, Callable, List
from typeguard import typechecked
import tensorflow as tf
from tensorflow_addons.utils.types import FloatTensorLike
from tensorflow_addons.optimizers.utils import is_variable_matched_by_regexes
@tf.keras.utils.register_keras_serializable(package="Addons")
class LAMB(tf.keras.optimizers.Optimizer):
"""Optimizer that implements the Layer-wise Adaptive Moments (LAMB).
See paper [Large Batch Optimization for Deep Learning: Training BERT
in 76 minutes](https://arxiv.org/abs/1904.00962).
"""
@typechecked
def _do_use_weight_decay(self, variable):
"""Whether to use L2 weight decay for `param_name`."""
return not is_variable_matched_by_regexes(
variable, self.exclude_from_weight_decay
)
def _do_layer_adaptation(self, variable):
"""Whether to do layer-wise learning rate adaptation for
`param_name`."""
return not is_variable_matched_by_regexes(
variable, self.exclude_from_layer_adaptation
) | 41.085427 | 84 | 0.625245 | import warnings
from typing import Optional, Union, Callable, List
from typeguard import typechecked
import tensorflow as tf
from tensorflow_addons.utils.types import FloatTensorLike
from tensorflow_addons.optimizers.utils import is_variable_matched_by_regexes
@tf.keras.utils.register_keras_serializable(package="Addons")
class LAMB(tf.keras.optimizers.Optimizer):
    """Optimizer that implements the Layer-wise Adaptive Moments (LAMB).

    See paper [Large Batch Optimization for Deep Learning: Training BERT
    in 76 minutes](https://arxiv.org/abs/1904.00962).
    """

    @typechecked
    def __init__(
        self,
        learning_rate: Union[FloatTensorLike, Callable] = 0.001,
        beta_1: FloatTensorLike = 0.9,
        beta_2: FloatTensorLike = 0.999,
        epsilon: FloatTensorLike = 1e-6,
        weight_decay: FloatTensorLike = 0.0,
        exclude_from_weight_decay: Optional[List[str]] = None,
        exclude_from_layer_adaptation: Optional[List[str]] = None,
        name: str = "LAMB",
        **kwargs,
    ):
        """Construct a new LAMB optimizer.

        Args:
            learning_rate: A float value or a callable learning-rate schedule.
            beta_1: Exponential decay rate for the first moment estimates.
            beta_2: Exponential decay rate for the second moment estimates.
            epsilon: Small constant for numerical stability.
            weight_decay: Weight decay rate.
            exclude_from_weight_decay: Regex patterns of variables excluded
                from weight decay.
            exclude_from_layer_adaptation: Regex patterns of variables
                excluded from layer-wise adaptation; defaults to
                ``exclude_from_weight_decay`` when not given.
            name: Optional name for the operations created when applying
                gradients.
            **kwargs: Keyword arguments forwarded to the base optimizer
                (``lr`` and the legacy ``weight_decay_rate`` are recognized).
        """
        # Backwards compatibility: the argument used to be called
        # `weight_decay_rate`.
        if "weight_decay_rate" in kwargs:
            warnings.warn(
                # BUG FIX: added the missing space between the two
                # concatenated literals of this user-facing message.
                "weight_decay_rate has been renamed to weight_decay, "
                "and will be deprecated in Addons 0.18.",
                DeprecationWarning,
            )
            weight_decay = kwargs["weight_decay_rate"]
            del kwargs["weight_decay_rate"]
        super().__init__(name, **kwargs)

        self._set_hyper("weight_decay", weight_decay)
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))

        # This is learning rate decay for using keras learning rate schedule.
        self._set_hyper("decay", self._initial_decay)
        self._set_hyper("beta_1", beta_1)
        self._set_hyper("beta_2", beta_2)
        # BUG FIX: `tf.backend_config` is not part of the public TensorFlow
        # API (it would raise AttributeError when `epsilon` is falsy); the
        # documented accessor for the fuzz factor is
        # `tf.keras.backend.epsilon()`.
        self.epsilon = epsilon or tf.keras.backend.epsilon()
        self.exclude_from_weight_decay = exclude_from_weight_decay
        # exclude_from_layer_adaptation is set to exclude_from_weight_decay if
        # the arg is None.
        if exclude_from_layer_adaptation:
            self.exclude_from_layer_adaptation = exclude_from_layer_adaptation
        else:
            self.exclude_from_layer_adaptation = exclude_from_weight_decay

    def _create_slots(self, var_list):
        # Create slots for the first and second moments.
        # Separate for-loops to respect the ordering of slot variables from v1.
        for var in var_list:
            self.add_slot(var, "m")
        for var in var_list:
            self.add_slot(var, "v")

    def _prepare_local(self, var_device, var_dtype, apply_state):
        """Precompute per-(device, dtype) coefficients shared by all updates."""
        super()._prepare_local(var_device, var_dtype, apply_state)

        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta_1_t = tf.identity(self._get_hyper("beta_1", var_dtype))
        beta_2_t = tf.identity(self._get_hyper("beta_2", var_dtype))
        weight_decay = tf.identity(self._get_hyper("weight_decay", var_dtype))
        beta_1_power = tf.pow(beta_1_t, local_step)
        beta_2_power = tf.pow(beta_2_t, local_step)
        apply_state[(var_device, var_dtype)].update(
            dict(
                weight_decay=weight_decay,
                epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
                beta_1_t=beta_1_t,
                beta_1_power=beta_1_power,
                one_minus_beta_1_t=1 - beta_1_t,
                beta_2_t=beta_2_t,
                beta_2_power=beta_2_power,
                one_minus_beta_2_t=1 - beta_2_t,
            )
        )

    def _resource_apply_dense(self, grad, var, apply_state=None):
        """Apply one LAMB update step for a dense gradient."""
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * coefficients["one_minus_beta_1_t"]
        m_t = m * coefficients["beta_1_t"] + m_scaled_g_values
        m_t = m.assign(m_t, use_locking=self._use_locking)
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * coefficients["one_minus_beta_2_t"]
        v_t = v * coefficients["beta_2_t"] + v_scaled_g_values
        v_t = v.assign(v_t, use_locking=self._use_locking)

        # Bias-corrected moment estimates.
        m_t_hat = m_t / (1.0 - coefficients["beta_1_power"])
        v_t_hat = v_t / (1.0 - coefficients["beta_2_power"])

        v_sqrt = tf.sqrt(v_t_hat)
        update = m_t_hat / (v_sqrt + coefficients["epsilon"])

        if self._do_use_weight_decay(var):
            update += coefficients["weight_decay"] * var

        ratio = 1.0
        if self._do_layer_adaptation(var):
            # Trust ratio ||w|| / ||update||, guarded against zero norms.
            w_norm = tf.norm(var, ord=2)
            g_norm = tf.norm(update, ord=2)
            ratio = tf.where(
                tf.greater(w_norm, 0),
                tf.where(tf.greater(g_norm, 0), (w_norm / g_norm), 1.0),
                1.0,
            )

        var_update = var - ratio * coefficients["lr_t"] * update
        return var.assign(var_update, use_locking=self._use_locking)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        """Apply one LAMB update step for a sparse (IndexedSlices) gradient."""
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * coefficients["one_minus_beta_1_t"]
        m_t = m.assign(m * coefficients["beta_1_t"], use_locking=self._use_locking)
        with tf.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * coefficients["one_minus_beta_2_t"]
        v_t = v.assign(v * coefficients["beta_2_t"], use_locking=self._use_locking)
        with tf.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)

        # Bias-corrected moment estimates.
        m_t_hat = m_t / (1.0 - coefficients["beta_1_power"])
        v_t_hat = v_t / (1.0 - coefficients["beta_2_power"])

        v_sqrt = tf.sqrt(v_t_hat)
        update = m_t_hat / (v_sqrt + coefficients["epsilon"])

        if self._do_use_weight_decay(var):
            update += coefficients["weight_decay"] * var

        ratio = 1.0
        if self._do_layer_adaptation(var):
            # Trust ratio ||w|| / ||update||, guarded against zero norms.
            w_norm = tf.norm(var, ord=2)
            g_norm = tf.norm(update, ord=2)
            ratio = tf.where(
                tf.greater(w_norm, 0),
                tf.where(tf.greater(g_norm, 0), (w_norm / g_norm), 1.0),
                1.0,
            )

        var_update = var.assign_sub(
            ratio * coefficients["lr_t"] * update, use_locking=self._use_locking
        )
        return tf.group(*[var_update, m_t, v_t])

    def get_config(self):
        """Return the serializable configuration of this optimizer."""
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter("learning_rate"),
                "weight_decay": self._serialize_hyperparameter("weight_decay"),
                "decay": self._serialize_hyperparameter("decay"),
                "beta_1": self._serialize_hyperparameter("beta_1"),
                "beta_2": self._serialize_hyperparameter("beta_2"),
                "epsilon": self.epsilon,
                "exclude_from_weight_decay": self.exclude_from_weight_decay,
                "exclude_from_layer_adaptation": self.exclude_from_layer_adaptation,
            }
        )
        return config

    def _do_use_weight_decay(self, variable):
        """Whether to use L2 weight decay for `param_name`."""
        return not is_variable_matched_by_regexes(
            variable, self.exclude_from_weight_decay
        )

    def _do_layer_adaptation(self, variable):
        """Whether to do layer-wise learning rate adaptation for
        `param_name`."""
        return not is_variable_matched_by_regexes(
            variable, self.exclude_from_layer_adaptation
        )
6a1c23894572a4e4e41bf47b5f43b2d3243c094a | 521 | py | Python | MainFiles/pythion/Assignment1_1_answer.py | Morris-wambua/pyKid | 99765ee9ee337efc2ba1f4be4007f34df0e6a8f1 | [
"MIT"
] | null | null | null | MainFiles/pythion/Assignment1_1_answer.py | Morris-wambua/pyKid | 99765ee9ee337efc2ba1f4be4007f34df0e6a8f1 | [
"MIT"
] | null | null | null | MainFiles/pythion/Assignment1_1_answer.py | Morris-wambua/pyKid | 99765ee9ee337efc2ba1f4be4007f34df0e6a8f1 | [
"MIT"
] | null | null | null | for i in range(1,26):
print('{0:5d}{1:5d}{2:6d}{3:6d}{4:6d}{5:6d}{6:6d}{7:6d}'.format(i,i*i,i+25,(i+25)*(i+25),i+50,(i+50)*(i+50),i+75,(i+75)*(i+75)))
# the characters in the curly brackets {} are placeholders for the data.
# the character before the colon indicates which of the data pieces should go where.
# the character after contains formatting information.
# in this case the number is the width of the filed and the d means decimal integer
# there are several other ways to achieve the same or similar result | 65.125 | 132 | 0.708253 | for i in range(1,26):
print('{0:5d}{1:5d}{2:6d}{3:6d}{4:6d}{5:6d}{6:6d}{7:6d}'.format(i,i*i,i+25,(i+25)*(i+25),i+50,(i+50)*(i+50),i+75,(i+75)*(i+75)))
# the characters in the curly brackets {} are placeholders for the data.
# the character before the colon indicates which of the data pieces should go where.
# the character after contains formatting information.
# in this case the number is the width of the filed and the d means decimal integer
# there are several other ways to achieve the same or similar result | 0 | 0 | 0 |
60759756c71a6da5c53813749d3da3fe55759688 | 3,105 | py | Python | utils_mod.py | innovator-zero/CS410_AI_Project2 | 2d33eb43274dcf6875f48b656ab7c7504ad2f7fa | [
"MIT"
] | null | null | null | utils_mod.py | innovator-zero/CS410_AI_Project2 | 2d33eb43274dcf6875f48b656ab7c7504ad2f7fa | [
"MIT"
] | null | null | null | utils_mod.py | innovator-zero/CS410_AI_Project2 | 2d33eb43274dcf6875f48b656ab7c7504ad2f7fa | [
"MIT"
] | null | null | null | import numpy as np
from multiprocessing import Pool
import glob
import os
import pandas as pd
import tensorflow as tf
# MAP
# restrict on @K is in ap_at_n
# data load generator
| 31.363636 | 111 | 0.571014 | import numpy as np
from multiprocessing import Pool
import glob
import os
import pandas as pd
import tensorflow as tf
def ap_at_10(data):
    """Average precision at cutoff 10 for one example.

    ``data`` is a ``(predictions, actuals)`` pair where entries of
    ``actuals`` greater than zero mark positive labels. Adapted from
    https://github.com/google/youtube-8m/blob/master/average_precision_calculator.py
    """
    predictions, actuals = data
    cutoff = 10
    if len(predictions) != len(actuals):
        raise ValueError("the shape of predictions and actuals does not match.")
    # Indices of the predictions, most confident first.
    ranking = np.argsort(predictions)[::-1]
    num_positives = np.size(np.where(actuals > 0))
    if num_positives == 0:
        return 0
    # Recall contribution of each retrieved positive, capped at the cutoff.
    delta_recall = 1.0 / min(num_positives, cutoff)
    hits = 0.0
    average_precision = 0.0
    for rank, idx in enumerate(ranking[:cutoff], start=1):
        if actuals[idx] > 0:
            hits += 1
            average_precision += hits / rank * delta_recall
    return average_precision
# MAP
# restrict on @K is in ap_at_n
def MAP_at_10(pred, actual):
    """Mean average precision at 10, averaged over all examples.

    Scores each (prediction, actual) pair with ``ap_at_10`` in a worker
    pool and returns the mean of the per-example scores.
    """
    pairs = zip(list(pred), list(actual))
    with Pool() as workers:
        per_example_ap = workers.map(ap_at_10, pairs)
    return np.mean(per_example_ap)
def trans_in(label, file):
    """Map original label id(s) to remapped id(s) via the lookup table ``file``.

    ``file`` is indexed by the old label id(s); with a numpy array this also
    works element-wise for an array of labels.
    """
    return file[label]
# data load generator
def tf_itr(tp='test', batch=1024, label_num=3862, FOLDER="", num=None):
    """Yield batches of (ids, audio, rgb, labels) from YouTube-8M tfrecords.

    :param tp: sub-directory of FOLDER to read from (e.g. 'train', 'test').
    :param batch: number of examples per yielded batch.
    :param label_num: length of the multi-hot label vector.
    :param FOLDER: root directory containing the tfrecord sub-directories.
    :param num: number of tfrecord files to read; ``None`` reads all of them.

    Yields tuples of numpy arrays ``(ids, aud, rgb, lbs)``; the final yield
    after the last file may be smaller than ``batch`` (possibly empty).
    """
    # Label remapping table read from disk; indexed by original label id.
    # (Renamed from `file`, which shadowed the Python 2 builtin.)
    label_map = pd.read_csv('Mapping_in.csv', sep=',')['NewLabel'].values
    tfiles = sorted(glob.glob(os.path.join(FOLDER, tp, '*tfrecord')))
    ids, aud, rgb, lbs = [], [], [], []
    # Idiom fix: compare against None with `is`, not `==`.
    it = len(tfiles) if num is None else num
    for index_i in range(it):
        fn = tfiles[index_i]
        # Progress bar on one console line (carriage return, no newline).
        print("\rLoading files: [{0:50s}] {1:.1f}% ".format('#' * int((index_i + 1) / it * 50),
                                                            (index_i + 1) / it * 100), fn, end="", flush=True)
        for example in tf.python_io.tf_record_iterator(fn):
            tf_example = tf.train.Example.FromString(example)
            ids.append(tf_example.features.feature['id'].bytes_list.value[0].decode(encoding='UTF-8'))
            rgb.append(np.array(tf_example.features.feature['mean_rgb'].float_list.value).astype(float))
            aud.append(np.array(tf_example.features.feature['mean_audio'].float_list.value).astype(float))
            yss = np.array(tf_example.features.feature['labels'].int64_list.value)
            yss_new = trans_in(yss, label_map)
            # Multi-hot encode the remapped labels.
            out = np.zeros(label_num).astype(np.int8)
            for y in yss_new:
                out[y] = 1
            lbs.append(out)
            if len(ids) >= batch:
                yield np.array(ids), np.array(aud), np.array(rgb), np.array(lbs)
                ids, aud, rgb, lbs = [], [], [], []
        if index_i + 1 == it:
            # Flush whatever is left after the last file.
            yield np.array(ids), np.array(aud), np.array(rgb), np.array(lbs)
            ids, aud, rgb, lbs = [], [], [], []
d9bd8a4e9880845e45b6e63860e014baf705d1c0 | 76 | py | Python | hucrl/agent/__init__.py | lioritan/hucrl | fb5b3aea0edf69a4169ab79d2908918111fc6072 | [
"MIT"
] | 24 | 2020-12-05T20:10:27.000Z | 2022-03-25T10:00:51.000Z | hucrl/agent/__init__.py | lioritan/hucrl | fb5b3aea0edf69a4169ab79d2908918111fc6072 | [
"MIT"
] | 3 | 2021-02-04T10:07:30.000Z | 2021-06-02T15:25:18.000Z | hucrl/agent/__init__.py | lioritan/hucrl | fb5b3aea0edf69a4169ab79d2908918111fc6072 | [
"MIT"
] | 4 | 2021-04-04T12:36:40.000Z | 2022-03-26T10:37:55.000Z | """Python Script Template."""
from .model_based import MBMPOAgent, MPCAgent
| 25.333333 | 45 | 0.776316 | """Python Script Template."""
from .model_based import MBMPOAgent, MPCAgent
| 0 | 0 | 0 |
773f2dc30158c6d31ef821abf78ea88a059a38bd | 5,973 | py | Python | scripts/db_to_bundle.py | adolgert/cascade | 2084e07c9ee5e901dd407b817220de882c7246a3 | [
"MIT"
] | null | null | null | scripts/db_to_bundle.py | adolgert/cascade | 2084e07c9ee5e901dd407b817220de882c7246a3 | [
"MIT"
] | null | null | null | scripts/db_to_bundle.py | adolgert/cascade | 2084e07c9ee5e901dd407b817220de882c7246a3 | [
"MIT"
] | null | null | null | import argparse
import os
import re
from pathlib import Path
import numpy as np
import pandas as pd
from cascade.dismod.constants import DensityEnum, IntegrandEnum
from cascade.dismod.db.wrapper import get_engine
from cascade.input_data.configuration.id_map import make_integrand_map
MEASURES_ACCEPTABLE_TO_ELMO = {
"prevalence",
"duration",
"yld",
"continuous",
"cfr",
"proportion",
"mtstandard",
"relrisk",
"incidence",
"tincidence",
"sincidence",
"remission",
"mtexcess",
"pmtexcess",
"mtwith",
"mtall",
"mtspecific",
"mtother",
}
MEASURE_ID_TO_CANONICAL_NAME = {
24: "acute_inc",
23: "acute_prev",
17: "cfr",
22: "chronic_prev",
19: "continuous",
2: "daly",
1: "death",
21: "diswght",
8: "duration",
45: "fertility",
28: "hale",
43: "haq_index",
6: "incidence",
26: "le",
37: "le_decomp",
30: "le_nsnh",
31: "le_nswh",
36: "lt_prevalence",
25: "mmr",
34: "mort_risk",
14: "mtall",
9: "mtexcess",
16: "mtother",
15: "mtspecific",
12: "mtstandard",
13: "mtwith",
38: "pini",
10: "pmtexcess",
27: "pod",
32: "pod_nsnh",
33: "pod_nswh",
44: "population",
5: "prevalence",
18: "proportion",
11: "relrisk",
7: "remission",
29: "sev",
41: "sincidence",
35: "st_prevalence",
20: "survival_rate",
39: "susceptible",
42: "tincidence",
40: "withc",
3: "yld",
4: "yll",
}
REQUIRED_COLUMNS = [
"bundle_id",
"seq",
"nid",
"underlying_nid",
"input_type",
"source_type",
"location_id",
"sex",
"year_start",
"year_end",
"age_start",
"age_end",
"measure",
"mean",
"lower",
"upper",
"standard_error",
"effective_sample_size",
"cases",
"sample_size",
"unit_type",
"unit_value_as_published",
"uncertainty_type",
"uncertainty_type_value",
"representative_name",
"urbanicity_type",
"recall_type",
"recall_type_value",
"sampling_type",
"group",
"specificity",
"group_review",
"is_outlier",
"design_effect",
]
DUMMY_VALUES = {
"nid": 119_796,
"source_type": "Unidentifiable",
"unit_type": "Person",
"unit_value_as_published": 1,
"representative_name": "Nationally representative only",
"urbanicity_type": "Mixed/both",
"recall_type": "Period: years",
"recall_type_value": 1,
"is_outlier": 0,
"response_rate": "",
}
if __name__ == "__main__":
main()
| 26.312775 | 110 | 0.61845 | import argparse
import os
import re
from pathlib import Path
import numpy as np
import pandas as pd
from cascade.dismod.constants import DensityEnum, IntegrandEnum
from cascade.dismod.db.wrapper import get_engine
from cascade.input_data.configuration.id_map import make_integrand_map
MEASURES_ACCEPTABLE_TO_ELMO = {
"prevalence",
"duration",
"yld",
"continuous",
"cfr",
"proportion",
"mtstandard",
"relrisk",
"incidence",
"tincidence",
"sincidence",
"remission",
"mtexcess",
"pmtexcess",
"mtwith",
"mtall",
"mtspecific",
"mtother",
}
MEASURE_ID_TO_CANONICAL_NAME = {
24: "acute_inc",
23: "acute_prev",
17: "cfr",
22: "chronic_prev",
19: "continuous",
2: "daly",
1: "death",
21: "diswght",
8: "duration",
45: "fertility",
28: "hale",
43: "haq_index",
6: "incidence",
26: "le",
37: "le_decomp",
30: "le_nsnh",
31: "le_nswh",
36: "lt_prevalence",
25: "mmr",
34: "mort_risk",
14: "mtall",
9: "mtexcess",
16: "mtother",
15: "mtspecific",
12: "mtstandard",
13: "mtwith",
38: "pini",
10: "pmtexcess",
27: "pod",
32: "pod_nsnh",
33: "pod_nswh",
44: "population",
5: "prevalence",
18: "proportion",
11: "relrisk",
7: "remission",
29: "sev",
41: "sincidence",
35: "st_prevalence",
20: "survival_rate",
39: "susceptible",
42: "tincidence",
40: "withc",
3: "yld",
4: "yll",
}
REQUIRED_COLUMNS = [
"bundle_id",
"seq",
"nid",
"underlying_nid",
"input_type",
"source_type",
"location_id",
"sex",
"year_start",
"year_end",
"age_start",
"age_end",
"measure",
"mean",
"lower",
"upper",
"standard_error",
"effective_sample_size",
"cases",
"sample_size",
"unit_type",
"unit_value_as_published",
"uncertainty_type",
"uncertainty_type_value",
"representative_name",
"urbanicity_type",
"recall_type",
"recall_type_value",
"sampling_type",
"group",
"specificity",
"group_review",
"is_outlier",
"design_effect",
]
DUMMY_VALUES = {
"nid": 119_796,
"source_type": "Unidentifiable",
"unit_type": "Person",
"unit_value_as_published": 1,
"representative_name": "Nationally representative only",
"urbanicity_type": "Mixed/both",
"recall_type": "Period: years",
"recall_type_value": 1,
"is_outlier": 0,
"response_rate": "",
}
def main():
    """Convert a Dismod-AT database's data table into an Elmo bundle sheet.

    Reads the data, node and covariate tables from the SQLite file given on
    the command line, renames/remaps columns into the Elmo upload format and
    writes an ``extraction`` sheet to the given Excel output file.
    """
    # umask 0002: files we create are group/world readable (no world write).
    readable_by_all = 0o0002
    os.umask(readable_by_all)
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file")
    parser.add_argument("output_file")
    parser.add_argument("bundle_id", type=int)
    args = parser.parse_args()
    engine = get_engine(Path(args.input_file))
    data = pd.read_sql_query("select * from data", engine)
    node_table = pd.read_sql_query("select * from node", engine)
    covariate_table = pd.read_sql_query("select * from covariate", engine)
    # For distributions other than gaussian remove the standard deviation
    # which will cause Elmo to use Wilson Score Interval to estimate
    # uncertainty from sample size and Theo says that's what we want
    data.loc[data.density_id != DensityEnum.gaussian.value, "meas_std"] = np.nan
    covariate_columns = [c for c in data.columns if c.startswith("x_")]
    # Keep only the columns Elmo needs, plus the covariates.
    data = data[
        [
            "age_lower",
            "age_upper",
            "hold_out",
            "node_id",
            "time_lower",
            "time_upper",
            "sample_size",
            "integrand_id",
            "meas_value",
            "meas_std",
        ]
        + covariate_columns
    ]
    # Rename Dismod column names to their Elmo equivalents.
    data = data.rename(
        columns={
            "age_lower": "age_start",
            "age_upper": "age_end",
            "time_lower": "year_start",
            "time_upper": "year_end",
            "meas_value": "mean",
            "meas_std": "standard_error",
        }
    )
    # Covariates: map Dismod covariate columns ("x_<id>") to GBD study
    # covariates ("cv_<name>"); study covariate names start with "s_".
    cov_pattern = re.compile("[sc]_(.*)_[^_]+")
    dm_cov_to_gbd_study_cov = {
        f"x_{r['covariate_id']}": f"cv_{cov_pattern.match(r['covariate_name']).group(1)}"
        for _, r in covariate_table.iterrows()
        if r["covariate_name"].startswith("s_")
    }
    # Country covariates (no study mapping) are dropped entirely.
    for c in covariate_columns:
        if c in dm_cov_to_gbd_study_cov:
            data = data.rename(columns={c: dm_cov_to_gbd_study_cov[c]})
        else:
            data = data.drop(c, axis=1)
    # Convert sex covariate to sex name
    data["sex"] = data.cv_sex.apply(lambda c: {-0.5: "Female", 0.5: "Male", 0.0: "Both"}[c])
    data = data.drop("cv_sex", axis=1)
    # Convert nodes to locations
    node_to_location = {r.node_id: r.c_location_id for _, r in node_table.iterrows()}
    data["location_id"] = data.node_id.apply(lambda nid: node_to_location[nid])
    data = data.drop("node_id", axis=1)
    # Convert integrands to measures
    integrand_to_measure = {v.value: MEASURE_ID_TO_CANONICAL_NAME[k] for k, v in make_integrand_map().items()}
    # prevalence and incidence are special because they have more complicated relationships with integrands
    # than other measures so clean them up
    integrand_to_measure[IntegrandEnum.prevalence.value] = "prevalence"
    integrand_to_measure[IntegrandEnum.Tincidence.value] = "tincidence"
    integrand_to_measure[IntegrandEnum.Sincidence.value] = "sincidence"
    data["measure"] = data.integrand_id.apply(integrand_to_measure.get)
    data = data.drop("integrand_id", axis=1)
    # NOTE(review): assert is stripped under `python -O`; consider raising
    # a ValueError here instead if this validation must always run.
    assert not set(data.measure.unique()) - MEASURES_ACCEPTABLE_TO_ELMO
    # Add in the bundle_id
    data = data.assign(bundle_id=args.bundle_id)
    # Fill the remaining required Elmo columns with placeholder values.
    data = data.assign(**DUMMY_VALUES)
    missing_columns = set(REQUIRED_COLUMNS) - set(data.columns)
    data = data.assign(**{c: "" for c in missing_columns})
    data.to_excel(args.output_file, "extraction", index=False)
if __name__ == "__main__":
main()
| 3,382 | 0 | 23 |
756c5c44c633b5ed7e03288193e68c4de5dc6001 | 169 | py | Python | src/llana_main.py | carchard/llana | 8f755ad4e0217f67ececb639b4cf2b32294496ce | [
"MIT"
] | null | null | null | src/llana_main.py | carchard/llana | 8f755ad4e0217f67ececb639b4cf2b32294496ce | [
"MIT"
] | null | null | null | src/llana_main.py | carchard/llana | 8f755ad4e0217f67ececb639b4cf2b32294496ce | [
"MIT"
] | null | null | null | # authors:
# connor archard
# geoff keating
#
# updated:
# 12-15-2016
#
# brief:
# main file for llana operation
if __name__ == "__main__":
    # BUG FIX: the Python 2 print statement is a SyntaxError on Python 3;
    # the function-call form works on both interpreters.
    print("Hello world")
# connor archard
# geoff keating
#
# updated:
# 12-15-2016
#
# brief:
# main file for llana operation
if __name__ == "__main__":
    # BUG FIX: the Python 2 print statement is a SyntaxError on Python 3;
    # the function-call form works on both interpreters.
    print("Hello world")
861e85ffcb8b0f165a295f1b2f939988161d235d | 1,598 | py | Python | transform.py | oskar-j/walsh-hadamard-transform | cd7df03648225e08ac6736194d7d605aaf49626b | [
"MIT"
] | 5 | 2019-07-01T21:26:22.000Z | 2021-08-22T01:51:52.000Z | transform.py | oskar-j/walsh-hadamard-transform | cd7df03648225e08ac6736194d7d605aaf49626b | [
"MIT"
] | 1 | 2019-07-07T14:33:24.000Z | 2019-07-07T14:33:24.000Z | transform.py | oskar-j/walsh-hadamard-transform | cd7df03648225e08ac6736194d7d605aaf49626b | [
"MIT"
] | null | null | null | # coding: utf-8
from fal.task import Task
import numpy as np
from PIL import Image
from matplotlib.pyplot import imshow
from matplotlib.pyplot import hist
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Step 1: build a compression task -- runs the forward transform on the
    # input image and writes the raw transformed representation to disk.
    task = Task()
    # Declare in the data flow that we wish to transform (compress) first.
    task.with_action('compress')
    task.with_output('data/transformed.cim')  # this is a raw file after transformation
    # hence the atypical file type, as most commercial tools won't be able to read this file
    # Inspect the input file ourselves: histogram of pixel counts first.
    input_img = Image.open('data/image.bmp')
    hist(input_img.histogram(), bins=40)
    # Show the input image itself.
    plt.rcParams["figure.figsize"] = (20, 9)
    imshow(np.asarray(input_img))
    # Tell the framework where to find the source image.
    task.with_input('data/image.bmp')
    # Run the compression.
    task.run()
    # Step 2: recreate (decompress) the image from the transformed result.
    task = Task()
    task.with_action('extract')
    task.with_input('data/transformed.cim')
    task.with_output('data/recreated.bmp')
    # Run the extraction.
    # Check inverseTransform(self, src) in fal.transforms for more details.
    task.run()
    # Display the round-tripped result next to its histogram.
    output_image = Image.open('data/recreated.bmp')
    plt.rcParams["figure.figsize"] = (20, 9)
    imshow(np.asarray(output_image))
    hist(output_image.histogram(), bins=40)
    # As you can see, the image after transformation has a visible loss of quality
    # and its color profile (histogram of pixel counts) has changed as well.
| 26.196721 | 90 | 0.695244 | # coding: utf-8
from fal.task import Task
import numpy as np
from PIL import Image
from matplotlib.pyplot import imshow
from matplotlib.pyplot import hist
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Step 1: build a compression task -- runs the forward transform on the
    # input image and writes the raw transformed representation to disk.
    task = Task()
    # Declare in the data flow that we wish to transform (compress) first.
    task.with_action('compress')
    task.with_output('data/transformed.cim')  # this is a raw file after transformation
    # hence the atypical file type, as most commercial tools won't be able to read this file
    # Inspect the input file ourselves: histogram of pixel counts first.
    input_img = Image.open('data/image.bmp')
    hist(input_img.histogram(), bins=40)
    # Show the input image itself.
    plt.rcParams["figure.figsize"] = (20, 9)
    imshow(np.asarray(input_img))
    # Tell the framework where to find the source image.
    task.with_input('data/image.bmp')
    # Run the compression.
    task.run()
    # Step 2: recreate (decompress) the image from the transformed result.
    task = Task()
    task.with_action('extract')
    task.with_input('data/transformed.cim')
    task.with_output('data/recreated.bmp')
    # Run the extraction.
    # Check inverseTransform(self, src) in fal.transforms for more details.
    task.run()
    # Display the round-tripped result next to its histogram.
    output_image = Image.open('data/recreated.bmp')
    plt.rcParams["figure.figsize"] = (20, 9)
    imshow(np.asarray(output_image))
    hist(output_image.histogram(), bins=40)
    # As you can see, the image after transformation has a visible loss of quality
    # and its color profile (histogram of pixel counts) has changed as well.
| 0 | 0 | 0 |
969d383d8ad8e247a4bde60f2e4ef30b26b6f0ea | 5,763 | py | Python | clinicadl/clinicadl/tools/deep_learning/models/slice_level.py | CCF0211/AD-DL | f449db4b8370d0d31fd2c815def2e175bfd29cb9 | [
"MIT"
] | null | null | null | clinicadl/clinicadl/tools/deep_learning/models/slice_level.py | CCF0211/AD-DL | f449db4b8370d0d31fd2c815def2e175bfd29cb9 | [
"MIT"
] | null | null | null | clinicadl/clinicadl/tools/deep_learning/models/slice_level.py | CCF0211/AD-DL | f449db4b8370d0d31fd2c815def2e175bfd29cb9 | [
"MIT"
] | null | null | null | # coding: utf8
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import BasicBlock
from torch import nn
import math
import timm
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
import torch.nn.functional as F
__all__ = ['resnet18_2d', 'vit']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
}
def resnet18_2d(**kwargs):
    """
    Construct a ResNet-18 backbone for transfer learning, with an added
    dropout layer and a final 2-class FC head.

    ImageNet weights are loaded best-effort (``strict=False`` because the
    first conv takes 8 input channels instead of torchvision's 3, so its
    key cannot match); on download failure the error is printed and the
    model keeps its random initialisation.  All parameters are frozen
    except ``layer4``, ``fc`` and the appended head.

    :param kwargs: forwarded to ``ResNetDesigner``; must contain
        ``dropout`` (float, probability for the appended ``nn.Dropout``).
    :return: the configured ``ResNetDesigner`` model.
    """
    model = ResNetDesigner(BasicBlock, [2, 2, 2, 2], **kwargs)
    try:
        # Best effort: mismatched/missing keys are skipped via strict=False.
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
    except Exception as err:
        print("Error is:", err)
        # raise ConnectionError('The URL %s may not be functional anymore. Check if it still exists or '
        #                       'if it has been moved.' % model_urls['resnet18'])
    # Freeze everything by default...
    for p in model.parameters():
        p.requires_grad = False
    # fine-tune the 4-th residual block
    for p in model.layer4.parameters():
        p.requires_grad = True
    # fine-tune the last FC layer
    for p in model.fc.parameters():
        p.requires_grad = True
    # Append a dropout + 2-way FC head on top of the transfer-learning model.
    # NOTE(review): despite the old comment, no softmax is added here --
    # callers presumably use a logits-based loss; confirm.
    model.add_module('drop_out', nn.Dropout(p=kwargs["dropout"]))
    model.add_module('fc_out', nn.Linear(1000, 2))
    return model
| 33.9 | 104 | 0.618601 | # coding: utf8
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import BasicBlock
from torch import nn
import math
import timm
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
import torch.nn.functional as F
__all__ = ['resnet18_2d', 'vit']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
}
def vit(args, **kwargs):
    """
    Build a pretrained timm Vision Transformer wrapped in ``VitDesigner``
    so it can accept this project's 8-channel 512x512 inputs.

    :param args: namespace providing ``VIT_model_name`` (timm model id),
        ``num_class`` (classifier output size) and ``reduce_method``
        (how to shrink the input to the ViT's 3-channel resolution).
    :param kwargs: accepted but never used.  # NOTE(review): confirm intent
    :return: a ``VitDesigner`` wrapping the timm model.
    """
    model = timm.create_model(args.VIT_model_name, pretrained=True, num_classes=args.num_class)
    return VitDesigner(model, args.VIT_model_name, args.reduce_method)
class interpolate(nn.Module):
    """Module wrapper around ``F.interpolate`` that resizes the spatial
    dimensions of its input to a fixed ``target_size``."""

    def __init__(self, target_size):
        super().__init__()
        self.target_size = target_size

    def forward(self, x):
        resized = F.interpolate(x, size=self.target_size)
        return resized
def size_reduce(target_size=384, method='conv_avgpool'):
    """
    Build a small head that maps an 8-channel 512x512 input down to a
    3-channel ``target_size`` x ``target_size`` tensor suitable for a ViT.

    :param target_size: output spatial resolution (e.g. 224 or 384).
    :param method: one of ``'conv_avgpool'``, ``'conv_maxpool'`` or
        ``'interpolate'``.
    :return: an ``nn.Sequential`` performing the reduction.
    :raises ValueError: for an unknown ``method`` (previously this fell
        through and crashed with ``UnboundLocalError`` on ``block``).
    """
    if method == 'conv_avgpool':
        # Strided conv does the coarse downsample; adaptive pooling pins
        # the exact output size.
        block = nn.Sequential(
            nn.Conv2d(8, 3, kernel_size=3, stride=512 // target_size, padding=1),
            nn.AdaptiveAvgPool2d(output_size=target_size),
        )
    elif method == 'conv_maxpool':
        block = nn.Sequential(
            nn.Conv2d(8, 3, kernel_size=3, stride=512 // target_size, padding=1),
            nn.AdaptiveMaxPool2d(output_size=target_size),
        )
    elif method == 'interpolate':
        # Conv only reduces channels; `interpolate` resizes spatially.
        block = nn.Sequential(
            nn.Conv2d(8, 3, kernel_size=3, stride=1, padding=1),
            interpolate(target_size=target_size),
        )
    else:
        raise ValueError(f"Unknown reduce method: {method!r}")
    return block
class VitDesigner(nn.Module):
    """
    Adapter that feeds the project's (B, 8, 512, 512) tensors into a timm
    ViT: the input is first reduced to the 3-channel resolution encoded in
    the model name ('224' or '384'); any other name passes the input
    through unchanged.
    """

    def __init__(self, Vit, model_name, reduce_method='conv_avgpool'):
        super(VitDesigner, self).__init__()
        self.Vit = Vit
        self.model_name = model_name
        if '224' in model_name:
            target = 224
        elif '384' in model_name:
            target = 384
        else:
            target = None
        if target is None:
            self.reduce_block = nn.Identity()
        else:
            self.reduce_block = size_reduce(target_size=target, method=reduce_method)

    def forward(self, x):
        # (B, 8, 512, 512) -> (B, 3, target, target) -> ViT output
        reduced = self.reduce_block(x)
        return self.Vit(reduced)
def resnet18_2d(**kwargs):
    """
    Construct a ResNet-18 backbone for transfer learning, with an added
    dropout layer and a final 2-class FC head.

    ImageNet weights are loaded best-effort (``strict=False`` because the
    first conv takes 8 input channels instead of torchvision's 3, so its
    key cannot match); on download failure the error is printed and the
    model keeps its random initialisation.  All parameters are frozen
    except ``layer4``, ``fc`` and the appended head.

    :param kwargs: forwarded to ``ResNetDesigner``; must contain
        ``dropout`` (float, probability for the appended ``nn.Dropout``).
    :return: the configured ``ResNetDesigner`` model.
    """
    model = ResNetDesigner(BasicBlock, [2, 2, 2, 2], **kwargs)
    try:
        # Best effort: mismatched/missing keys are skipped via strict=False.
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
    except Exception as err:
        print("Error is:", err)
        # raise ConnectionError('The URL %s may not be functional anymore. Check if it still exists or '
        #                       'if it has been moved.' % model_urls['resnet18'])
    # Freeze everything by default...
    for p in model.parameters():
        p.requires_grad = False
    # fine-tune the 4-th residual block
    for p in model.layer4.parameters():
        p.requires_grad = True
    # fine-tune the last FC layer
    for p in model.fc.parameters():
        p.requires_grad = True
    # Append a dropout + 2-way FC head on top of the transfer-learning model.
    # NOTE(review): despite the old comment, no softmax is added here --
    # callers presumably use a logits-based loss; confirm.
    model.add_module('drop_out', nn.Dropout(p=kwargs["dropout"]))
    model.add_module('fc_out', nn.Linear(1000, 2))
    return model
class ResNetDesigner(nn.Module):
    """ResNet variant accepting 8-channel input images.

    Mirrors the torchvision ResNet wiring (conv1/bn1/relu/maxpool,
    four residual stages, avgpool, fc) except that ``conv1`` takes
    8 input channels and ``fc`` maps 51200 flattened features to
    ``num_classes``.  ``forward`` additionally calls ``self.drop_out``
    and ``self.fc_out``, which are NOT created here -- they are attached
    externally via ``add_module`` (see ``resnet18_2d``), so ``forward``
    fails unless that head has been added.
    """

    def __init__(self, block, layers, num_classes=1000, **kwargs):
        # `inplanes` tracks the running channel count consumed by
        # _make_layer as each stage is built.
        self.inplanes = 64
        super(ResNetDesigner, self).__init__()
        # 8-channel stem (instead of torchvision's 3-channel RGB stem).
        self.conv1 = nn.Conv2d(8, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 downsample by stride 2.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # 51200 = flattened feature size for the expected input resolution.
        # NOTE(review): fixes the supported input size -- confirm against
        # the dataloader's image dimensions.
        self.fc = nn.Linear(51200, num_classes)
        # Kaiming-style init for convs; BN scales to 1, shifts to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks; the first block
        gets the stride and, if needed, a 1x1-conv downsample shortcut."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Residual stages.
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Pool, flatten, classify.
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        # Added top FC layer (attached externally in resnet18_2d).
        x = self.drop_out(x)
        x = self.fc_out(x)
        return x
| 3,918 | 27 | 302 |
7dd3626fa9166bf1afb27b484c0c537345831354 | 12,541 | py | Python | tools/data_builder.py | cmenedes/police-brutality | a978046291015716a4972b478a01fe192f5f5ea1 | [
"MIT"
] | 1 | 2020-09-30T18:43:31.000Z | 2020-09-30T18:43:31.000Z | tools/data_builder.py | cmenedes/police-brutality | a978046291015716a4972b478a01fe192f5f5ea1 | [
"MIT"
] | null | null | null | tools/data_builder.py | cmenedes/police-brutality | a978046291015716a4972b478a01fe192f5f5ea1 | [
"MIT"
] | null | null | null | import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
# `or '.'` because when you're in the same directory as this code
# `ValueError: no path specified` gets thrown by `relpath` with empty input
src_dir = os.path.relpath(os.path.dirname(__file__) or ".")
possible_tags_path = os.path.join(src_dir, "..", "docs/possible_tags.md")
md_dir = os.path.join(src_dir, "..", "reports")
out_dir = os.path.join(src_dir, "data_build")
combined_fpath = os.path.join(out_dir, "all-locations.md")
csv_fpath_v1 = os.path.join(out_dir, "all-locations.csv")
json_fpath_v1 = os.path.join(out_dir, "all-locations.json")
json_fpath_v2 = os.path.join(out_dir, "all-locations-v2.json")
readme_fpath = os.path.join(out_dir, "README.md")
if not os.path.exists(out_dir):
os.mkdir(out_dir)
date_regex = re.compile(
r"(Jan(uary)?|Feb(ruary)?|Mar(ch)?|Apr(il)?|May|Jun(e)?|"
r"Jul(y)?|Aug(ust)?|Sep(tember)?|Oct(ober)?|Nov(ember)?|"
r"Dec(ember)?)\s+\d{1,2}"
)
url_regex = re.compile(
r"(http|ftp|https):\/\/([\w\-_]+(?:(?:\.[\w\-_]+)+))" r"([\w\-\.,@?^=%&:/~\+#\!]*[\w\-\@?^=%&/~\+#\!])?"
)
# Regex is used to ensure that lat/long is both in a valid format has has 6-7 decimal places (or is an exact 90/180) to improve data quality on the backend
LAT_REGEX = re.compile(r"^\(?([-+]?(?:[1-8]?\d(?:\.\d{6,7})|90(?:\.0+)?)),")
LONG_REGEX = re.compile(r".*,\s*([-+]?(?:180(?:\.0+)?|(?:(?:1[0-7]\d)|(?:[1-9]?\d))(?:\.\d{6,7})))\)?$")
def find_md_link_or_url(text):
    """
    Split a markdown bullet into ``(display_text, url)``.

    find_md_link_or_url('ab[cd](ef)xy') returns:
        ('abcdxy', 'ef')
    Everything outside the link plus the link's label goes into the text;
    the parenthesised part becomes the URL.  If no markdown link is found,
    the whole line is returned as the URL with empty text.
    """
    # Tiny hand-rolled state machine; states are distinct singleton tuples.
    start = (0,)
    open_sq = (1,)
    closed_sq = (2,)
    open_curve = (3,)
    closed_curve = (4,)
    state = start
    text_content = ""
    link_url = ""
    for ch in text:
        if state == start:
            if ch == "[":
                state = open_sq
            else:
                text_content += ch
        elif state == open_sq:
            if ch == "]":
                state = closed_sq
            else:
                text_content += ch
        elif state == closed_sq:
            if ch == "(":
                state = open_curve
            else:
                text_content += ch
        elif state == open_curve:
            if ch == ")":
                state = closed_curve
            else:
                link_url += ch
        elif state == closed_curve:
            # After the first complete link, everything is display text.
            text_content += ch
    if len(link_url) == 0:
        # no markdown link found, consider it all one url
        link_url = text_content
        text_content = ""
    return text_content.strip(), link_url.strip()
updated_at = datetime.now(timezone.utc).isoformat()
md_header = f"""
GENERATED FILE, PLEASE MAKE EDITS ON MASTER AT https://github.com/2020PB/police-brutality/
UPDATED AT: {updated_at}
"""
md_out_format = """
# {location}
{text}
"""
readme_text = """
# /r/2020PoliceBrutality/ dataset
This repository exists to accumulate and contextualize evidence of police brutality during the 2020 George Floyd protests.
Our goal in doing this is to assist journalists, politicians, prosecutors, activists and concerned individuals who can use the evidence accumulated here for political campaigns, news reporting, public education and prosecution of criminal police officers.
* This branch is just the files generated by parsing the markdown for ease of building other sites.
* For example your webapp can query and display data from https://raw.githubusercontent.com/2020PB/police-brutality/data_build/all-locations.json
* For more info see https://github.com/2020PB/police-brutality
* These data files are generated by https://github.com/2020PB/police-brutality/tree/main/tools
# THESE FILES ARE GENERATED - DO NOT EDIT (including this readme)
# THESE FILES ARE GENERATED - DO NOT EDIT (including this readme)
* Please edit the `.md` files on the `main` branch at https://github.com/2020PB/police-brutality
* Also notice each data row has a `edit_at` link so you can find the source data for every entry.
"""
if __name__ == "__main__":
    # Parse every reports/*.md into structured entries, then emit all
    # generated artifacts: merged markdown, v2 JSON, and legacy v1 CSV/JSON.
    md_texts = read_all_md_files(md_dir)
    data = process_md_texts(md_texts)
    to_merged_md_file(md_texts, combined_fpath)
    to_json_file_v2(data, json_fpath_v2)
    # v1 outputs keep only the legacy keys/columns.
    v1_data = [v1_only(item) for item in data]
    to_csv_file_v1(v1_data, csv_fpath_v1)
    to_json_file_v1(v1_data, json_fpath_v1)
    to_readme(readme_fpath)
    print("Done!")
| 32.15641 | 255 | 0.600989 | import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
# `or '.'` because when you're in the same directory as this code
# `ValueError: no path specified` gets thrown by `relpath` with empty input
src_dir = os.path.relpath(os.path.dirname(__file__) or ".")
possible_tags_path = os.path.join(src_dir, "..", "docs/possible_tags.md")
md_dir = os.path.join(src_dir, "..", "reports")
out_dir = os.path.join(src_dir, "data_build")
combined_fpath = os.path.join(out_dir, "all-locations.md")
csv_fpath_v1 = os.path.join(out_dir, "all-locations.csv")
json_fpath_v1 = os.path.join(out_dir, "all-locations.json")
json_fpath_v2 = os.path.join(out_dir, "all-locations-v2.json")
readme_fpath = os.path.join(out_dir, "README.md")
if not os.path.exists(out_dir):
os.mkdir(out_dir)
date_regex = re.compile(
r"(Jan(uary)?|Feb(ruary)?|Mar(ch)?|Apr(il)?|May|Jun(e)?|"
r"Jul(y)?|Aug(ust)?|Sep(tember)?|Oct(ober)?|Nov(ember)?|"
r"Dec(ember)?)\s+\d{1,2}"
)
url_regex = re.compile(
r"(http|ftp|https):\/\/([\w\-_]+(?:(?:\.[\w\-_]+)+))" r"([\w\-\.,@?^=%&:/~\+#\!]*[\w\-\@?^=%&/~\+#\!])?"
)
# Regex is used to ensure that lat/long is both in a valid format has has 6-7 decimal places (or is an exact 90/180) to improve data quality on the backend
LAT_REGEX = re.compile(r"^\(?([-+]?(?:[1-8]?\d(?:\.\d{6,7})|90(?:\.0+)?)),")
LONG_REGEX = re.compile(r".*,\s*([-+]?(?:180(?:\.0+)?|(?:(?:1[0-7]\d)|(?:[1-9]?\d))(?:\.\d{6,7})))\)?$")
def critical_exit(msg):
    """Print a critical-failure message and terminate with exit status 2.

    Raises ``SystemExit(2)`` directly instead of calling the
    ``site``-injected ``exit()`` builtin, which is not guaranteed to exist
    (e.g. under ``python -S`` or embedded interpreters).
    """
    print(f"---CRITICAL FAILURE {msg}")
    raise SystemExit(2)
def title_to_name_date(line):
    """Split an incident title line ``"<name> | <date text>"``.

    :param line: e.g. ``"Police shove protester | May 31st"``.
    :return: tuple ``(name, iso_date, date_text)`` where ``iso_date`` is
        ``YYYY-MM-DD`` (dateutil fills unspecified fields such as the year
        from today's date) or ``""`` when the date is marked unknown.
    :raises ValueError: when the line does not have exactly two
        '|'-separated parts, the name is empty, or no recognizable date is
        found (previously the last case crashed with an opaque
        ``AttributeError`` on ``None.group()``).
    """
    parts = line.split("|")
    if len(parts) != 2:
        raise ValueError(f"Failed title_to_name_date. Expected 2 parts, separated by '|'. Got: {line}")
    name = parts[0].strip()
    if len(name) == 0:
        raise ValueError(f"Failed name parse: missing name for {line}")
    date_text = parts[1].strip()
    if date_text in ("Date Unknown", "Unknown Date"):
        return name, "", "Unknown Date"
    match = date_regex.search(date_text)
    if match is None:
        raise ValueError(f"Failed date parse: no recognizable date in {line!r}")
    date = parse(match.group()).strftime("%Y-%m-%d")
    return name, date, date_text
def read_all_md_files(base_dir):
    """Load every ``*.md`` report directly under ``base_dir``.

    Returns a dict mapping each file's base name without the ``.md``
    suffix (the location/state name) to its UTF-8-decoded contents.
    """
    texts_by_state = {}
    for path in glob.glob(base_dir + "/*.md"):
        base = os.path.basename(path)
        print(f"Reading '{base}'")
        state = base.replace(".md", "")
        with open(path, "rb") as handle:
            texts_by_state[state] = handle.read().decode("utf-8")
    print(f"Got {len(texts_by_state)} locations")
    return texts_by_state
def finalize_entry(entry):
    """Trim surrounding whitespace from the entry's accumulated description."""
    description = entry["description"]
    entry["description"] = description.strip()
    return entry
def find_md_link_or_url(text):
    """
    Split a markdown bullet into ``(display_text, url)``.

    find_md_link_or_url('ab[cd](ef)xy') returns:
        ('abcdxy', 'ef')
    Everything outside the link plus the link's label goes into the text;
    the parenthesised part becomes the URL.  If the line contains no
    markdown link at all, the whole line becomes the URL and the text is
    empty.
    """
    # Hand-rolled scanner over the first `[label](url)` construct.
    START, OPEN_SQ, CLOSED_SQ, OPEN_CURVE, CLOSED_CURVE = range(5)
    state = START
    text_content = ""
    link_url = ""
    for ch in text:
        if state == START:
            if ch == "[":
                state = OPEN_SQ
            else:
                text_content += ch
        elif state == OPEN_SQ:
            if ch == "]":
                state = CLOSED_SQ
            else:
                text_content += ch
        elif state == CLOSED_SQ:
            if ch == "(":
                state = OPEN_CURVE
            else:
                text_content += ch
        elif state == OPEN_CURVE:
            if ch == ")":
                state = CLOSED_CURVE
            else:
                link_url += ch
        elif state == CLOSED_CURVE:
            # After the first complete link, everything is display text.
            text_content += ch
    if not link_url:
        # No markdown link found: treat the whole line as a URL.
        link_url = text_content
        text_content = ""
    return text_content.strip(), link_url.strip()
def _format_lat_or_long(val: str) -> None:
return val.strip("+")
def validate_geo(geo_body_raw: str) -> str:
    """Validate and normalise a ``geolocation:`` value from a report.

    :param geo_body_raw: raw text after the ``geolocation:`` prefix,
        e.g. ``"(45.123456, -122.654321)"``.
    :return: ``"<lat>, <long>"`` with '+' signs stripped, or ``""`` for
        empty input.
    :raises ValueError: if either coordinate fails the LAT_REGEX /
        LONG_REGEX precision checks.  Previously a non-matching coordinate
        crashed with ``AttributeError`` on ``None.group`` before the
        intended ValueError could fire.
    """
    geo_body = geo_body_raw.strip()
    if geo_body == "":
        return ""
    lat_match = LAT_REGEX.match(geo_body)
    long_match = LONG_REGEX.match(geo_body)
    if lat_match is None or long_match is None:
        raise ValueError(f"Could not parse geolocation: {geo_body}")
    parsed_lat = _format_lat_or_long(lat_match.group(1))
    parsed_long = _format_lat_or_long(long_match.group(1))
    return f"{parsed_lat}, {parsed_long}"
def parse_state(state, text):
    """Parse one location's markdown into incident entry dicts (generator).

    ``text`` is the raw contents of ``reports/<state>.md``.  Recognised
    line markers: ``##`` city heading, ``###`` incident title
    ("name | date"), ``*`` bullet with an evidence link, ``**`` a
    decorative "links" line.  Unmarked lines carry ``id:`` / ``tags:`` /
    ``geolocation:`` metadata or free-text description.  Yields one dict
    per incident; raises ValueError if the final incident has no links.
    """
    source_link = f"https://github.com/2020PB/police-brutality/blob/main/reports/{state}.md"
    city = ""
    if state == "Washington DC":
        city = "DC"
    if state == "Unknown Location":
        city = ""
    # Template for a fresh incident entry; deep-copied for each incident.
    clean_entry = {
        "links": [],
        "links_v2": [],
        "state": state,
        "edit_at": source_link,
        "city": city,
        "description": "",
        "tags": [],
        "geolocation": "",
    }
    entry = copy.deepcopy(clean_entry)
    for line in text.splitlines():
        line = line.strip()
        # if len(line) < 2:
        #     continue
        # Collect the leading markdown marker characters (e.g. "##", "*").
        starts_with = ""
        for char in line:
            if char in ("*", "#", "-"):
                starts_with += char
            else:
                break
        if entry["links"] and "#" in starts_with:
            # We found a new city name so we must finish this `entry`
            # and start a fresh new one
            # Let the outer loop have this completed entry
            yield finalize_entry(entry)
            # Start a new entry
            entry = copy.deepcopy(clean_entry)
            # If we already parsed a city, keep it in there
            entry["city"] = city
        # remove the prefix
        line = line[len(starts_with) :].strip()
        if starts_with == "##":
            city = line
            entry["city"] = city
        elif starts_with == "###":
            name, date, date_text = title_to_name_date(line)
            # print(name, date)
            entry["name"] = name
            entry["date"] = date
            entry["date_text"] = date_text
        elif starts_with == "*":
            link_text, link_url = find_md_link_or_url(line)
            if link_url:
                entry["links"].append(link_url)
                entry["links_v2"].append(
                    {"url": link_url, "text": link_text,}
                )
            else:
                print("Data build failed, exiting")
                critical_exit(f"Failed link parse '{line}' in state '{state}'")
        elif starts_with == "**":
            # **links** line
            pass
        else:
            # Text without a markdown marker, this might be the description or metadata
            id_prefix = "id:"
            tags_prefix = "tags:"
            lat_long_prefix = "geolocation:"
            if line.startswith(id_prefix):
                entry["id"] = line[len(id_prefix) :].strip()
            elif line.startswith(tags_prefix):
                spacey_tags = line[len(tags_prefix) :].split(",")
                entry["tags"] = [tag.strip() for tag in spacey_tags]
            elif line.startswith(lat_long_prefix):
                entry["geolocation"] = validate_geo(line[len(lat_long_prefix) :].lstrip())
                pass
            else:
                # Add a line to the description, but make sure there are no extra
                # new lines surrounding it.
                # entry["description"] = (entry["description"] + '\n' + line).strip()
                # We want to allow as many newlines as are already in the middle of the description
                # but not allow any extra newlines in the end or beginning. The only way
                # to do that right now is right before we `yield`
                entry["description"] += line + "\n"
    # NOTE(review): only the trailing entry is validated for links here;
    # earlier entries were already yielded above.
    if entry and entry["links"]:
        yield finalize_entry(entry)
    else:
        raise ValueError(f"Failed links parse: missing links for {entry}")
def process_md_texts(md_texts):
    """Flatten every state's markdown into a single list of entry dicts."""
    entries = []
    for state_name, markdown in md_texts.items():
        entries.extend(parse_state(state_name, markdown))
    return entries
updated_at = datetime.now(timezone.utc).isoformat()
md_header = f"""
GENERATED FILE, PLEASE MAKE EDITS ON MASTER AT https://github.com/2020PB/police-brutality/
UPDATED AT: {updated_at}
"""
md_out_format = """
# {location}
{text}
"""
def to_merged_md_file(md_texts, target_path):
    """Concatenate all per-location markdown (sorted by location) into one
    generated .md file prefixed with the do-not-edit header."""
    with open(target_path, "wb") as out:
        out.write(md_header.encode("utf-8"))
        for location, text in sorted(md_texts.items()):
            section = md_out_format.format(location=location, text=text)
            out.write(section.encode("utf-8"))
    print(f"Written merged .md data to {target_path}")
def to_csv_file_v1(data, target_path):
    """Write the v1 rows to CSV, flattening each row's ``links`` list into
    ``Link 1`` .. ``Link N`` columns (N = length of the longest list).

    Previously an empty ``data`` crashed on ``max()`` of an empty sequence
    and on ``flat_data[0]``; it is now a no-op with a warning, since the
    CSV header cannot be derived from zero rows.
    """
    if not data:
        print(f"No data; skipped writing {target_path}")
        return
    max_link_count = max(len(it["links"]) for it in data)
    flat_data = []
    for row in data:
        # Shallow copy so the caller's dicts are not mutated.
        flat_row = row.copy()
        links = flat_row.pop("links")
        # One column per link slot; missing slots are blank.
        for i in range(max_link_count):
            flat_row[f"Link {i + 1}"] = links[i] if i < len(links) else ""
        flat_data.append(flat_row)
    with open(target_path, "w", newline="", encoding="utf-8") as fout:
        writer = csv.DictWriter(fout, flat_data[0].keys())
        writer.writeheader()
        writer.writerows(flat_data)
    print(f"Written .csv data to {target_path}")
def to_json_file_v1(data, target_path):
    """Dump the v1 rows, wrapped with provenance metadata, as JSON."""
    data_with_meta = {
        "edit_at": "https://github.com/2020PB/police-brutality",
        "help": "ask @ubershmekel on twitter",
        # Build timestamp captured once at module import time.
        "updated_at": updated_at,
        "data": data,
    }
    with open(target_path, "w") as f:
        json.dump(data_with_meta, f)
    print(f"Written .json data to {target_path}")
def v2_only(item):
    """Return a deep copy of ``item`` with the richer ``links_v2`` entries
    promoted to ``links`` (the v2 schema); the original is untouched."""
    result = copy.deepcopy(item)
    result["links"] = result.pop("links_v2")
    return result
def to_json_file_v2(data, target_path):
    """Dump all entries in the v2 schema (structured link objects) as JSON,
    wrapped with provenance metadata."""
    v2_data = [v2_only(item) for item in data]
    data_with_meta = {
        "edit_at": "https://github.com/2020PB/police-brutality",
        "help": "ask @ubershmekel on twitter",
        # Build timestamp captured once at module import time.
        "updated_at": updated_at,
        "data": v2_data,
    }
    with open(target_path, "w") as f:
        json.dump(data_with_meta, f)
    print(f"Written .json v2 data to {target_path}")
readme_text = """
# /r/2020PoliceBrutality/ dataset
This repository exists to accumulate and contextualize evidence of police brutality during the 2020 George Floyd protests.
Our goal in doing this is to assist journalists, politicians, prosecutors, activists and concerned individuals who can use the evidence accumulated here for political campaigns, news reporting, public education and prosecution of criminal police officers.
* This branch is just the files generated by parsing the markdown for ease of building other sites.
* For example your webapp can query and display data from https://raw.githubusercontent.com/2020PB/police-brutality/data_build/all-locations.json
* For more info see https://github.com/2020PB/police-brutality
* These data files are generated by https://github.com/2020PB/police-brutality/tree/main/tools
# THESE FILES ARE GENERATED - DO NOT EDIT (including this readme)
# THESE FILES ARE GENERATED - DO NOT EDIT (including this readme)
* Please edit the `.md` files on the `main` branch at https://github.com/2020PB/police-brutality
* Also notice each data row has a `edit_at` link so you can find the source data for every entry.
"""
def to_readme(target_path):
    """Write the generated-branch README warning users not to edit it."""
    with open(target_path, "w") as f:
        f.write(readme_text)
def read_all_data():
    """Convenience wrapper: parse all reports/*.md into entry dicts."""
    md_texts = read_all_md_files(md_dir)
    data = process_md_texts(md_texts)
    return data
def v1_only(item):
    """Return a deep copy of ``item`` restricted to the legacy v1 keys;
    the caller's dict is not modified."""
    v1_keys = {"links", "state", "city", "edit_at", "name", "date", "date_text", "id"}
    clone = copy.deepcopy(item)
    # Snapshot the keys so deletion during iteration is safe.
    for key in list(clone):
        if key not in v1_keys:
            del clone[key]
    return clone
if __name__ == "__main__":
    # Parse every reports/*.md into structured entries, then emit all
    # generated artifacts: merged markdown, v2 JSON, and legacy v1 CSV/JSON.
    md_texts = read_all_md_files(md_dir)
    data = process_md_texts(md_texts)
    to_merged_md_file(md_texts, combined_fpath)
    to_json_file_v2(data, json_fpath_v2)
    # v1 outputs keep only the legacy keys/columns.
    v1_data = [v1_only(item) for item in data]
    to_csv_file_v1(v1_data, csv_fpath_v1)
    to_json_file_v1(v1_data, json_fpath_v1)
    to_readme(readme_fpath)
    print("Done!")
| 7,638 | 0 | 368 |
597a0880ab1292080a540b2aee82937627ce1567 | 2,007 | py | Python | server/edd/urls.py | trussworks/edd | 1183c6bb1f04de31d8c4c6ed7b97b051228f3bb7 | [
"BSD-3-Clause-LBNL"
] | 13 | 2016-11-15T07:33:40.000Z | 2021-09-22T12:19:13.000Z | server/edd/urls.py | trussworks/edd | 1183c6bb1f04de31d8c4c6ed7b97b051228f3bb7 | [
"BSD-3-Clause-LBNL"
] | 40 | 2017-04-04T15:20:14.000Z | 2022-03-31T04:34:37.000Z | server/edd/urls.py | trussworks/edd | 1183c6bb1f04de31d8c4c6ed7b97b051228f3bb7 | [
"BSD-3-Clause-LBNL"
] | 10 | 2017-09-21T07:27:01.000Z | 2022-03-10T17:02:19.000Z | from django.conf import settings
# Root URL configuration for the EDD Django project.
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.contrib.flatpages import views as flatpage_views
from django.http import HttpResponse
from django.urls import include, path, re_path

from edd.branding.views import favicon as favicon_view

# Auto-discover admin registrations from installed apps.
admin.autodiscover()

# REST API routes plus DRF's browsable-API auth views.
rest_urlpatterns = [
    path("", include("edd.rest.urls", namespace="rest")),
    path("auth/", include("rest_framework.urls", namespace="rest_framework")),
]

urlpatterns = [
    # make sure to match the path to favicon *exactly*
    re_path(r"favicon\.ico$", favicon_view, name="favicon"),
    # simplest possible view for healthcheck
    path("health/", lambda request: HttpResponse(), name="healthcheck"),
    path("admin/", admin.site.urls),
    path("", include("main.urls", namespace="main")),
    path("export/", include("edd.export.urls", namespace="export")),
    # allauth does not support namespacing
    path("accounts/", include("allauth.urls")),
    path("utilities/", include("tools.urls", namespace="tools")),
    path("profile/", include("edd.profile.urls", namespace="profile")),
    path("", include("edd.campaign.urls", namespace="campaign")),
    path("rest/", include(rest_urlpatterns)),
    # flatpages.urls does not include app_name; cannot include it with namespace
    # path('pages/', include('django.contrib.flatpages.urls', namespace='flatpage'))
    path("pages/<path:url>", flatpage_views.flatpage, name="flatpage"),
]

# Optional GraphQL endpoints, only wired up when EDD_ENABLE_GRAPHQL is set.
if getattr(settings, "EDD_ENABLE_GRAPHQL", False):
    from graphene_django.views import GraphQLView

    urlpatterns += [
        path(
            "explore/",
            login_required(GraphQLView.as_view(graphiql=True)),
            name="graphiql",
        ),
        path("graphql/", login_required(GraphQLView.as_view()), name="graphql",),
    ]

# Debug-toolbar routes are mounted in DEBUG mode only.
if getattr(settings, "DEBUG", False):
    import debug_toolbar

    urlpatterns += [path("__debug__/", include(debug_toolbar.urls, namespace="djdt"))]
| 37.867925 | 86 | 0.694071 | from django.conf import settings
# Root URL configuration for the EDD Django project.
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.contrib.flatpages import views as flatpage_views
from django.http import HttpResponse
from django.urls import include, path, re_path

from edd.branding.views import favicon as favicon_view

# Auto-discover admin registrations from installed apps.
admin.autodiscover()

# REST API routes plus DRF's browsable-API auth views.
rest_urlpatterns = [
    path("", include("edd.rest.urls", namespace="rest")),
    path("auth/", include("rest_framework.urls", namespace="rest_framework")),
]

urlpatterns = [
    # make sure to match the path to favicon *exactly*
    re_path(r"favicon\.ico$", favicon_view, name="favicon"),
    # simplest possible view for healthcheck
    path("health/", lambda request: HttpResponse(), name="healthcheck"),
    path("admin/", admin.site.urls),
    path("", include("main.urls", namespace="main")),
    path("export/", include("edd.export.urls", namespace="export")),
    # allauth does not support namespacing
    path("accounts/", include("allauth.urls")),
    path("utilities/", include("tools.urls", namespace="tools")),
    path("profile/", include("edd.profile.urls", namespace="profile")),
    path("", include("edd.campaign.urls", namespace="campaign")),
    path("rest/", include(rest_urlpatterns)),
    # flatpages.urls does not include app_name; cannot include it with namespace
    # path('pages/', include('django.contrib.flatpages.urls', namespace='flatpage'))
    path("pages/<path:url>", flatpage_views.flatpage, name="flatpage"),
]

# Optional GraphQL endpoints, only wired up when EDD_ENABLE_GRAPHQL is set.
if getattr(settings, "EDD_ENABLE_GRAPHQL", False):
    from graphene_django.views import GraphQLView

    urlpatterns += [
        path(
            "explore/",
            login_required(GraphQLView.as_view(graphiql=True)),
            name="graphiql",
        ),
        path("graphql/", login_required(GraphQLView.as_view()), name="graphql",),
    ]

# Debug-toolbar routes are mounted in DEBUG mode only.
if getattr(settings, "DEBUG", False):
    import debug_toolbar

    urlpatterns += [path("__debug__/", include(debug_toolbar.urls, namespace="djdt"))]
| 0 | 0 | 0 |
a6166bacafb2941014a2080438fc330df92d394d | 1,387 | py | Python | venv/Lib/site-packages/prompt_toolkit/output/color_depth.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | venv/Lib/site-packages/prompt_toolkit/output/color_depth.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | venv/Lib/site-packages/prompt_toolkit/output/color_depth.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | import os
from enum import Enum
from typing import Optional
__all__ = [
"ColorDepth",
]
class ColorDepth(str, Enum):
    """
    Possible color depth values for the output.
    """

    value: str

    #: One color only.
    DEPTH_1_BIT = "DEPTH_1_BIT"
    #: ANSI Colors.
    DEPTH_4_BIT = "DEPTH_4_BIT"
    #: The default.
    DEPTH_8_BIT = "DEPTH_8_BIT"
    #: 24 bit True color.
    DEPTH_24_BIT = "DEPTH_24_BIT"

    # Aliases.
    MONOCHROME = DEPTH_1_BIT
    ANSI_COLORS_ONLY = DEPTH_4_BIT
    DEFAULT = DEPTH_8_BIT
    TRUE_COLOR = DEPTH_24_BIT

    @classmethod
    def from_env(cls) -> Optional["ColorDepth"]:
        """
        Return the color depth selected via the $PROMPT_TOOLKIT_COLOR_DEPTH
        environment variable, or None when it is unset or holds an
        unrecognized value.

        This is a way to enforce a certain color depth in all prompt_toolkit
        applications.
        """
        requested = os.environ.get("PROMPT_TOOLKIT_COLOR_DEPTH")
        if requested is not None and requested in {depth.value for depth in cls}:
            return cls(requested)
        return None

    @classmethod
    def default(cls) -> "ColorDepth":
        """
        Return the default color depth for the default output.
        """
        from .defaults import create_output

        return create_output().get_default_color_depth()
| 23.508475 | 77 | 0.64672 | import os
from enum import Enum
from typing import Optional
__all__ = [
"ColorDepth",
]
class ColorDepth(str, Enum):
    """
    Possible color depth values for the output.

    Each member's value equals its own name string, so members can be
    constructed directly from the environment variable (see `from_env`).
    """

    value: str

    #: One color only.
    DEPTH_1_BIT = "DEPTH_1_BIT"

    #: ANSI Colors.
    DEPTH_4_BIT = "DEPTH_4_BIT"

    #: The default.
    DEPTH_8_BIT = "DEPTH_8_BIT"

    #: 24 bit True color.
    DEPTH_24_BIT = "DEPTH_24_BIT"

    # Aliases.
    MONOCHROME = DEPTH_1_BIT
    ANSI_COLORS_ONLY = DEPTH_4_BIT
    DEFAULT = DEPTH_8_BIT
    TRUE_COLOR = DEPTH_24_BIT

    @classmethod
    def from_env(cls) -> Optional["ColorDepth"]:
        """
        Return the color depth if the $PROMPT_TOOLKIT_COLOR_DEPTH environment
        variable has been set.

        This is a way to enforce a certain color depth in all prompt_toolkit
        applications.
        """
        # Check the `PROMPT_TOOLKIT_COLOR_DEPTH` environment variable.
        # An unset or unrecognized value yields None (caller decides default).
        all_values = [i.value for i in ColorDepth]
        if os.environ.get("PROMPT_TOOLKIT_COLOR_DEPTH") in all_values:
            return cls(os.environ["PROMPT_TOOLKIT_COLOR_DEPTH"])

        return None

    @classmethod
    def default(cls) -> "ColorDepth":
        """
        Return the default color depth for the default output.
        """
        # NOTE(review): imported lazily -- presumably to avoid a circular
        # import with the output defaults module; confirm.
        from .defaults import create_output

        return create_output().get_default_color_depth()
| 0 | 0 | 0 |
63208333abce488e6b6180083bdb0989bfa6ab67 | 2,771 | py | Python | spectratmo/util.py | tabataba/spectratmo-puma | 3cd9e07df012b5a7a3f298c68f8607c225b6f6a8 | [
"CECILL-B"
] | 1 | 2020-02-12T23:05:19.000Z | 2020-02-12T23:05:19.000Z | spectratmo/util.py | tabataba/spectratmo-puma | 3cd9e07df012b5a7a3f298c68f8607c225b6f6a8 | [
"CECILL-B"
] | null | null | null | spectratmo/util.py | tabataba/spectratmo-puma | 3cd9e07df012b5a7a3f298c68f8607c225b6f6a8 | [
"CECILL-B"
] | 1 | 2020-02-12T23:05:35.000Z | 2020-02-12T23:05:35.000Z | """Utilities
============
.. autoclass:: SetOfVariables
:members:
:private-members:
.. autoclass:: SetOfSpectra
:members:
:private-members:
.. autoclass:: StackOfSetOfVariables
:members:
:private-members:
"""
import numpy as np
from spectratmo.phys_const import g, Gamma_dah
class SetOfVariables(object):
"""Set of variables on one pressure level."""
__radd__ = __add__
__rmul__ = __mul__
class SetOfSpectra(SetOfVariables):
"""Set of energy spectra."""
class StackOfSetOfVariables(object):
"""..."""
| 28.56701 | 68 | 0.595453 | """Utilities
============
.. autoclass:: SetOfVariables
:members:
:private-members:
.. autoclass:: SetOfSpectra
:members:
:private-members:
.. autoclass:: StackOfSetOfVariables
:members:
:private-members:
"""
import numpy as np
from spectratmo.phys_const import g, Gamma_dah
class SetOfVariables(object):
    """Set of variables on one pressure level.

    Values live in the ``ddata`` dict. Arithmetic operators combine entries
    element-wise (scalars or numpy arrays) and return a *new* instance; the
    operands are never modified.
    """

    def __init__(self, name_type_variables):
        # Tag describing what kind of variables this set holds.
        self.name_type_variables = name_type_variables
        self.ddata = dict()

    def _new_from(self, ddata):
        """Build a sibling instance of the same type holding *ddata*."""
        result = SetOfVariables(self.name_type_variables)
        result.ddata = ddata
        return result

    def __add__(self, other):
        """Add another set (key-wise) or a scalar (to every entry)."""
        # NOTE: .items() replaces the Python-2-only .iteritems(), which
        # raised AttributeError on Python 3.
        if isinstance(other, SetOfVariables):
            return self._new_from({k: v + other.ddata[k]
                                   for k, v in self.ddata.items()})
        if isinstance(other, (int, float)):
            return self._new_from({k: v + other
                                   for k, v in self.ddata.items()})
        raise ValueError()

    __radd__ = __add__

    def __sub__(self, other):
        """Subtract another SetOfVariables key-wise."""
        if not isinstance(other, SetOfVariables):
            raise ValueError()
        return self._new_from({k: v - other.ddata[k]
                               for k, v in self.ddata.items()})

    def __mul__(self, other):
        """Multiply every entry by a scalar."""
        if not isinstance(other, (int, float)):
            raise ValueError()
        return self._new_from({k: other * v
                               for k, v in self.ddata.items()})

    __rmul__ = __mul__

    def __div__(self, other):
        """Divide every entry by a scalar (Python 2 operator name)."""
        if not isinstance(other, (int, float)):
            raise ValueError()
        return self._new_from({k: v / other
                               for k, v in self.ddata.items()})

    # Python 3 dispatches '/' to __truediv__; alias it so division works
    # on both interpreter versions.
    __truediv__ = __div__

    def reset_to_zeros(self):
        """Zero every entry: scalars are replaced, arrays zeroed in place."""
        for key, item_data in self.ddata.items():
            if np.isscalar(item_data):
                self.ddata[key] = 0.
            else:
                item_data[:] = 0.
class SetOfSpectra(SetOfVariables):
    """Set of energy spectra."""

    def add_APE_spectrum(self, Tmean, GammaMean):
        """Derive APE spectra from the temperature spectra in ``ddata``.

        Fills ``E_APE_n`` / ``E_APEb_n`` from ``E_TT_n`` / ``E_TTb_n``
        using g and the dry-adiabatic lapse rate Gamma_dah (presumably
        GammaMean is the mean lapse rate of the profile -- TODO confirm).
        """
        # Shared denominator of both conversions; the multiply-then-divide
        # order of the original expressions is kept unchanged.
        denom = Tmean * (Gamma_dah - GammaMean)
        self.ddata['E_APE_n'] = self.ddata['E_TT_n'] * g / denom
        self.ddata['E_APEb_n'] = self.ddata['E_TTb_n'] * g / denom
class StackOfSetOfVariables(object):
    """Placeholder for a stack of SetOfVariables objects.

    No behaviour is implemented yet (skeleton class); presumably intended
    to hold one SetOfVariables per pressure level -- TODO confirm.
    """
| 2,028 | 0 | 188 |
8467b955db47e5c9760d26d8fee35028c732a1da | 11,009 | py | Python | bot.py | EndorFinee/PCBot | 08f6f3e2befe5e15c5f874492d1085b5efcc828c | [
"MIT"
] | null | null | null | bot.py | EndorFinee/PCBot | 08f6f3e2befe5e15c5f874492d1085b5efcc828c | [
"MIT"
] | null | null | null | bot.py | EndorFinee/PCBot | 08f6f3e2befe5e15c5f874492d1085b5efcc828c | [
"MIT"
] | 1 | 2019-07-07T14:21:02.000Z | 2019-07-07T14:21:02.000Z | # -*- coding: utf-8 -*-
import os
import telebot
import config
import random
import sys
import time
import psycopg2
from telebot import types
# Config vars
token = os.environ['TELEGRAM_TOKEN']
DATABASE_URL=os.environ['DATABASE_URL']
connect = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = connect.cursor()
#some_api_token = os.environ['SOME_API_TOKEN']
# some_api = some_api_lib.connect(some_api_token)
# ...
bot = telebot.TeleBot(token)
mu = types.ReplyKeyboardMarkup(resize_keyboard=True)
mu.row('Еще!')
@bot.message_handler(commands=['start'])
@bot.message_handler(content_types=["text"])
@bot.callback_query_handler(func=lambda call: True)
if __name__ == '__main__':
bot.polling(none_stop=True)
| 65.922156 | 161 | 0.619039 | # -*- coding: utf-8 -*-
import os
import telebot
import config
import random
import sys
import time
import psycopg2
from telebot import types
# Config vars
token = os.environ['TELEGRAM_TOKEN']
DATABASE_URL=os.environ['DATABASE_URL']
connect = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = connect.cursor()
#some_api_token = os.environ['SOME_API_TOKEN']
# some_api = some_api_lib.connect(some_api_token)
# ...
bot = telebot.TeleBot(token)
mu = types.ReplyKeyboardMarkup(resize_keyboard=True)
mu.row('Еще!')
@bot.message_handler(commands=['start'])
def start_msg(message):
    """Handle /start: greet the user, then offer the age choices."""
    bot.send_message(message.chat.id, "Привет!", reply_markup=mu)
    age_keyboard = types.InlineKeyboardMarkup()
    # All three age buttons go on a single keyboard row.
    age_keyboard.add(
        types.InlineKeyboardButton(text="6-9", callback_data="kids"),
        types.InlineKeyboardButton(text="10-13", callback_data="keys"),
        types.InlineKeyboardButton(text="14-17", callback_data="keep"),
    )
    bot.send_message(message.chat.id, "Выбери возраст!",
                     reply_markup=age_keyboard)
@bot.message_handler(content_types=["text"])
def any_msg(message):
    """Handle any other text message: re-show the age-selection keyboard."""
    choices = (("6-9", "kids"), ("10-13", "keys"), ("14-17", "keep"))
    buttons = [types.InlineKeyboardButton(text=caption, callback_data=data)
               for caption, data in choices]
    age_keyboard = types.InlineKeyboardMarkup()
    # Single add() call keeps all three buttons on one row.
    age_keyboard.add(*buttons)
    bot.send_message(message.chat.id, "Выбери возраст!",
                     reply_markup=age_keyboard)
# Age-group codes keyed by the callback data of the age buttons.
_AGE_GROUPS = {"kids": 1, "keys": 2, "keep": 3}

# SQL age restriction for each age-group code (exactly the clauses the
# original handler inlined in every one of its 21 branches).
_AGE_CLAUSES = {
    1: "(age = '6-9' OR age = '0' OR age = '6-13')",
    2: "(age = '10-17' OR age = '0' OR age = '6-13')",
    3: "(age = '10-17' OR age = '0' OR age = '14-17')",
}

# Callback data of the game-type buttons; each is matched against the
# ``type`` column with LIKE '%<name>%'.
_GAME_TYPES = ("inter", "street", "joke", "relax", "quick", "touch", "know")


def _game_type_keyboard():
    """Build the inline keyboard offering every game type."""
    keyboard = types.InlineKeyboardMarkup()
    keyboard.add(types.InlineKeyboardButton(text="На взаимодействие",
                                            callback_data="inter"))
    keyboard.add(types.InlineKeyboardButton(text="На снятие напряжения",
                                            callback_data="relax"))
    keyboard.add(
        types.InlineKeyboardButton(text="Уличные", callback_data="street"),
        types.InlineKeyboardButton(text="Шутки", callback_data="joke"),
        types.InlineKeyboardButton(text="Быстрые", callback_data="quick"),
        types.InlineKeyboardButton(text="Тактильные", callback_data="touch"))
    keyboard.add(types.InlineKeyboardButton(text="На знакомство",
                                            callback_data="know"))
    return keyboard


def _random_game(game_type, age_group):
    """Fetch one random game of *game_type* suited to *age_group* (1-3)."""
    # Both interpolated values come from the fixed whitelists above, never
    # from raw user input, so building the SQL with format() is safe here.
    cursor.execute(
        "SELECT game FROM games WHERE type LIKE '%{0}%' AND {1} "
        "ORDER BY RANDOM() LIMIT 1 ".format(game_type,
                                            _AGE_CLAUSES[age_group]))
    return cursor.fetchall()


@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
    """Dispatch inline-button presses.

    Age buttons store the chosen age group in the module-level ``a`` and
    swap the message to the game-type keyboard; game-type buttons answer
    with one random matching game from the database.

    NOTE(review): ``a`` is a single global shared by every chat, so
    concurrent users can overwrite each other's age choice, and pressing a
    game-type button before choosing an age raises an error -- confirm
    whether per-chat state is wanted.
    """
    global a
    if not call.message:
        return
    if call.data in _AGE_GROUPS:
        a = _AGE_GROUPS[call.data]
        bot.edit_message_text(chat_id=call.message.chat.id,
                              message_id=call.message.message_id,
                              text="Выбери тип игры!",
                              reply_markup=_game_type_keyboard())
    elif call.data in _GAME_TYPES:
        bot.edit_message_text(chat_id=call.message.chat.id,
                              message_id=call.message.message_id,
                              text=_random_game(call.data, a))
if __name__ == '__main__':
    # Start long polling; none_stop keeps the bot alive through API errors.
    bot.polling(none_stop=True)
| 10,499 | 0 | 66 |
3e544d67615f49b64523f295d335b5698c500a37 | 26 | py | Python | simple_speech/__init__.py | innovationOUtside/nb_simple_speech | c9e78980de1813c68e82d3df32393f2ee079c9f4 | [
"MIT"
] | null | null | null | simple_speech/__init__.py | innovationOUtside/nb_simple_speech | c9e78980de1813c68e82d3df32393f2ee079c9f4 | [
"MIT"
] | null | null | null | simple_speech/__init__.py | innovationOUtside/nb_simple_speech | c9e78980de1813c68e82d3df32393f2ee079c9f4 | [
"MIT"
] | null | null | null | from .speech import Speech | 26 | 26 | 0.846154 | from .speech import Speech | 0 | 0 | 0 |
cc897b11910cd602b094ea8e7f9823468db50c02 | 16,297 | py | Python | vase/vep_filter.py | david-a-parry/vase | 698c19eb1d92b56248ad0a1d32be3e4f21137cf5 | [
"MIT"
] | 4 | 2020-03-25T06:09:39.000Z | 2021-03-23T11:22:00.000Z | vase/vep_filter.py | gantzgraf/vase | 52eb6a514fd89e7c4e290c258fdbbc1636db6168 | [
"MIT"
] | 1 | 2020-10-02T14:50:30.000Z | 2020-10-12T15:24:24.000Z | vase/vep_filter.py | gantzgraf/vase | 52eb6a514fd89e7c4e290c258fdbbc1636db6168 | [
"MIT"
] | 1 | 2021-02-20T11:32:34.000Z | 2021-02-20T11:32:34.000Z | import os
import logging
from .insilico_filter import InSilicoFilter
from .csq_filter import CsqFilter
lof_csq = {'frameshift_variant', 'stop_gained', 'splice_acceptor_variant',
'splice_donor_variant'}
class VepFilter(CsqFilter):
'''An object that filters VCF records based on annotated VEP data.'''
def __init__(self, vcf, csq=[], impact=[], canonical=False, biotypes=[],
in_silico=[], filter_unpredicted=False,
keep_any_damaging=False, splice_in_silico=[],
loftee=False, splice_filter_unpredicted=False,
splice_keep_any_damaging=False, retain_labels=[],
filter_flagged_features=False, freq=None, min_freq=None,
afs=[], gene_filter=None, blacklist=None,filter_known=False,
filter_novel=False, pathogenic=False, no_conflicted=False,
g2p=None, check_g2p_consequence=False,
logging_level=logging.WARNING):
'''
Args:
vcf: input VcfReader object
csq: list of consequence types to keep. If 'default'
appears anywhere in this list then the default
consequence set (as indicated in
data/vep_classes.tsv) will be used. Similarly if
'all' appears anywhere in this list no filtering
on consequence type will occur.
impact: list of variant impacts to retain.
canonical:
Filter consequences on non-canonical transcirpts.
biotypes:
Filter consequences for features not of the
given biotypes. If not provided the default set
of biotypes (as indicated in data/biotypes.tsv)
will be used for biotype filtering.
in_silico:
List of programs and optionally score criteria
for filtering of missense variants using the
InSilicoFilter class in vase.insilico_filter.
filter_unpredicted:
If using 'in_silico' option, filter missense
variants that have missing values for any of the
specified filtering programs.
keep_any_damaging:
If using 'in_silico' option, retain variants if
any of the criteria are met for any of the
specified filtering programs.
loftee: Only retain LoF (i.e. high impact variants)
variants if the LoF annotation from loftee is
'HC' (high confidence).
splice_in_silico:
Similar to 'in_silico' but the prediction
programs are checked for splice_donor_variants,
splice_acceptor_variants and
splice_region_variants rather than missense.
Currently only dbscSNV (rf_score and ada_score),
MaxEntScan and SpliceDistance
(https://github.com/david-a-parry/SpliceDistance)
annotations are supported. This option can be
used to, for example, retain
splice region variants that are have
an 'ada_score' > 0.6 by specifying
'ada_score=0.6' with this option.
splice_filter_unpredicted:
If using 'splice_in_silico' option, filter
splice region variants that have missing values
for any of the specified filtering programs.
splice_keep_any_damaging:
If using 'splice_in_silico' option, retain
variants if any of the criteria are met for any
of the specified filtering programs.
retain_labels:
Do not filter on consequence type if the
following values are present for a label. Labels
and values must be separated by '=' sign. For
example, to retain any consequence which has
a VEP annotation named 'FOO' with value 'BAR'
use 'FOO=BAR'.
filter_flagged_features:
Filter consequences on features which are
flagged by VEP.
freq: Filter consequences if the annotated allele
frequency is equal to or greater than this value.
By default all allele frequency annotations as
listed in "data/vep_maf.tsv" are used, but this
can be altered using the 'afs' option.
min_freq:
As for 'freq' argument but filters consequences
if the allele frequency annotation is less than
this value.
filter_known:
Filter consequences if allele frequency is given
for any of the available VEP frequency
annotations.
filter_novel:
Filter consequences if no allele frequency is
given for any of the available VEP frequency
annotations.
afs: Only use the listed allele frequency annotations
for freq/min_freq/novelty filtering.
gene_filter:
VarByRegion object from vase.var_by_region. If
provided, consequences will be filtered if they
do not alter the features specified in the
VarByRegion object for the current region.
blacklist:
File containing a list of Feature IDs to ignore.
pathogenic:
If True, retain consequences regardless of type
if annotated as 'pathogenic' or 'likely
pathogenic' in 'CLIN_SIG' or 'clinvar_clnsig'
VEP fields. Frequency, biotype and canonical
filtering will still be applied.
no_conflicted:
If 'pathogenic' option is True, only retain
'likely pathogenic' and 'pathogenic'
consequences if there are no conflicting
'benign' or 'likely benign' assertions.
g2p:
G2P object from vase.g2p for filtering on
presence and/or requirements from a G2P file.
check_g2p_consequence:
If a G2P object is provided above, require that
that the observed consequence matches the
'mutation consequence' in the G2P file.
logging_level:
Logging level to use. Default=logging.WARNING.
'''
self.logger = self._get_logger(logging_level)
self.canonical = canonical
self.loftee = loftee
self.filter_flagged = filter_flagged_features
self.freq = freq
self.min_freq = min_freq
self.afs = afs
self.filter_known = filter_known
self.filter_novel = filter_novel
self._check_freq_fields(vcf)
self.in_silico = False
self.splice_in_silico = False
if in_silico:
in_silico = set(in_silico)
self.in_silico = InSilicoFilter(in_silico, filter_unpredicted,
keep_any_damaging)
if splice_in_silico:
splice_in_silico = set(splice_in_silico)
self.splice_in_silico = InSilicoFilter(
programs=splice_in_silico,
filter_unpredicted=splice_filter_unpredicted,
keep_if_any_damaging=splice_keep_any_damaging,
pred_file=os.path.join(os.path.dirname(__file__),
"data",
"vep_splice_insilico_pred.tsv"))
self.pathogenic = pathogenic
self.no_conflicted = no_conflicted
if pathogenic:
self.path_fields = self._get_path_fields(vcf)
super().__init__(vcf=vcf, csq_attribute='CSQ', csq=csq, impact=impact,
biotypes=biotypes, retain_labels=retain_labels,
filter_flagged_features=filter_flagged_features,
gene_filter=gene_filter, blacklist=blacklist, g2p=g2p,
check_g2p_consequence=check_g2p_consequence)
def filter_csq(self, csq):
'''
Returns two boolean values. The first indicates whether the consequence
annotation should be filtered. The second indicates whether the ALT
allele should be filtered irrespective of the given or any other
consequence annotation.
'''
if self.canonical:
try:
if csq['CANONICAL'] != 'YES':
return True, False
except KeyError:
pass
if self.filter_flagged:
try:
if csq['FLAGS']:
return True, False
except KeyError:
pass
if (self.biotypes is not None and csq['BIOTYPE'].lower() not in
self.biotypes):
return True, False
if self.gene_filter:
if not self.gene_filter.target_in_csq(csq):
return True, False
if self.g2p:
if csq['SYMBOL'] not in self.g2p.g2p:
return True, False
if self.blacklist and csq['Feature'] in self.blacklist:
return True, False
if (self.freq or self.min_freq or self.filter_known or
self.filter_novel):
known = False
for af in self.freq_fields:
if csq[af] == '' or csq[af] == '.':
continue
try:
c_af = float(csq[af])
except ValueError:
try:
c_af = max(float(x) for x in csq[af].split('&') if x
!= '.')
except ValueError:
continue
known = True
if self.filter_known:
return True, True
if self.freq:
if c_af >= self.freq:
return True, True
if self.min_freq:
if c_af < self.min_freq:
return True, True
if self.filter_novel and not known:
return True, True
if (self.csq is None and self.impact is None and
not self.check_g2p_consequence):
# if only using biotypes/MAF for filtering
return False, False
if self.pathogenic and self._has_pathogenic_annotation(csq):
return False, False
if self._retain_label_matched(csq):
return False, False
if self.check_g2p_consequence and self.g2p:
filt_csq = self.g2p.consequences_from_gene(csq['SYMBOL'])
else:
filt_csq = self.csq
for s_csq in [x.lower() for x in csq['Consequence'].split('&')]:
matches_csq = False
matches_impact = False
if filt_csq is not None and s_csq in filt_csq:
matches_csq = True
if self.impact is not None and csq['IMPACT'] in self.impact:
matches_impact = True
if matches_csq or matches_impact:
if self.in_silico and s_csq == 'missense_variant':
do_filter = self.in_silico.filter(csq)
if not do_filter:
return False, False
elif self.splice_in_silico and s_csq.startswith("splice"):
do_filter = self.splice_in_silico.filter(csq)
if not do_filter:
return False, False
elif self.loftee and (s_csq in lof_csq or matches_impact
and csq['IMPACT'] == 'HIGH'):
if csq['LoF'] == 'HC':
return False, False
else:
return False, False
return True, False
def get_required_header_fields(self):
'''
Check which CSQ/ANN annotation fields are required given arguments
passed to __init__
'''
required = ['Consequence', 'BIOTYPE']
if self.impact:
required.append('IMPACT')
if self.canonical:
required.append('CANONICAL')
if self.loftee:
required.append('LoF')
if self.filter_flagged:
required.append('FLAGS')
return required
| 44.285326 | 79 | 0.517519 | import os
import logging
from .insilico_filter import InSilicoFilter
from .csq_filter import CsqFilter
lof_csq = {'frameshift_variant', 'stop_gained', 'splice_acceptor_variant',
'splice_donor_variant'}
class VepFilter(CsqFilter):
'''An object that filters VCF records based on annotated VEP data.'''
    def __init__(self, vcf, csq=[], impact=[], canonical=False, biotypes=[],
                 in_silico=[], filter_unpredicted=False,
                 keep_any_damaging=False, splice_in_silico=[],
                 loftee=False, splice_filter_unpredicted=False,
                 splice_keep_any_damaging=False, retain_labels=[],
                 filter_flagged_features=False, freq=None, min_freq=None,
                 afs=[], gene_filter=None, blacklist=None,filter_known=False,
                 filter_novel=False, pathogenic=False, no_conflicted=False,
                 g2p=None, check_g2p_consequence=False,
                 logging_level=logging.WARNING):
        '''
        Args:
            vcf:    input VcfReader object

            csq:    list of consequence types to keep. If 'default'
                    appears anywhere in this list then the default
                    consequence set (as indicated in
                    data/vep_classes.tsv) will be used. Similarly if
                    'all' appears anywhere in this list no filtering
                    on consequence type will occur.

            impact: list of variant impacts to retain.

            canonical:
                    Filter consequences on non-canonical transcirpts.

            biotypes:
                    Filter consequences for features not of the
                    given biotypes. If not provided the default set
                    of biotypes (as indicated in data/biotypes.tsv)
                    will be used for biotype filtering.

            in_silico:
                    List of programs and optionally score criteria
                    for filtering of missense variants using the
                    InSilicoFilter class in vase.insilico_filter.

            filter_unpredicted:
                    If using 'in_silico' option, filter missense
                    variants that have missing values for any of the
                    specified filtering programs.

            keep_any_damaging:
                    If using 'in_silico' option, retain variants if
                    any of the criteria are met for any of the
                    specified filtering programs.

            loftee: Only retain LoF (i.e. high impact variants)
                    variants if the LoF annotation from loftee is
                    'HC' (high confidence).

            splice_in_silico:
                    Similar to 'in_silico' but the prediction
                    programs are checked for splice_donor_variants,
                    splice_acceptor_variants and
                    splice_region_variants rather than missense.
                    Currently only dbscSNV (rf_score and ada_score),
                    MaxEntScan and SpliceDistance
                    (https://github.com/david-a-parry/SpliceDistance)
                    annotations are supported. This option can be
                    used to, for example, retain
                    splice region variants that are have
                    an 'ada_score' > 0.6 by specifying
                    'ada_score=0.6' with this option.

            splice_filter_unpredicted:
                    If using 'splice_in_silico' option, filter
                    splice region variants that have missing values
                    for any of the specified filtering programs.

            splice_keep_any_damaging:
                    If using 'splice_in_silico' option, retain
                    variants if any of the criteria are met for any
                    of the specified filtering programs.

            retain_labels:
                    Do not filter on consequence type if the
                    following values are present for a label. Labels
                    and values must be separated by '=' sign. For
                    example, to retain any consequence which has
                    a VEP annotation named 'FOO' with value 'BAR'
                    use 'FOO=BAR'.

            filter_flagged_features:
                    Filter consequences on features which are
                    flagged by VEP.

            freq:   Filter consequences if the annotated allele
                    frequency is equal to or greater than this value.
                    By default all allele frequency annotations as
                    listed in "data/vep_maf.tsv" are used, but this
                    can be altered using the 'afs' option.

            min_freq:
                    As for 'freq' argument but filters consequences
                    if the allele frequency annotation is less than
                    this value.

            filter_known:
                    Filter consequences if allele frequency is given
                    for any of the available VEP frequency
                    annotations.

            filter_novel:
                    Filter consequences if no allele frequency is
                    given for any of the available VEP frequency
                    annotations.

            afs:    Only use the listed allele frequency annotations
                    for freq/min_freq/novelty filtering.

            gene_filter:
                    VarByRegion object from vase.var_by_region. If
                    provided, consequences will be filtered if they
                    do not alter the features specified in the
                    VarByRegion object for the current region.

            blacklist:
                    File containing a list of Feature IDs to ignore.

            pathogenic:
                    If True, retain consequences regardless of type
                    if annotated as 'pathogenic' or 'likely
                    pathogenic' in 'CLIN_SIG' or 'clinvar_clnsig'
                    VEP fields. Frequency, biotype and canonical
                    filtering will still be applied.

            no_conflicted:
                    If 'pathogenic' option is True, only retain
                    'likely pathogenic' and 'pathogenic'
                    consequences if there are no conflicting
                    'benign' or 'likely benign' assertions.

            g2p:
                    G2P object from vase.g2p for filtering on
                    presence and/or requirements from a G2P file.

            check_g2p_consequence:
                    If a G2P object is provided above, require that
                    that the observed consequence matches the
                    'mutation consequence' in the G2P file.

            logging_level:
                    Logging level to use. Default=logging.WARNING.

        Note:
            The list defaults (csq=[], impact=[], ...) are shared across
            calls; they appear to be only read here, never mutated in
            place (in_silico/splice_in_silico are rebound to new sets)
            -- NOTE(review): confirm CsqFilter does not mutate them.
        '''
        self.logger = self._get_logger(logging_level)
        self.canonical = canonical
        self.loftee = loftee
        self.filter_flagged = filter_flagged_features
        self.freq = freq
        self.min_freq = min_freq
        self.afs = afs
        self.filter_known = filter_known
        self.filter_novel = filter_novel
        # Must run after the freq attributes above are set: it inspects
        # self.freq/min_freq/filter_known/filter_novel and self.afs.
        self._check_freq_fields(vcf)
        self.in_silico = False
        self.splice_in_silico = False
        if in_silico:
            in_silico = set(in_silico)
            self.in_silico = InSilicoFilter(in_silico, filter_unpredicted,
                                            keep_any_damaging)
        if splice_in_silico:
            splice_in_silico = set(splice_in_silico)
            self.splice_in_silico = InSilicoFilter(
                programs=splice_in_silico,
                filter_unpredicted=splice_filter_unpredicted,
                keep_if_any_damaging=splice_keep_any_damaging,
                pred_file=os.path.join(os.path.dirname(__file__),
                                       "data",
                                       "vep_splice_insilico_pred.tsv"))
        self.pathogenic = pathogenic
        self.no_conflicted = no_conflicted
        if pathogenic:
            # Identify which ClinVar annotation fields the VCF provides.
            self.path_fields = self._get_path_fields(vcf)
        super().__init__(vcf=vcf, csq_attribute='CSQ', csq=csq, impact=impact,
                         biotypes=biotypes, retain_labels=retain_labels,
                         filter_flagged_features=filter_flagged_features,
                         gene_filter=gene_filter, blacklist=blacklist, g2p=g2p,
                         check_g2p_consequence=check_g2p_consequence)
    def filter_csq(self, csq):
        '''
        Returns two boolean values. The first indicates whether the consequence
        annotation should be filtered. The second indicates whether the ALT
        allele should be filtered irrespective of the given or any other
        consequence annotation.

        Args:
            csq: dict mapping VEP CSQ field names to their values for a
                 single consequence annotation.
        '''
        # --- transcript/feature-level filters ---
        if self.canonical:
            try:
                if csq['CANONICAL'] != 'YES':
                    return True, False
            except KeyError:
                pass
        if self.filter_flagged:
            try:
                if csq['FLAGS']:
                    return True, False
            except KeyError:
                pass
        if (self.biotypes is not None and csq['BIOTYPE'].lower() not in
                self.biotypes):
            return True, False
        if self.gene_filter:
            if not self.gene_filter.target_in_csq(csq):
                return True, False
        if self.g2p:
            if csq['SYMBOL'] not in self.g2p.g2p:
                return True, False
        if self.blacklist and csq['Feature'] in self.blacklist:
            return True, False
        # --- allele-frequency filters; a hit here filters the whole ALT
        # allele (second return value True), not just this annotation ---
        if (self.freq or self.min_freq or self.filter_known or
                self.filter_novel):
            known = False
            for af in self.freq_fields:
                if csq[af] == '' or csq[af] == '.':
                    continue
                try:
                    c_af = float(csq[af])
                except ValueError:
                    # multiple '&'-separated values: take the maximum
                    try:
                        c_af = max(float(x) for x in csq[af].split('&') if x
                                   != '.')
                    except ValueError:
                        continue
                known = True
                if self.filter_known:
                    return True, True
                if self.freq:
                    if c_af >= self.freq:
                        return True, True
                if self.min_freq:
                    if c_af < self.min_freq:
                        return True, True
            if self.filter_novel and not known:
                return True, True
        if (self.csq is None and self.impact is None and
                not self.check_g2p_consequence):
            # if only using biotypes/MAF for filtering
            return False, False
        # ClinVar-pathogenic or label-retained consequences bypass the
        # consequence-type checks below.
        if self.pathogenic and self._has_pathogenic_annotation(csq):
            return False, False
        if self._retain_label_matched(csq):
            return False, False
        if self.check_g2p_consequence and self.g2p:
            filt_csq = self.g2p.consequences_from_gene(csq['SYMBOL'])
        else:
            filt_csq = self.csq
        # --- consequence-type / impact matching, with optional in-silico,
        # splice-prediction and loftee sub-filters ---
        for s_csq in [x.lower() for x in csq['Consequence'].split('&')]:
            matches_csq = False
            matches_impact = False
            if filt_csq is not None and s_csq in filt_csq:
                matches_csq = True
            if self.impact is not None and csq['IMPACT'] in self.impact:
                matches_impact = True
            if matches_csq or matches_impact:
                if self.in_silico and s_csq == 'missense_variant':
                    do_filter = self.in_silico.filter(csq)
                    if not do_filter:
                        return False, False
                elif self.splice_in_silico and s_csq.startswith("splice"):
                    do_filter = self.splice_in_silico.filter(csq)
                    if not do_filter:
                        return False, False
                elif self.loftee and (s_csq in lof_csq or matches_impact
                                      and csq['IMPACT'] == 'HIGH'):
                    # only keep LoF consequences loftee rates high-confidence
                    if csq['LoF'] == 'HC':
                        return False, False
                else:
                    return False, False
        # no consequence matched the retention criteria
        return True, False
def _has_pathogenic_annotation(self, csq):
path = []
benign = []
for annot in self.path_fields:
if not csq[annot]:
continue
assertions = csq[annot].split('&')
if annot == 'clinvar_clnsig':
# benign = 2, likely benign = 3
# likely pathogenic = 4, pathogenic = 5
try:
benign.extend((4 > int(x) > 1 for x in assertions))
path.extend((6 > int(x) > 3 for x in assertions))
except ValueError:
self.logger.warn("Error parsing 'clinvar_clnsig' field " +
"'{}' - expected numeric values.".format(
csq[annot]))
else:
benign.extend(('benign' in x for x in assertions))
path.extend(('pathogenic' in x for x in assertions))
if self.no_conflicted:
return any(path) and not any(benign)
return any(path)
def _read_maf_file(self):
    """Return the packaged list of VEP allele-frequency field names.

    Reads ``data/vep_maf.tsv`` shipped alongside this module, skipping
    '#'-prefixed comment lines.
    """
    data_file = os.path.join(os.path.dirname(__file__),
                             "data",
                             "vep_maf.tsv")
    with open(data_file, encoding='UTF-8') as fh:
        return [line.rstrip() for line in fh if not line.startswith('#')]
def _check_freq_fields(self, vcf):
self.freq_fields = []
if (not self.freq and not self.min_freq and not self.filter_novel and
not self.filter_known):
return
if self.afs:
for fq in self.afs:
if fq in vcf.header.csq_fields:
self.freq_fields.append(fq)
self.logger.info("Found '{}' VEP allele ".format(fq) +
"frequency annotation")
else:
raise RuntimeError("Could not find '{}' ".format(fq) +
"VEP AF field in VEP annotations.")
else:
for fq in self._read_maf_file():
if fq in vcf.header.csq_fields:
self.freq_fields.append(fq)
self.logger.info("Found '{}' VEP allele ".format(fq) +
"frequency annotation")
if not self.freq_fields:
self.logger.warn("No compatible (>= v90) allele frequency fields" +
" in VEP annotations.")
def _get_path_fields(self, vcf):
cln_fields = ['CLIN_SIG', 'clinvar_clnsig']
path_fields = [f for f in vcf.header.csq_fields if f in cln_fields]
if not path_fields:
self.logger.warn("No compatible ClinVar VEP annotations found " +
"for use with pathogenic allele identification.")
return path_fields
def get_required_header_fields(self):
    '''
    Check which CSQ/ANN annotation fields are required given arguments
    passed to __init__
    '''
    required = ['Consequence', 'BIOTYPE']
    # Each optional field is only needed when its filter is enabled.
    conditional = ((self.impact, 'IMPACT'),
                   (self.canonical, 'CANONICAL'),
                   (self.loftee, 'LoF'),
                   (self.filter_flagged, 'FLAGS'))
    required.extend(field for enabled, field in conditional if enabled)
    return required
| 2,853 | 0 | 108 |
93ea7ae18c0399f765785f8b3f05735d7bd8fccf | 236 | py | Python | test/scenarios/command_check/tests/test_default.py | gzm55/molecule | 1d6ff8fe861fb5657c6f28c2f5918cce4f25adce | [
"MIT"
] | 1 | 2019-02-02T21:47:19.000Z | 2019-02-02T21:47:19.000Z | test/scenarios/command_check/tests/test_default.py | DalavanCloud/molecule | 86e497c1f81a99df4e1d357c417a6d315e903db2 | [
"MIT"
] | 2 | 2021-03-25T23:55:15.000Z | 2022-03-29T22:01:50.000Z | test/scenarios/command_check/tests/test_default.py | DalavanCloud/molecule | 86e497c1f81a99df4e1d357c417a6d315e903db2 | [
"MIT"
] | 1 | 2019-02-02T21:46:56.000Z | 2019-02-02T21:46:56.000Z | import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
'.molecule/ansible_inventory').get_hosts('all')
| 21.454545 | 63 | 0.754237 | import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
'.molecule/ansible_inventory').get_hosts('all')
def test_hosts_file(File):
f = File('/tmp/check')
assert not f.exists
| 57 | 0 | 23 |
824803f06491ef81f75eb0d5b0132969f56c12cf | 6,808 | py | Python | progressivis/stats/blobs_table.py | jdfekete/progressivis | 3bc79ce229cd628ef0aa4663136a674743697b47 | [
"BSD-2-Clause"
] | 51 | 2015-09-14T16:31:02.000Z | 2022-01-12T17:56:53.000Z | progressivis/stats/blobs_table.py | jdfekete/progressivis | 3bc79ce229cd628ef0aa4663136a674743697b47 | [
"BSD-2-Clause"
] | 10 | 2017-11-15T15:10:05.000Z | 2022-01-19T07:36:43.000Z | progressivis/stats/blobs_table.py | jdfekete/progressivis | 3bc79ce229cd628ef0aa4663136a674743697b47 | [
"BSD-2-Clause"
] | 5 | 2017-11-14T20:20:56.000Z | 2020-01-22T06:26:51.000Z | """
Isotropic Gaussian blobs
"""
from collections import OrderedDict
import logging
import numpy as np
from abc import ABCMeta, abstractmethod
from ..utils.errors import ProgressiveError, ProgressiveStopIteration
from progressivis import ProgressiveError, SlotDescriptor
from ..table.module import TableModule
from ..table.table import Table
from ..table.constant import Constant
from ..utils.psdict import PsDict
from ..core.utils import integer_types
from sklearn.datasets import make_blobs
from sklearn.utils import shuffle as multi_shuffle
logger = logging.getLogger(__name__)
RESERVOIR_SIZE = 10000
class BlobsTableABC(TableModule):
"""Isotropic Gaussian blobs => table
The purpose of the "reservoir" approach is to ensure the reproducibility of the results
"""
outputs = [SlotDescriptor('labels', type=Table, required=False)]
kw_fun = None
@abstractmethod
| 36.021164 | 111 | 0.606345 | """
Isotropic Gaussian blobs
"""
from collections import OrderedDict
import logging
import numpy as np
from abc import ABCMeta, abstractmethod
from ..utils.errors import ProgressiveError, ProgressiveStopIteration
from progressivis import ProgressiveError, SlotDescriptor
from ..table.module import TableModule
from ..table.table import Table
from ..table.constant import Constant
from ..utils.psdict import PsDict
from ..core.utils import integer_types
from sklearn.datasets import make_blobs
from sklearn.utils import shuffle as multi_shuffle
logger = logging.getLogger(__name__)
RESERVOIR_SIZE = 10000
def make_mv_blobs(means, covs, n_samples, **kwds):
    """Draw shuffled samples from several multivariate normal clusters.

    ``n_samples`` is divided evenly among the clusters (integer division);
    cluster index is used as the label. Extra keyword arguments are passed
    to ``np.random.multivariate_normal``. Returns shuffled (samples, labels).
    """
    assert len(means) == len(covs)
    per_blob = n_samples // len(means)
    samples = []
    cluster_labels = []
    for idx, (mu, sigma) in enumerate(zip(means, covs)):
        samples.append(
            np.random.multivariate_normal(mu, sigma, per_blob, **kwds))
        cluster_labels.append(np.full(per_blob, idx, dtype='int64'))
    return multi_shuffle(np.concatenate(samples),
                         np.concatenate(cluster_labels))
def xy_to_dict(x, y, i, size, cols):
    """Slice rows ``[i:i+size)`` of (x, y) into a per-column dict plus labels.

    When ``size`` is None the slice extends to the end of the arrays.
    """
    stop = None if size is None else i + size
    columns = {name: x[i:stop, j] for j, name in enumerate(cols)}
    return columns, y[i:stop]
class BlobsTableABC(TableModule):
    """Isotropic Gaussian blobs => table

    The purpose of the "reservoir" approach is to ensure the reproducibility of the results
    """
    # Optional output slot exposing the generated cluster labels.
    outputs = [SlotDescriptor('labels', type=Table, required=False)]
    # Subclasses bind this to the sample-generating function they wrap.
    kw_fun = None

    def __init__(self, columns, rows=-1, dtype='float64', seed=0, throttle=False, **kwds):
        """Create the module.

        :param columns: a column count (int) or an explicit list/ndarray of
            column names.
        :param rows: total rows to produce; -1 means unbounded.
        :param dtype: dshape element type used for every generated column.
        :param seed: RNG seed; incremented per reservoir refill so runs are
            reproducible.
        :param throttle: if a number, caps rows generated per run step.
        :raises ProgressiveError: if ``columns`` has an unsupported type.
        """
        super().__init__(**kwds)
        self._kwds = {} #self._filter_kwds(kwds, self.kw_fun)
        """assert 'centers' in self._kwds
        assert 'n_samples' not in self._kwds
        assert 'n_features' not in self._kwds
        assert 'random_state' not in self._kwds"""
        #self._kwds['n_samples'] = rows
        #self._kwds['n_features']
        self.default_step_size = 1000
        if isinstance(columns, integer_types):
            # Synthesize column names _1.._n when only a count is given.
            self.columns = ["_%d"%i for i in range(1, columns+1)]
            #self._kwds['n_features'] = columns
        elif isinstance(columns, (list, np.ndarray)):
            self.columns = columns
            #self._kwds['n_features'] = len(columns)
        else:
            raise ProgressiveError('Invalid type for columns')
        self.rows = rows
        self.seed = seed
        # Pre-generated (X, y) block of RESERVOIR_SIZE samples plus the read
        # cursor into it; refilled lazily by fill_reservoir().
        self._reservoir = None
        self._labels = None
        self._reservoir_idx = 0
        if throttle and isinstance(throttle, integer_types+(float,)):
            self.throttle = throttle
        else:
            self.throttle = False
        dshape = ", ".join([f"{col}: {dtype}" for col in self.columns])
        dshape = "{" + dshape + "}"
        self.result = Table(self.generate_table_name('table'),
                            dshape=dshape,
                            create=True)
        # Re-read column names from the created table (canonical order).
        self.columns = self.result.columns

    def is_source(self):
        """This module generates data; it has no required inputs."""
        return True

    def starting(self):
        """Enable label bookkeeping only if the 'labels' slot is connected."""
        super().starting()
        opt_slot = self.get_output_slot('labels')
        if opt_slot:
            logger.debug('Maintaining labels')
            self.maintain_labels(True)
        else:
            logger.debug('Not maintaining labels')
            self.maintain_labels(False)

    def maintain_labels(self, yes=True):
        """Create (or drop) the side table that stores cluster labels."""
        if yes and self._labels is None:
            self._labels = Table(self.generate_table_name('blobs_labels'),
                                 dshape="{labels: int64}",
                                 create=True)
        elif not yes:
            self._labels = None

    def labels(self):
        """Return the labels table, or None when labels are not maintained."""
        return self._labels

    def get_data(self, name):
        """Route the 'labels' slot to the labels table; defer otherwise."""
        if name == 'labels':
            return self.labels()
        return super().get_data(name)

    @abstractmethod
    def fill_reservoir(self):
        """Refill ``self._reservoir`` with a fresh (X, y) batch and reset
        ``self._reservoir_idx``; implemented by subclasses."""
        pass

    def run_step(self, run_number, step_size, howlong):
        """Append up to ``step_size`` generated rows to the result table.

        Consumes samples from the reservoir, refilling it whenever it is
        exhausted, and mirrors labels into the labels table when enabled.
        """
        if step_size == 0:
            logger.error('Received a step_size of 0')
            return self._return_run_step(self.state_ready, steps_run=0)
        logger.info('generating %d lines', step_size)
        if self.throttle:
            step_size = np.min([self.throttle, step_size])
        if self.rows >= 0 and (len(self.result)+step_size) > self.rows:
            # Clamp so we never exceed the requested total row count.
            step_size = self.rows - len(self.result)
            logger.info('truncating to %d lines', step_size)
            if step_size <= 0:
                raise ProgressiveStopIteration
        if self._reservoir is None:
            self.fill_reservoir()
        steps = step_size
        while steps>0:
            # Samples still available in the current reservoir batch.
            level = len(self._reservoir[0]) - self._reservoir_idx
            assert level >=0
            if steps >= level:
                blobs_dict, y_ = xy_to_dict(*self._reservoir, self._reservoir_idx, None, self.columns)
                steps -= level
                # reservoir was emptied so:
                self.fill_reservoir()
            else: # steps < level
                blobs_dict, y_ = xy_to_dict(*self._reservoir, self._reservoir_idx, steps, self.columns)
                self._reservoir_idx += steps
                steps = 0
            self.result.append(blobs_dict)
            if self._labels is not None:
                self._labels.append({'labels': y_})
        if len(self.result) == self.rows:
            next_state = self.state_zombie
        elif self.throttle:
            next_state = self.state_blocked
        else:
            next_state = self.state_ready
        return self._return_run_step(next_state, steps_run=step_size)
class BlobsTable(BlobsTableABC):
    """Progressive table of samples drawn with sklearn's ``make_blobs``."""
    kw_fun = make_blobs

    def __init__(self, *args, **kwds):
        """Create the module; requires a 'centers' keyword for make_blobs."""
        #import pdb;pdb.set_trace()
        super().__init__(*args, **kwds)
        #assert 'centers' in self._kwds
        # Cluster centers forwarded verbatim to make_blobs on each refill.
        self.centers = kwds['centers']
        assert 'n_samples' not in self._kwds
        assert 'n_features' not in self._kwds
        assert 'random_state' not in self._kwds

    def fill_reservoir(self):
        """Generate a fresh batch of RESERVOIR_SIZE samples and reset cursor."""
        X, y = make_blobs(n_samples=RESERVOIR_SIZE, random_state=self.seed, centers=self.centers, **self._kwds)
        # Bump the seed so the next refill yields a new, reproducible batch.
        self.seed += 1
        self._reservoir = (X, y)
        self._reservoir_idx = 0
class MVBlobsTable(BlobsTableABC):
    """Progressive table of samples drawn from multivariate normal blobs."""
    kw_fun = make_mv_blobs

    def __init__(self, *args, **kwds):
        """Create the module; requires 'means' and 'covs' keywords."""
        super().__init__(*args, **kwds)
        # Per-cluster mean vectors and covariance matrices for make_mv_blobs.
        self.means = kwds['means']
        self.covs = kwds['covs']

    def fill_reservoir(self):
        """Generate a fresh batch of RESERVOIR_SIZE samples and reset cursor."""
        # Seed numpy's global RNG so each batch is reproducible.
        np.random.seed(self.seed)
        X, y = make_mv_blobs(n_samples=RESERVOIR_SIZE, means=self.means, covs=self.covs, **self._kwds)
        self.seed += 1
        self._reservoir = (X, y)
        self._reservoir_idx = 0
| 5,424 | 181 | 306 |
79e89d3d683da0d16d985f1e6446742d0dfd7fc1 | 950 | py | Python | SRT/lib/datasets/optflow_utils.py | yerang823/landmark-detection | a01bcf79abcf9d203c1b92f29b49aab9005952c3 | [
"MIT"
] | 612 | 2019-06-01T07:10:57.000Z | 2022-03-30T13:44:41.000Z | SRT/lib/datasets/optflow_utils.py | yerang823/landmark-detection | a01bcf79abcf9d203c1b92f29b49aab9005952c3 | [
"MIT"
] | 67 | 2019-06-06T15:03:02.000Z | 2021-12-17T01:51:14.000Z | SRT/lib/datasets/optflow_utils.py | yerang823/landmark-detection | a01bcf79abcf9d203c1b92f29b49aab9005952c3 | [
"MIT"
] | 121 | 2019-06-01T16:37:23.000Z | 2022-03-27T19:20:28.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#
import cv2
| 35.185185 | 76 | 0.726316 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#
import cv2
def get_optflow_retval(algorithm):
    """Create an OpenCV optical-flow estimator by name.

    :param algorithm: one of 'deepflow', 'farneback', 'tvl1',
        'sparse2dense', 'DISflow_ultrafast', 'DISflow_fast' or
        'DISflow_medium' (matched case-insensitively).
    :returns: an OpenCV dense optical-flow object.
    :raises ValueError: for an unrecognized algorithm name.
    """
    # Fix: normalize once - the DIS variants were previously matched
    # case-sensitively while every other name used .lower(); matching all
    # names case-insensitively is backward compatible.
    name = algorithm.lower()
    if name == 'deepflow':
        retval = cv2.optflow.createOptFlow_DeepFlow()
    elif name == 'farneback':
        retval = cv2.optflow.createOptFlow_Farneback()
    elif name == 'tvl1':
        retval = cv2.createOptFlow_DualTVL1()
    elif name == 'sparse2dense':
        retval = cv2.optflow.createOptFlow_SparseToDense()
    elif name == 'disflow_ultrafast':
        # DIS presets: 0 = ultrafast, 1 = fast, 2 = medium.
        retval = cv2.optflow.createOptFlow_DIS(0)
    elif name == 'disflow_fast':
        retval = cv2.optflow.createOptFlow_DIS(1)
    elif name == 'disflow_medium':
        retval = cv2.optflow.createOptFlow_DIS(2)
    else:
        raise ValueError('algorithm is not found : {:}'.format(algorithm))
    return retval
| 716 | 0 | 23 |
0943ec50d15d9751f0cada78ac17061c04574a8b | 1,142 | py | Python | jskparser/ast/stmt/returnstmt.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | [
"MIT"
] | 15 | 2015-12-15T18:33:50.000Z | 2021-09-29T11:48:54.000Z | jskparser/ast/stmt/returnstmt.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | [
"MIT"
] | 11 | 2015-11-16T22:14:58.000Z | 2021-09-23T05:28:40.000Z | jskparser/ast/stmt/returnstmt.py | natebragg/java-sketch | f5ac26f2cc46ae4556f9a61c55afd37f55c961ff | [
"MIT"
] | 8 | 2015-11-16T21:50:08.000Z | 2021-03-23T15:15:34.000Z | #!/usr/bin/env python
from .statement import Statement
from ..expr.nameexpr import NameExpr
from ..expr.literalexpr import LiteralExpr
from ..type.type import Type
from . import _import
| 24.297872 | 88 | 0.59282 | #!/usr/bin/env python
from .statement import Statement
from ..expr.nameexpr import NameExpr
from ..expr.literalexpr import LiteralExpr
from ..type.type import Type
from . import _import
class ReturnStmt(Statement):
    """AST node for a ``return`` statement.

    Wraps the (optional) returned expression and derives the statement's
    type from NameExpr/Type/LiteralExpr nodes visited in its subtree;
    defaults to u'void' when nothing matches.
    """
    def __init__(self, kwargs={}):
        super(ReturnStmt, self).__init__(kwargs)
        locs = _import()
        # Expression expr;
        e = kwargs.get(u'expr', {})
        # Instantiate the concrete expression class named by the '@t' tag.
        self._expr = locs[e[u'@t']](e) if e else None
        self._type = u'void'
        def t(n):
            # Visitor: take the type of the last matching descendant.
            if type(n) == NameExpr: self._type = n
            elif isinstance(n, Type) or isinstance(n, LiteralExpr): self._type = n.typee
        walk(t, self)
        self.add_as_parent([self.expr])

    # Returned expression node, or None for a bare 'return;'.
    @property
    def expr(self): return self._expr
    @expr.setter
    def expr(self, v): self._expr = v

    # Inferred type of the returned value.
    @property
    def typee(self): return self._type
    @typee.setter
    def typee(self, v): self._type = v

    # Dataflow out-set: always empty for a return statement.
    @property
    def out_set(self): return set([])
    @out_set.setter
    def out_set(self, v): pass
def walk(f, n, *args):
    """Apply ``f(node, *args)`` to ``n`` and every descendant, pre-order."""
    pending = [n]
    while pending:
        node = pending.pop()
        f(node, *args)
        # Push children reversed so the first child is processed next,
        # preserving the original recursive pre-order.
        pending.extend(reversed(node.childrenNodes))
| 605 | 299 | 46 |
d7d215053e155a284da941b003f807461f834711 | 8,669 | py | Python | eval/eval_recognition/eval_ijb.py | yzhHoward/MagFace | 98b56d3a13a06cb9c081cc42a6e8137169af7798 | [
"Apache-2.0"
] | 431 | 2021-03-04T03:17:01.000Z | 2022-03-31T12:59:48.000Z | eval/eval_recognition/eval_ijb.py | yzhHoward/MagFace | 98b56d3a13a06cb9c081cc42a6e8137169af7798 | [
"Apache-2.0"
] | 40 | 2021-03-12T07:56:24.000Z | 2022-03-29T04:27:20.000Z | eval/eval_recognition/eval_ijb.py | yzhHoward/MagFace | 98b56d3a13a06cb9c081cc42a6e8137169af7798 | [
"Apache-2.0"
] | 70 | 2021-03-12T09:10:02.000Z | 2022-03-30T02:32:45.000Z | #!/usr/bin/env python
import argparse
import os
import cv2
import math
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import KFold
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate
from sklearn.metrics import roc_curve, auc
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
# the ijbc dataset is from insightface
# using the cos similarity
# no flip test
# basic args
parser = argparse.ArgumentParser(description='Evaluation')
parser.add_argument('--feat_list', type=str,
help='The cache folder for validation report')
parser.add_argument('--base_dir', default='data/IJBC/')
parser.add_argument('--type', default='c')
parser.add_argument('--embedding_size', default=512, type=int)
parser.add_argument('--magface_qlt', default=0, type=int)
if __name__ == '__main__':
main()
"""
score_save_path = './IJBC/result'
files = glob.glob(score_save_path + '/MS1MV2*.npy')
methods = []
scores = []
for file in files:
methods.append(Path(file).stem)
scores.append(np.load(file))
methods = np.array(methods)
scores = dict(zip(methods,scores))
colours = dict(zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))
#x_labels = [1/(10**x) for x in np.linspace(6, 0, 6)]
x_labels = [10**-6, 10**-5, 10**-4,10**-3, 10**-2, 10**-1]
tpr_fpr_table = PrettyTable(['Methods'] + map(str, x_labels))
fig = plt.figure()
for method in methods:
fpr, tpr, _ = roc_curve(label, scores[method])
roc_auc = auc(fpr, tpr)
fpr = np.flipud(fpr)
tpr = np.flipud(tpr) # select largest tpr at same fpr
plt.plot(fpr, tpr, color=colours[method], lw=1, label=('[%s (AUC = %0.4f %%)]' % (method.split('-')[-1], roc_auc*100)))
tpr_fpr_row = []
tpr_fpr_row.append(method)
for fpr_iter in np.arange(len(x_labels)):
_, min_index = min(list(zip(abs(fpr-x_labels[fpr_iter]), range(len(fpr)))))
tpr_fpr_row.append('%.4f' % tpr[min_index])
tpr_fpr_table.add_row(tpr_fpr_row)
plt.xlim([10**-6, 0.1])
plt.ylim([0.3, 1.0])
plt.grid(linestyle='--', linewidth=1)
plt.xticks(x_labels)
plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
plt.xscale('log')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC on IJB-C')
plt.legend(loc="lower right")
plt.show()
#fig.savefig('IJB-B.pdf')
"""
| 36.120833 | 123 | 0.619218 | #!/usr/bin/env python
import argparse
import os
import cv2
import math
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import KFold
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate
from sklearn.metrics import roc_curve, auc
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
# the ijbc dataset is from insightface
# using the cos similarity
# no flip test
# basic args
parser = argparse.ArgumentParser(description='Evaluation')
parser.add_argument('--feat_list', type=str,
help='The cache folder for validation report')
parser.add_argument('--base_dir', default='data/IJBC/')
parser.add_argument('--type', default='c')
parser.add_argument('--embedding_size', default=512, type=int)
parser.add_argument('--magface_qlt', default=0, type=int)
def read_template_media_list(path):
    """Parse an IJB face/template/media metadata file.

    Each line is ``<image-name> <template-id> <media-id>``.

    :returns: (templates, medias) as integer numpy arrays.
    """
    with open(path, 'r') as fh:
        rows = [line.strip().split(' ') for line in fh]
    templates = np.array([int(row[1]) for row in rows])
    medias = np.array([int(row[2]) for row in rows])
    return templates, medias
def read_template_pair_list(path):
    """Parse an IJB template-pair label file.

    Each line is ``<template-id-1> <template-id-2> <label>`` where label is
    1 for a genuine pair and 0 for an impostor pair.

    :returns: (t1, t2, label) as integer numpy arrays.
    """
    with open(path, 'r') as fh:
        rows = [line.strip().split(' ') for line in fh]
    t1 = np.array([int(row[0]) for row in rows])
    t2 = np.array([int(row[1]) for row in rows])
    label = np.array([int(row[2]) for row in rows])
    return t1, t2, label
def read_feats(args):
    """Load per-image feature vectors from ``args.feat_list``.

    Each line starts with an identifier followed by the feature values; only
    the first ``args.embedding_size`` values after the identifier are kept.

    :returns: float32 array of shape (n_images, embedding_size).
    """
    feats = []
    with open(args.feat_list, 'r') as fh:
        for line in fh:
            fields = line.strip().split(' ')
            feats.append([float(v)
                          for v in fields[1:1 + args.embedding_size]])
    return np.array(feats).astype(np.float32)
def image2template_feature(img_feats=None,
                           templates=None,
                           medias=None,
                           magface_qlt=None):
    """Aggregate per-image features into one feature vector per template.

    For each unique template id, images are first averaged within each media
    (video/photo set), each media feature is L2-normalized (skipped when
    ``magface_qlt == 1``, keeping MagFace magnitudes), and the media features
    are averaged into the template feature.

    :param img_feats: per-image features, shape (n_images, feat_dim).
    :param templates: per-image template ids (same length as img_feats).
    :param medias: per-image media ids (same length as img_feats).
    :param magface_qlt: when 1, skip per-media normalization.
    :returns: (template_feats, unique_templates) where template_feats is a
        torch tensor of shape (n_templates, feat_dim).
    """
    # ==========================================================
    # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim]
    # 2. compute media feature.
    # 3. compute template feature.
    # ==========================================================
    unique_templates = np.unique(templates)
    # template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))
    template_feats = torch.zeros((len(unique_templates), img_feats.shape[1]))
    for count_template, uqt in enumerate(unique_templates):
        # Rows of this template, then grouped by media id.
        (ind_t,) = np.where(templates == uqt)
        face_norm_feats = img_feats[ind_t]
        face_medias = medias[ind_t]
        unique_medias, unique_media_counts = np.unique(
            face_medias, return_counts=True)
        media_norm_feats = []
        for u, ct in zip(unique_medias, unique_media_counts):
            (ind_m,) = np.where(face_medias == u)
            media_norm_feats += [np.mean(face_norm_feats[ind_m],
                                         0, keepdims=False)]
        # media_norm_feats = np.array(media_norm_feats)
        media_norm_feats = torch.tensor(media_norm_feats)
        if magface_qlt != 1:
            # media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))
            media_norm_feats = F.normalize(media_norm_feats)
        # template_feats[count_template] = np.mean(media_norm_feats, 0)
        template_feats[count_template] = torch.mean(media_norm_feats, 0)
        if count_template % 2000 == 0:
            print('Finish Calculating {} template features.'.format(count_template))
    # template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True))
    return template_feats, unique_templates
def distance_(embeddings0, embeddings1):
    """Angular distance in [0, 1] between row-wise paired embeddings.

    Computes the cosine similarity of each row pair, clamps it to [-1, 1]
    and converts to an angle normalized by pi.

    :returns: numpy array of per-pair distances.
    """
    similarity = F.cosine_similarity(embeddings0, embeddings1, dim=1, eps=0)
    similarity = torch.clamp(similarity, min=-1, max=1)
    distance = torch.acos(similarity) / math.pi
    return distance.cpu().numpy()
def verification(template_feats, unique_templates, p1=None, p2=None):
    """Score all template pairs with cosine similarity.

    :param template_feats: per-template features (rows align with
        ``unique_templates``).
    :param unique_templates: template ids, one per row of template_feats.
    :param p1, p2: arrays of template ids forming the pairs to score.
    :returns: numpy array of similarity scores (1 - angular distance).
    """
    # ==========================================================
    # Compute set-to-set Similarity Score.
    # ==========================================================
    # Map a template id to its row index in template_feats.
    template2id = np.zeros((max(unique_templates)+1), dtype=int)
    for count_template, uqt in enumerate(unique_templates):
        template2id[uqt] = count_template
    scores = np.zeros((len(p1),)) # save cosine distance between pairs
    total_pairs = np.array(range(len(p1)))
    # small batchsize instead of all pairs in one batch due to the memory limitation
    batchsize = 100000
    sublists = [total_pairs[i:i + batchsize]
                for i in range(0, len(p1), batchsize)]
    total_sublists = len(sublists)
    for c, s in enumerate(sublists):
        feat1 = template_feats[template2id[p1[s]]]
        feat2 = template_feats[template2id[p2[s]]]
        similarity_score = distance_(feat1, feat2)
        # Convert angular distance back to a similarity-style score.
        scores[s] = 1-similarity_score
        if c % 10 == 0:
            print('Finish {}/{} pairs.'.format(c, total_sublists))
    return scores
def perform_verification(args):
    """Run the IJB-B/C 1:1 verification protocol and print TAR values.

    Loads template/media metadata, pair labels and per-image features from
    ``args.base_dir``/``args.feat_list``, aggregates features per template,
    scores every pair and prints the TAR at FARs 1e-6 .. 1e-1.
    """
    # load the data
    templates, medias = read_template_media_list(
        '{}/meta/ijb{}_face_tid_mid.txt'.format(args.base_dir, args.type)
    )
    p1, p2, label = read_template_pair_list(
        '{}/meta/ijb{}_template_pair_label.txt'.format(
            args.base_dir, args.type)
    )
    img_feats = read_feats(args)
    # calculate scores
    template_feats, unique_templates = image2template_feature(img_feats,
                                                              templates,
                                                              medias,
                                                              args.magface_qlt)
    scores = verification(template_feats, unique_templates, p1, p2)
    # show the results
    print('IJB{} 1v1 verification:\n'.format(args.type))
    fpr, tpr, _ = roc_curve(label, scores)
    roc_auc = auc(fpr, tpr)
    fpr = np.flipud(fpr)
    tpr = np.flipud(tpr) # select largest tpr at same fpr
    tpr_fpr_row = []  # NOTE(review): unused here; kept from the reference script
    x_labels = [10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1]
    to_print = ''
    for fpr_iter in np.arange(len(x_labels)):
        # Pick the operating point whose FPR is closest to the target FAR.
        _, min_index = min(
            list(zip(abs(fpr-x_labels[fpr_iter]), range(len(fpr)))))
        # print(' {} TAR.FAR{}'.format(tpr[min_index], x_labels[fpr_iter]))
        print(' {:0.4f}'.format(tpr[min_index]))
        to_print = to_print + ' {:0.4f}'.format(tpr[min_index])
    print(to_print)
def perform_recognition(args):
    """Placeholder for the 1:N recognition protocol (not implemented)."""
    pass
def main():
    """Entry point: parse CLI args, then run verification and recognition."""
    args = parser.parse_args()
    perform_verification(args)
    perform_recognition(args)
perform_recognition(args)
if __name__ == '__main__':
main()
"""
score_save_path = './IJBC/result'
files = glob.glob(score_save_path + '/MS1MV2*.npy')
methods = []
scores = []
for file in files:
methods.append(Path(file).stem)
scores.append(np.load(file))
methods = np.array(methods)
scores = dict(zip(methods,scores))
colours = dict(zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))
#x_labels = [1/(10**x) for x in np.linspace(6, 0, 6)]
x_labels = [10**-6, 10**-5, 10**-4,10**-3, 10**-2, 10**-1]
tpr_fpr_table = PrettyTable(['Methods'] + map(str, x_labels))
fig = plt.figure()
for method in methods:
fpr, tpr, _ = roc_curve(label, scores[method])
roc_auc = auc(fpr, tpr)
fpr = np.flipud(fpr)
tpr = np.flipud(tpr) # select largest tpr at same fpr
plt.plot(fpr, tpr, color=colours[method], lw=1, label=('[%s (AUC = %0.4f %%)]' % (method.split('-')[-1], roc_auc*100)))
tpr_fpr_row = []
tpr_fpr_row.append(method)
for fpr_iter in np.arange(len(x_labels)):
_, min_index = min(list(zip(abs(fpr-x_labels[fpr_iter]), range(len(fpr)))))
tpr_fpr_row.append('%.4f' % tpr[min_index])
tpr_fpr_table.add_row(tpr_fpr_row)
plt.xlim([10**-6, 0.1])
plt.ylim([0.3, 1.0])
plt.grid(linestyle='--', linewidth=1)
plt.xticks(x_labels)
plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
plt.xscale('log')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC on IJB-C')
plt.legend(loc="lower right")
plt.show()
#fig.savefig('IJB-B.pdf')
"""
| 6,025 | 0 | 207 |
b145d80f92580481752a2e1d689188659a644d55 | 8,091 | py | Python | src/flwr_experimental/baseline/run.py | yiliucs/flower | db4d7db353a702b79cbef48b9d4fa5831d91eb00 | [
"Apache-2.0"
] | 1 | 2020-12-29T17:37:12.000Z | 2020-12-29T17:37:12.000Z | src/flwr_experimental/baseline/run.py | yiliucs/flower | db4d7db353a702b79cbef48b9d4fa5831d91eb00 | [
"Apache-2.0"
] | null | null | null | src/flwr_experimental/baseline/run.py | yiliucs/flower | db4d7db353a702b79cbef48b9d4fa5831d91eb00 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Execute Fashion-MNIST baseline locally in Docker."""
import argparse
import concurrent.futures
import configparser
from logging import INFO
from os import path
from time import strftime
from typing import List, Optional
import flwr_experimental.baseline.tf_cifar.settings as tf_cifar_settings
import flwr_experimental.baseline.tf_fashion_mnist.settings as tf_fashion_mnist_settings
import flwr_experimental.baseline.tf_hotkey.settings as tf_hotkey_settings
from flwr.logger import configure, log
from flwr_experimental.baseline import command
from flwr_experimental.ops.cluster import Cluster, Instance
from flwr_experimental.ops.compute.adapter import Adapter
from flwr_experimental.ops.compute.docker_adapter import DockerAdapter
from flwr_experimental.ops.compute.ec2_adapter import EC2Adapter
OPS_INI_PATH = path.normpath(
f"{path.dirname(path.realpath(__file__))}/../../../.flower_ops"
)
# Read config file and extract all values which are needed further down.
CONFIG = configparser.ConfigParser()
CONFIG.read(OPS_INI_PATH)
WHEEL_FILENAME = CONFIG.get("paths", "wheel_filename")
WHEEL_LOCAL_PATH = path.expanduser(CONFIG.get("paths", "wheel_dir")) + WHEEL_FILENAME
DOCKER_PRIVATE_KEY = path.realpath(path.dirname(__file__) + "/../../../docker/ssh_key")
def now() -> str:
    """Return the current local date and time as ``YYYYMMDDTHHMMSS``."""
    timestamp_format = "%Y%m%dT%H%M%S"
    return strftime(timestamp_format)
def configure_cluster(
    adapter: str, instances: List[Instance], baseline: str, setting: str
) -> Cluster:
    """Return configured compute cluster.

    :param adapter: 'docker' for local containers or 'ec2' for AWS.
    :param instances: instance specs (including the logserver).
    :param baseline: baseline name, used only to tag EC2 resources.
    :param setting: setting name, used only to tag EC2 resources.
    :raises Exception: for an unknown adapter type.
    """
    adapter_instance: Optional[Adapter] = None
    private_key: Optional[str] = None

    if adapter == "docker":
        # Local containers run as root with the repo-provided SSH key.
        adapter_instance = DockerAdapter()
        user = "root"
        private_key = DOCKER_PRIVATE_KEY
    elif adapter == "ec2":
        # AWS instances use the image/network config from .flower_ops.
        adapter_instance = EC2Adapter(
            image_id=CONFIG.get("aws", "image_id"),
            key_name=path.expanduser(CONFIG.get("aws", "key_name")),
            subnet_id=CONFIG.get("aws", "subnet_id"),
            security_group_ids=CONFIG.get("aws", "security_group_ids").split(","),
            tags=[
                ("Purpose", "flwr_experimental.baseline"),
                ("Baseline Name", baseline),
                ("Baseline Setting", setting),
            ],
        )
        user = "ubuntu"
        private_key = path.expanduser(CONFIG.get("ssh", "private_key"))
    else:
        raise Exception(f"Adapter of type {adapter} does not exist.")

    cluster = Cluster(
        adapter=adapter_instance,
        ssh_credentials=(user, private_key),
        instances=instances,
        timeout=60,
    )

    return cluster
# pylint: disable=too-many-arguments, too-many-locals
def run(baseline: str, setting: str, adapter: str) -> None:
    """Run baseline.

    Provisions a cluster, installs the Flower wheel, starts the logserver,
    server and clients, and arms a watchdog that shuts instances down when
    no Flower process remains.

    :param baseline: baseline package to run (tf_cifar / tf_fashion_mnist /
        tf_hotkey).
    :param setting: named setting within that baseline.
    :param adapter: compute backend, 'docker' or 'ec2'.
    :raises Exception: when the baseline name is unknown.
    """
    print(f"Starting baseline with {setting} settings.")

    # Docker containers run as root; EC2 instances as the ubuntu user.
    wheel_remote_path = (
        f"/root/{WHEEL_FILENAME}"
        if adapter == "docker"
        else f"/home/ubuntu/{WHEEL_FILENAME}"
    )

    if baseline == "tf_cifar":
        settings = tf_cifar_settings.get_setting(setting)
    elif baseline == "tf_fashion_mnist":
        settings = tf_fashion_mnist_settings.get_setting(setting)
    elif baseline == "tf_hotkey":
        settings = tf_hotkey_settings.get_setting(setting)
    else:
        raise Exception("Setting not found.")

    # Get instances and add a logserver to the list
    instances = settings.instances
    instances.append(
        Instance(name="logserver", group="logserver", num_cpu=2, num_ram=2)
    )

    # Configure cluster
    log(INFO, "(1/9) Configure cluster.")
    cluster = configure_cluster(adapter, instances, baseline, setting)

    # Start the cluster; this takes some time
    log(INFO, "(2/9) Start cluster.")
    cluster.start()

    # Upload wheel to all instances
    log(INFO, "(3/9) Upload wheel to all instances.")
    cluster.upload_all(WHEEL_LOCAL_PATH, wheel_remote_path)

    # Install the wheel on all instances
    log(INFO, "(4/9) Install wheel on all instances.")
    cluster.exec_all(command.install_wheel(wheel_remote_path))

    # Download datasets in server and clients
    log(INFO, "(5/9) Download dataset on server and clients.")
    cluster.exec_all(
        command.download_dataset(baseline=baseline), groups=["server", "clients"]
    )

    # Start logserver
    log(INFO, "(6/9) Start logserver.")
    logserver = cluster.get_instance("logserver")
    cluster.exec(
        logserver.name,
        command.start_logserver(
            logserver_s3_bucket=CONFIG.get("aws", "logserver_s3_bucket"),
            logserver_s3_key=f"{baseline}_{setting}_{now()}.log",
        ),
    )

    # Start Flower server on Flower server instances
    log(INFO, "(7/9) Start server.")
    cluster.exec(
        "server",
        command.start_server(
            log_host=f"{logserver.private_ip}:8081", baseline=baseline, setting=setting,
        ),
    )

    # Start Flower clients
    log(INFO, "(8/9) Start clients.")
    server = cluster.get_instance("server")
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        # Start the load operations and mark each future with its URL
        concurrent.futures.wait(
            [
                executor.submit(
                    cluster.exec,
                    client_setting.instance_name,
                    command.start_client(
                        log_host=f"{logserver.private_ip}:8081",
                        server_address=f"{server.private_ip}:8080",
                        baseline=baseline,
                        setting=setting,
                        cid=client_setting.cid,
                    ),
                )
                for client_setting in settings.clients
            ]
        )

    # Shutdown server and client instance after 10min if not at least one Flower
    # process is running it
    log(INFO, "(9/9) Start shutdown watcher script.")
    cluster.exec_all(command.watch_and_shutdown("flower", adapter))

    # Give user info how to tail logfile
    private_key = (
        DOCKER_PRIVATE_KEY
        if adapter == "docker"
        else path.expanduser(CONFIG.get("ssh", "private_key"))
    )
    log(
        INFO,
        "If you would like to tail the central logfile run:\n\n\t%s\n",
        command.tail_logfile(adapter, private_key, logserver),
    )
def main() -> None:
    """Start Flower baseline.

    Parses --baseline, --setting and --adapter from the command line,
    configures logging and launches the run.
    """
    parser = argparse.ArgumentParser(description="Flower")
    parser.add_argument(
        "--baseline",
        type=str,
        required=True,
        choices=["tf_cifar", "tf_fashion_mnist", "tf_hotkey"],
        help="Name of baseline name to run.",
    )
    parser.add_argument(
        "--setting",
        type=str,
        required=True,
        # Union of the setting names known to any baseline.
        choices=list(
            set(
                list(tf_cifar_settings.SETTINGS.keys())
                + list(tf_fashion_mnist_settings.SETTINGS.keys())
                + list(tf_hotkey_settings.SETTINGS.keys())
            )
        ),
        help="Name of setting to run.",
    )
    parser.add_argument(
        "--adapter",
        type=str,
        required=True,
        choices=["docker", "ec2"],
        help="Set adapter to be used.",
    )
    args = parser.parse_args()

    # Configure logger
    configure(f"flower_{args.baseline}_{args.setting}")

    run(baseline=args.baseline, setting=args.setting, adapter=args.adapter)
if __name__ == "__main__":
main()
| 33.296296 | 88 | 0.642937 | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Execute Fashion-MNIST baseline locally in Docker."""
import argparse
import concurrent.futures
import configparser
from logging import INFO
from os import path
from time import strftime
from typing import List, Optional
import flwr_experimental.baseline.tf_cifar.settings as tf_cifar_settings
import flwr_experimental.baseline.tf_fashion_mnist.settings as tf_fashion_mnist_settings
import flwr_experimental.baseline.tf_hotkey.settings as tf_hotkey_settings
from flwr.logger import configure, log
from flwr_experimental.baseline import command
from flwr_experimental.ops.cluster import Cluster, Instance
from flwr_experimental.ops.compute.adapter import Adapter
from flwr_experimental.ops.compute.docker_adapter import DockerAdapter
from flwr_experimental.ops.compute.ec2_adapter import EC2Adapter
OPS_INI_PATH = path.normpath(
f"{path.dirname(path.realpath(__file__))}/../../../.flower_ops"
)
# Read config file and extract all values which are needed further down.
CONFIG = configparser.ConfigParser()
CONFIG.read(OPS_INI_PATH)
WHEEL_FILENAME = CONFIG.get("paths", "wheel_filename")
WHEEL_LOCAL_PATH = path.expanduser(CONFIG.get("paths", "wheel_dir")) + WHEEL_FILENAME
DOCKER_PRIVATE_KEY = path.realpath(path.dirname(__file__) + "/../../../docker/ssh_key")
def now() -> str:
    """Return the current local time as a compact ``YYYYMMDDTHHMMSS`` stamp."""
    timestamp_format = "%Y%m%dT%H%M%S"
    return strftime(timestamp_format)
def configure_cluster(
    adapter: str, instances: List[Instance], baseline: str, setting: str
) -> Cluster:
    """Return configured compute cluster.

    Args:
        adapter: Either "docker" (local containers, root user, fixed key)
            or "ec2" (AWS instances configured from the .flower_ops file).
        instances: Instance descriptions the cluster should provision.
        baseline: Baseline name; attached as an EC2 tag only.
        setting: Setting name; attached as an EC2 tag only.

    Raises:
        Exception: If ``adapter`` is neither "docker" nor "ec2".
    """
    adapter_instance: Optional[Adapter] = None
    private_key: Optional[str] = None
    if adapter == "docker":
        adapter_instance = DockerAdapter()
        user = "root"
        private_key = DOCKER_PRIVATE_KEY
    elif adapter == "ec2":
        # All EC2 parameters come from the .flower_ops config read at import time.
        adapter_instance = EC2Adapter(
            image_id=CONFIG.get("aws", "image_id"),
            key_name=path.expanduser(CONFIG.get("aws", "key_name")),
            subnet_id=CONFIG.get("aws", "subnet_id"),
            security_group_ids=CONFIG.get("aws", "security_group_ids").split(","),
            tags=[
                ("Purpose", "flwr_experimental.baseline"),
                ("Baseline Name", baseline),
                ("Baseline Setting", setting),
            ],
        )
        user = "ubuntu"
        private_key = path.expanduser(CONFIG.get("ssh", "private_key"))
    else:
        raise Exception(f"Adapter of type {adapter} does not exist.")
    # timeout is in seconds — NOTE(review): presumably an SSH/connect timeout;
    # confirm against the Cluster implementation.
    cluster = Cluster(
        adapter=adapter_instance,
        ssh_credentials=(user, private_key),
        instances=instances,
        timeout=60,
    )
    return cluster
# pylint: disable=too-many-arguments, too-many-locals
def run(baseline: str, setting: str, adapter: str) -> None:
    """Run baseline.

    Provisions a cluster (plus one logserver instance), installs the Flower
    wheel, downloads datasets, then starts logserver, server and clients,
    and finally installs a shutdown watcher on every instance.

    Args:
        baseline: One of "tf_cifar", "tf_fashion_mnist", "tf_hotkey".
        setting: Setting name resolved via the baseline's settings module.
        adapter: "docker" or "ec2"; selects paths, users and keys.

    Raises:
        Exception: If ``baseline`` is not one of the known names.
    """
    print(f"Starting baseline with {setting} settings.")
    # Remote home directory differs between the docker (root) and ec2 (ubuntu) images.
    wheel_remote_path = (
        f"/root/{WHEEL_FILENAME}"
        if adapter == "docker"
        else f"/home/ubuntu/{WHEEL_FILENAME}"
    )
    if baseline == "tf_cifar":
        settings = tf_cifar_settings.get_setting(setting)
    elif baseline == "tf_fashion_mnist":
        settings = tf_fashion_mnist_settings.get_setting(setting)
    elif baseline == "tf_hotkey":
        settings = tf_hotkey_settings.get_setting(setting)
    else:
        raise Exception("Setting not found.")
    # Get instances and add a logserver to the list
    # (note: this appends to settings.instances in place).
    instances = settings.instances
    instances.append(
        Instance(name="logserver", group="logserver", num_cpu=2, num_ram=2)
    )
    # Configure cluster
    log(INFO, "(1/9) Configure cluster.")
    cluster = configure_cluster(adapter, instances, baseline, setting)
    # Start the cluster; this takes some time
    log(INFO, "(2/9) Start cluster.")
    cluster.start()
    # Upload wheel to all instances
    log(INFO, "(3/9) Upload wheel to all instances.")
    cluster.upload_all(WHEEL_LOCAL_PATH, wheel_remote_path)
    # Install the wheel on all instances
    log(INFO, "(4/9) Install wheel on all instances.")
    cluster.exec_all(command.install_wheel(wheel_remote_path))
    # Download datasets in server and clients
    log(INFO, "(5/9) Download dataset on server and clients.")
    cluster.exec_all(
        command.download_dataset(baseline=baseline), groups=["server", "clients"]
    )
    # Start logserver
    log(INFO, "(6/9) Start logserver.")
    logserver = cluster.get_instance("logserver")
    cluster.exec(
        logserver.name,
        command.start_logserver(
            logserver_s3_bucket=CONFIG.get("aws", "logserver_s3_bucket"),
            logserver_s3_key=f"{baseline}_{setting}_{now()}.log",
        ),
    )
    # Start Flower server on Flower server instances
    log(INFO, "(7/9) Start server.")
    cluster.exec(
        "server",
        command.start_server(
            log_host=f"{logserver.private_ip}:8081", baseline=baseline, setting=setting,
        ),
    )
    # Start Flower clients
    log(INFO, "(8/9) Start clients.")
    server = cluster.get_instance("server")
    # Launch all clients concurrently (up to 10 at a time) and block until
    # every start command has been issued.
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        # Start the load operations and mark each future with its URL
        concurrent.futures.wait(
            [
                executor.submit(
                    cluster.exec,
                    client_setting.instance_name,
                    command.start_client(
                        log_host=f"{logserver.private_ip}:8081",
                        server_address=f"{server.private_ip}:8080",
                        baseline=baseline,
                        setting=setting,
                        cid=client_setting.cid,
                    ),
                )
                for client_setting in settings.clients
            ]
        )
    # Shutdown server and client instance after 10min if not at least one Flower
    # process is running it
    log(INFO, "(9/9) Start shutdown watcher script.")
    cluster.exec_all(command.watch_and_shutdown("flower", adapter))
    # Give user info how to tail logfile
    private_key = (
        DOCKER_PRIVATE_KEY
        if adapter == "docker"
        else path.expanduser(CONFIG.get("ssh", "private_key"))
    )
    log(
        INFO,
        "If you would like to tail the central logfile run:\n\n\t%s\n",
        command.tail_logfile(adapter, private_key, logserver),
    )
def main() -> None:
    """Start Flower baseline."""
    # Union of every setting name known to the three baselines; run() later
    # resolves whether the chosen setting exists for the chosen baseline.
    setting_choices = list(
        set(
            list(tf_cifar_settings.SETTINGS.keys())
            + list(tf_fashion_mnist_settings.SETTINGS.keys())
            + list(tf_hotkey_settings.SETTINGS.keys())
        )
    )
    arg_parser = argparse.ArgumentParser(description="Flower")
    arg_parser.add_argument(
        "--baseline",
        type=str,
        required=True,
        choices=["tf_cifar", "tf_fashion_mnist", "tf_hotkey"],
        help="Name of baseline name to run.",
    )
    arg_parser.add_argument(
        "--setting",
        type=str,
        required=True,
        choices=setting_choices,
        help="Name of setting to run.",
    )
    arg_parser.add_argument(
        "--adapter",
        type=str,
        required=True,
        choices=["docker", "ec2"],
        help="Set adapter to be used.",
    )
    parsed = arg_parser.parse_args()
    # Route log records into a per-run logfile before kicking off the run.
    configure(f"flower_{parsed.baseline}_{parsed.setting}")
    run(baseline=parsed.baseline, setting=parsed.setting, adapter=parsed.adapter)
main()
| 0 | 0 | 0 |
366340617b1c5151be2142533461628bcb242cb4 | 1,121 | py | Python | in_class_examples_part_2/ucr_e-mail_input.py | ucrgradquant/ucr_python_seminar | cc3988aac334abf5cf1861da0770cd6c63784bf2 | [
"Apache-2.0"
] | null | null | null | in_class_examples_part_2/ucr_e-mail_input.py | ucrgradquant/ucr_python_seminar | cc3988aac334abf5cf1861da0770cd6c63784bf2 | [
"Apache-2.0"
] | null | null | null | in_class_examples_part_2/ucr_e-mail_input.py | ucrgradquant/ucr_python_seminar | cc3988aac334abf5cf1861da0770cd6c63784bf2 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2009-2013 by The Regents of the University of California
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# you may obtain a copy of the License from
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Calculate the person's UCR Net ID.
# NOTE: Python 2 script (raw_input / print-statement below).
# Get first and last name and number from user.
first_name = raw_input("Enter your first name: ")
last_name = raw_input("Enter your last name: ")
number = raw_input("Enter a number: ")
# Normalize the input: IDs are lowercase; number must be an int for zero-padding.
first_name = first_name.lower()
last_name = last_name.lower()
int_number = int(number)
# Create UCR Net ID: first initial + up to four letters of the last name
# + the number zero-padded to three digits.
net_id = "{0}{1}{2:03d}".format(first_name[0], last_name[0:4], int_number)
# Print the UCR Net ID
print "Your UCR Net ID is {0}.".format(net_id) | 35.03125 | 74 | 0.742194 | #
# Copyright 2009-2013 by The Regents of the University of California
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# you may obtain a copy of the License from
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Calculate the persons UCR Net ID.
# Get first and last name and number from user.
first_name = raw_input("Enter your first name: ")
last_name = raw_input("Enter your last name: ")
number = raw_input("Enter a number: ")
# Normalize the input.
first_name = first_name.lower()
last_name = last_name.lower()
int_number = int(number)
# Create UCR Net ID
net_id = "{0}{1}{2:03d}".format(first_name[0], last_name[0:4], int_number)
# Print the UCR Net ID
print "Your UCR Net ID is {0}.".format(net_id) | 0 | 0 | 0 |
88236ad0c9019c5c1c99d371d7ebf056d477d5a8 | 21,470 | py | Python | feature/5_extract_feature.py | DataXujing/Boutique-Travel-Services-Predict | 8daf77165c7f7a9f7daa895f200515db59ba171e | [
"MIT"
] | 43 | 2018-06-22T07:53:21.000Z | 2021-11-23T03:31:46.000Z | feature/5_extract_feature.py | Bifzivkar/Boutique-Travel-Services-Predict | 8daf77165c7f7a9f7daa895f200515db59ba171e | [
"MIT"
] | null | null | null | feature/5_extract_feature.py | Bifzivkar/Boutique-Travel-Services-Predict | 8daf77165c7f7a9f7daa895f200515db59ba171e | [
"MIT"
] | 15 | 2018-07-22T13:36:02.000Z | 2021-01-09T14:30:15.000Z | # -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
dire = '../../data/'
start = datetime.now()
orderHistory_train = pd.read_csv(dire + 'train/orderHistory_train.csv', encoding='utf-8')
orderFuture_train = pd.read_csv(dire + 'train/orderFuture_train3.csv', encoding='utf-8')
userProfile_train = pd.read_csv(dire + 'train/userProfile_train.csv', encoding='utf-8')
userComment_train = pd.read_csv(dire + 'train/userComment_train.csv', encoding='utf-8')
action_train = pd.read_csv(dire + 'train/action_train.csv', encoding='utf-8')
orderHistory_test = pd.read_csv(dire + 'test/orderHistory_test.csv', encoding='utf-8')
orderFuture_test = pd.read_csv(dire + 'test/orderFuture_test3.csv', encoding='utf-8')
userProfile_test = pd.read_csv(dire + 'test/userProfile_test.csv', encoding='utf-8')
userComment_test = pd.read_csv(dire + 'test/userComment_test.csv', encoding='utf-8')
action_test = pd.read_csv(dire + 'test/action_test.csv', encoding='utf-8')
# """
############# 1.user feature #############
"""
# 1. 用户地点划分1 2 3 线城市
"""
# # orderFuture_train = province_123(userProfile_train, orderFuture_train)
# # orderFuture_test = province_123(userProfile_test, orderFuture_test)
############# 2.history order feature #############
"""
# 1.
"""
# 历史纪录中城市的精品占比
# orderFuture = pd.concat([orderFuture_train,orderFuture_test])
# orderHistory = pd.concat([orderHistory_train,orderHistory_test])
# dataset = history_type1_rate(orderFuture, orderHistory)
# orderFuture_train = dataset[dataset.orderType.notnull()]
# orderFuture_test = dataset[dataset.orderType.isnull()]
############# 3.action feature #############
"""
# 1. action中大于6出现的次数
# 2. 对应点击2-4的和值 与 5-9 的比值
# 3. 全部点击2-4的和值 与 5-9 的比值
# 4. 对应浏览记录 1-9 操作所用平均时间
# 5. 全部浏览记录 1-9 操作所用平均时间
# """
# action中大于6出现的次数
orderFuture_train = greater_6_c(orderFuture_train)
orderFuture_test = greater_6_c(orderFuture_test)
# 对应点击2-4的和值 与 5-9 的比值
orderFuture_train = rate_24_59_c(orderFuture_train)
orderFuture_test = rate_24_59_c(orderFuture_test)
# 全部点击2-4的和值 与 5-9 的比值
orderFuture_train = rate_24_59(orderFuture_train)
orderFuture_test = rate_24_59(orderFuture_test)
# 全部action 最后一次 的类型
orderFuture_train = latest_actionType(orderFuture_train, action_train)
orderFuture_test = latest_actionType(orderFuture_test, action_test)
# 全部 action 倒数第2-6次操作的类型
orderFuture_train = latest2_actionType(orderFuture_train, action_train)
orderFuture_test = latest2_actionType(orderFuture_test, action_test)
# 时间间隔
# 最后1 2 3 4 次操作的时间间隔
# 时间间隔的均值 最小值 最大值 方差
orderFuture_train = time_interval(orderFuture_train, action_train)
orderFuture_test = time_interval(orderFuture_test, action_test)
# action 最后4 5 6 次操作时间的方差 和 均值
orderFuture_train = var_actionTime(orderFuture_train, action_train)
orderFuture_test = var_actionTime(orderFuture_test, action_test)
# 对应浏览记录浏览平均时间(可以改成最近几天的)
orderFuture_train = sum_actionType_time(orderFuture_train, action_train)
orderFuture_test = sum_actionType_time(orderFuture_test, action_test)
# 对应浏览记录 1-9 操作所用平均时间
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 1)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 2)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 3)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 4)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 5)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 6)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 7)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 8)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 9)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 1)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 2)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 3)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 4)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 5)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 6)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 7)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 8)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 9)
# 全部浏览记录 1-9 操作所用平均时间
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 1)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 2)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 3)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 4)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 5)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 6)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 7)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 8)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 9)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 1)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 2)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 3)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 4)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 5)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 6)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 7)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 8)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 9)
############# 4.time feature #############
"""
# 1. 季节特征
"""
orderFuture_train = season(orderFuture_train)
orderFuture_test = season(orderFuture_test)
# print(orderFuture_train)
# print(orderFuture_test)
print("开始提取:", start)
print("提取完成:", datetime.now())
orderFuture_train.to_csv(dire + 'train3.csv', index=False, encoding='utf-8')
orderFuture_test.to_csv(dire + 'test3.csv', index=False, encoding='utf-8') | 47.924107 | 249 | 0.69292 | # -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
dire = '../../data/'
start = datetime.now()
orderHistory_train = pd.read_csv(dire + 'train/orderHistory_train.csv', encoding='utf-8')
orderFuture_train = pd.read_csv(dire + 'train/orderFuture_train3.csv', encoding='utf-8')
userProfile_train = pd.read_csv(dire + 'train/userProfile_train.csv', encoding='utf-8')
userComment_train = pd.read_csv(dire + 'train/userComment_train.csv', encoding='utf-8')
action_train = pd.read_csv(dire + 'train/action_train.csv', encoding='utf-8')
orderHistory_test = pd.read_csv(dire + 'test/orderHistory_test.csv', encoding='utf-8')
orderFuture_test = pd.read_csv(dire + 'test/orderFuture_test3.csv', encoding='utf-8')
userProfile_test = pd.read_csv(dire + 'test/userProfile_test.csv', encoding='utf-8')
userComment_test = pd.read_csv(dire + 'test/userComment_test.csv', encoding='utf-8')
action_test = pd.read_csv(dire + 'test/action_test.csv', encoding='utf-8')
# """
############# 1.user feature #############
"""
# 1. 用户地点划分1 2 3 线城市
"""
def province_123(userProfile, orderFuture):
    """Attach a city-tier feature ('province_123': 1/2/3) from the user's province.

    Tier 1/2/3 correspond to first/second/third-tier provinces and
    municipalities; provinces outside all three lists stay None.
    Note: writes the 'province_123' column onto ``userProfile`` in place.

    Returns:
        ``orderFuture`` left-merged with ['userid', 'province_123'].
    """
    tiers = {
        1: ['上海', '北京', '广东'],
        2: ['福建', '重庆', '山东', '湖南', '陕西', '广西', '辽宁', '安徽', '河北',
            '重庆', '四川', '湖北', '江苏', '浙江', '天津'],
        3: ['云南', '黑龙江', '河南', '江西', '贵州', '山西', '内蒙古', '甘肃',
            '新疆', '海南', '宁夏', '青海', '西藏'],
    }
    userProfile['province_123'] = None
    for tier, provinces in tiers.items():
        # .loc replaces the original chained assignment
        # (df['col'][mask] = v), which pandas may apply to a temporary
        # copy and silently drop.
        userProfile.loc[userProfile['province'].isin(provinces), 'province_123'] = tier
    order = pd.merge(
        orderFuture, userProfile[['userid', 'province_123']], on='userid', how='left'
    )
    return order
# # orderFuture_train = province_123(userProfile_train, orderFuture_train)
# # orderFuture_test = province_123(userProfile_test, orderFuture_test)
############# 2.history order feature #############
"""
# 1.
"""
# 历史纪录中城市的精品占比
def history_type1_rate(orderFuture, orderHistory):
    """Add per-user location weights based on historical premium orders.

    For each city/country/continent value, the weight is
    (share of all history orders at that location) *
    (fraction of that location's orders with orderType == 1).
    Each user then gets the mean weight over their history rows, merged
    onto ``orderFuture`` as city_rate / country_rate / continent_rate.
    """
    # NOTE: `all` shadows the builtin of the same name within this function.
    all = len(orderHistory)
    print("all:", all)  # debug output left in place
    city_type1_rate = pd.DataFrame(columns=['city', 'city_rate'])
    country_type1_rate = pd.DataFrame(columns=['country', 'country_rate'])
    continent_type1_rate = pd.DataFrame(columns=['continent', 'continent_rate'])
    city1 = []
    country1 = []
    continent1 = []
    city_rate = []
    country_rate = []
    continent_rate = []
    city_list = list(set(list(orderHistory.city)))
    print(len(city_list))  # debug output left in place
    country_list = list(set(list(orderHistory.country)))
    continent_list = list(set(list(orderHistory.continent)))
    # Each loop re-filters orderHistory per distinct value; O(values * rows).
    for city in city_list:
        city1.append(city)
        city_rate.append((len(orderHistory[orderHistory['city'] == city])/all)*(len(orderHistory[(orderHistory['city'] == city) & (orderHistory['orderType'] == 1)])/len(orderHistory[orderHistory['city'] == city])))
    for country in country_list:
        country1.append(country)
        country_rate.append((len(orderHistory[orderHistory['country'] == country])/all)*(len(orderHistory[(orderHistory['country'] == country) & (orderHistory['orderType'] == 1)])/len(orderHistory[orderHistory['country'] == country])))
    for continent in continent_list:
        continent1.append(continent)
        continent_rate.append((len(orderHistory[orderHistory['continent'] == continent])/all)*(len(orderHistory[(orderHistory['continent'] == continent) & (orderHistory['orderType'] == 1)])/len(orderHistory[orderHistory['continent'] == continent])))
    city_type1_rate['city'] = city1
    city_type1_rate['city_rate'] = city_rate
    country_type1_rate['country'] = country1
    country_type1_rate['country_rate'] = country_rate
    continent_type1_rate['continent'] = continent1
    continent_type1_rate['continent_rate'] = continent_rate
    # Attach the per-location weights back onto every history row ...
    orderHistory = pd.merge(orderHistory, city_type1_rate, on='city', how='left')
    orderHistory = pd.merge(orderHistory, country_type1_rate, on='country', how='left')
    orderHistory = pd.merge(orderHistory, continent_type1_rate, on='continent', how='left')
    # ... then average them per user.
    orderHistory = orderHistory.groupby(orderHistory.userid)['city_rate', 'country_rate', 'continent_rate'].mean().reset_index()
    orderFuture = pd.merge(orderFuture, orderHistory[['userid', 'city_rate', 'country_rate', 'continent_rate']], on='userid', how='left')
    return orderFuture
# orderFuture = pd.concat([orderFuture_train,orderFuture_test])
# orderHistory = pd.concat([orderHistory_train,orderHistory_test])
# dataset = history_type1_rate(orderFuture, orderHistory)
# orderFuture_train = dataset[dataset.orderType.notnull()]
# orderFuture_test = dataset[dataset.orderType.isnull()]
############# 3.action feature #############
"""
# 1. action中大于6出现的次数
# 2. 对应点击2-4的和值 与 5-9 的比值
# 3. 全部点击2-4的和值 与 5-9 的比值
# 4. 对应浏览记录 1-9 操作所用平均时间
# 5. 全部浏览记录 1-9 操作所用平均时间
# """
# action中大于6出现的次数
def greater_6_c(orderFuture):
    """Add 'action_greater_7_c': per-row total of actionType 7-9 counts (NaN -> 0)."""
    high_type_counts = orderFuture[['action_7_c', 'action_8_c', 'action_9_c']].fillna(0)
    orderFuture['action_greater_7_c'] = high_type_counts.sum(axis=1)
    return orderFuture
orderFuture_train = greater_6_c(orderFuture_train)
orderFuture_test = greater_6_c(orderFuture_test)
# 对应点击2-4的和值 与 5-9 的比值
def rate_24_59_c(orderFuture):
    """Add order-specific ratios of low actionType counts (1, and 2-4) to high ones (5-9).

    NaN counts are treated as 0; division by a zero denominator follows
    pandas semantics (inf or NaN).
    """
    filled = orderFuture.fillna(0)
    high_sum = (
        filled['action_5_c'] + filled['action_6_c'] + filled['action_7_c']
        + filled['action_8_c'] + filled['action_9_c']
    )
    orderFuture['rate_1_59_c'] = filled['action_1_c'] / high_sum
    orderFuture['rate_24_59_c'] = (
        filled['action_2_c'] + filled['action_3_c'] + filled['action_4_c']
    ) / high_sum
    return orderFuture
orderFuture_train = rate_24_59_c(orderFuture_train)
orderFuture_test = rate_24_59_c(orderFuture_test)
# 全部点击2-4的和值 与 5-9 的比值
def rate_24_59(orderFuture):
    """Add overall ratios of low actionType counts (1, and 2-4) to high ones (5-9).

    NaN counts are treated as 0; division by a zero denominator follows
    pandas semantics (inf or NaN).
    """
    filled = orderFuture.fillna(0)
    high_sum = (
        filled['action_5'] + filled['action_6'] + filled['action_7']
        + filled['action_8'] + filled['action_9']
    )
    orderFuture['rate_1_59'] = filled['action_1'] / high_sum
    orderFuture['rate_24_59'] = (
        filled['action_2'] + filled['action_3'] + filled['action_4']
    ) / high_sum
    return orderFuture
orderFuture_train = rate_24_59(orderFuture_train)
orderFuture_test = rate_24_59(orderFuture_test)
# 全部action 最后一次 的类型
def latest_actionType(orderFuture, action):
    """Merge each user's final recorded actionType onto orderFuture as 'latest_actionType'."""
    last_rows = (
        action.groupby(['userid'])
        .last()
        .reset_index()
        .rename(columns={'actionType': 'latest_actionType'})
    )
    return pd.merge(
        orderFuture, last_rows[['userid', 'latest_actionType']], on='userid', how='left'
    )
orderFuture_train = latest_actionType(orderFuture_train, action_train)
orderFuture_test = latest_actionType(orderFuture_test, action_test)
# 全部 action 倒数第2-6次操作的类型
def latest2_actionType(orderFuture, action):
    """Add the 2nd..6th most recent actionType per user (latest_2..latest_6_actionType).

    Relies on ``action`` having a default RangeIndex so that
    ``idxmax`` yields a positional row number; rows before the user's
    last row are read by position.  The ``row.userid == action['userid'][...]``
    guards keep a lookup from crossing into another user's rows (None
    is appended instead).  NOTE(review): for the very first users in the
    frame ``row.actionTime - k`` can be negative; with a RangeIndex a
    negative label raises KeyError, so the data presumably guarantees
    enough leading rows — confirm.
    """
    userid = []
    latest_2_actionType = []
    latest_3_actionType = []
    latest_4_actionType = []
    latest_5_actionType = []
    latest_6_actionType = []
    # 'actionTime' here is the *index* of each user's max actionTime (idxmax),
    # not a timestamp.
    latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
    latest_2 = latest  # alias, same object — columns added below land on `latest` too
    for index, row in latest.iterrows():
        userid.append(row.userid)
        if(row.userid == action['userid'][row.actionTime-1]):
            latest_2_actionType.append(action['actionType'][row.actionTime-1])
        else:
            latest_2_actionType.append(None)
        if (row.userid == action['userid'][row.actionTime - 2]):
            latest_3_actionType.append(action['actionType'][row.actionTime - 2])
        else:
            latest_3_actionType.append(None)
        if (row.userid == action['userid'][row.actionTime - 3]):
            latest_4_actionType.append(action['actionType'][row.actionTime - 3])
        else:
            latest_4_actionType.append(None)
        if (row.userid == action['userid'][row.actionTime - 4]):
            latest_5_actionType.append(action['actionType'][row.actionTime - 4])
        else:
            latest_5_actionType.append(None)
        if (row.userid == action['userid'][row.actionTime - 5]):
            latest_6_actionType.append(action['actionType'][row.actionTime - 5])
        else:
            latest_6_actionType.append(None)
    latest_2['latest_2_actionType'] = latest_2_actionType
    latest_2['latest_3_actionType'] = latest_3_actionType
    latest_2['latest_4_actionType'] = latest_4_actionType
    latest_2['latest_5_actionType'] = latest_5_actionType
    latest_2['latest_6_actionType'] = latest_6_actionType
    orderFuture = pd.merge(orderFuture, latest_2[['userid', 'latest_2_actionType', 'latest_3_actionType',
                                                  'latest_4_actionType', 'latest_5_actionType', 'latest_6_actionType']], on='userid', how='left')
    return orderFuture
orderFuture_train = latest2_actionType(orderFuture_train, action_train)
orderFuture_test = latest2_actionType(orderFuture_test, action_test)
# 时间间隔
# 最后1 2 3 4 次操作的时间间隔
# 时间间隔的均值 最小值 最大值 方差
def time_interval(orderFuture, action):
    """Add time-gap features per user from the precomputed 'actionType_time' column.

    Features: the gap at the last 1..5 actions (latest_k_time_interval),
    plus mean / variance / min of all gaps.  NOTE(review):
    'actionType_time' is presumably the inter-action time gap computed
    upstream — confirm against the preprocessing step.  Like
    latest2_actionType, positional lookups assume a default RangeIndex
    on ``action``.
    """
    # Gap at the most recent action: groupby().last() takes each user's
    # final row.
    latest = action.groupby(['userid']).last().reset_index()
    latest.rename(columns={'actionType_time': 'latest_1_time_interval'}, inplace=True)
    orderFuture = pd.merge(orderFuture, latest[['userid', 'latest_1_time_interval']], on='userid', how='left')
    # Gaps at the 2nd..5th most recent actions, read by position relative
    # to the index of each user's max actionTime.
    userid = []
    latest_2_time_interval = []
    latest_3_time_interval = []
    latest_4_time_interval = []
    latest_5_time_interval = []
    latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
    latest.rename(columns={'actionTime': 'max_index'}, inplace=True)
    latest_2 = latest  # alias, same object
    for index, row in latest.iterrows():
        userid.append(row.userid)
        # 2nd most recent; guard keeps the lookup within the same user's rows
        if (row.userid == action['userid'][row.max_index - 1]):
            latest_2_time_interval.append(action['actionType_time'][row.max_index - 1])
        else:
            latest_2_time_interval.append(None)
        # 3rd most recent
        if (row.userid == action['userid'][row.max_index - 2]):
            latest_3_time_interval.append(action['actionType_time'][row.max_index - 2])
        else:
            latest_3_time_interval.append(None)
        # 4th most recent
        if (row.userid == action['userid'][row.max_index - 3]):
            latest_4_time_interval.append(action['actionType_time'][row.max_index - 3])
        else:
            latest_4_time_interval.append(None)
        # 5th most recent
        if (row.userid == action['userid'][row.max_index - 4]):
            latest_5_time_interval.append(action['actionType_time'][row.max_index - 4])
        else:
            latest_5_time_interval.append(None)
    latest_2['latest_2_time_interval'] = latest_2_time_interval
    latest_2['latest_3_time_interval'] = latest_3_time_interval
    latest_2['latest_4_time_interval'] = latest_4_time_interval
    latest_2['latest_5_time_interval'] = latest_5_time_interval
    orderFuture = pd.merge(orderFuture, latest_2[['userid', 'latest_2_time_interval', 'latest_3_time_interval',
                                                  'latest_4_time_interval', 'latest_5_time_interval']], on='userid', how='left')
    # mean of all gaps per user
    latest = action.groupby(['userid'])['actionType_time'].mean().reset_index()
    latest.rename(columns={'actionType_time': 'actionType_time_mean'}, inplace=True)
    orderFuture = pd.merge(orderFuture, latest[['userid', 'actionType_time_mean']], on='userid', how='left')
    # variance of all gaps per user (dict-agg renames the column directly)
    latest = action.groupby(['userid'])['actionType_time'].agg({'actionType_time_var':'var'}).reset_index()
    orderFuture = pd.merge(orderFuture, latest[['userid', 'actionType_time_var']], on='userid', how='left')
    # minimum gap per user
    latest = action.groupby(['userid'])['actionType_time'].min().reset_index()
    latest.rename(columns={'actionType_time': 'actionType_time_min'}, inplace=True)
    orderFuture = pd.merge(orderFuture, latest[['userid', 'actionType_time_min']], on='userid', how='left')
    return orderFuture
orderFuture_train = time_interval(orderFuture_train, action_train)
orderFuture_test = time_interval(orderFuture_test, action_test)
# action 最后4 5 6 次操作时间的方差 和 均值
def var_actionTime(orderFuture, action):
    """Add the variance of each user's last 2/3/4 inter-action time gaps.

    latest_k_actionTime_var is the sample variance of the gaps among a
    user's last k actions (so k-1 gap values).  Positional lookups assume
    a default RangeIndex on ``action``; the userid guards append None when
    the window would cross into another user's rows.
    """
    userid = []
    latest_3_actionTime_var = []
    latest_4_actionTime_var = []
    latest_5_actionTime_var = []
    # 'actionTime' column below holds the positional index of each user's
    # max actionTime (idxmax), not a timestamp.
    latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
    latest_2 = latest  # alias, same object
    for index, row in latest.iterrows():
        userid.append(row.userid)
        # variance over the last 3 actions (2 gaps)
        if ((row.userid == action['userid'][row.actionTime]) and (row.userid == action['userid'][row.actionTime - 1]) and
                (row.userid == action['userid'][row.actionTime - 2])):
            var = pd.Series([action['actionTime'][row.actionTime]-action['actionTime'][row.actionTime-1],
                             action['actionTime'][row.actionTime-1]-action['actionTime'][row.actionTime-2]
                             ]).var()
            latest_3_actionTime_var.append(var)
        else:
            latest_3_actionTime_var.append(None)
        # variance over the last 4 actions (3 gaps)
        if ((row.userid == action['userid'][row.actionTime]) and (row.userid == action['userid'][row.actionTime - 1]) and
                (row.userid == action['userid'][row.actionTime - 2]) and (row.userid == action['userid'][row.actionTime - 3])):
            var = pd.Series([action['actionTime'][row.actionTime]-action['actionTime'][row.actionTime-1],
                             action['actionTime'][row.actionTime-1]-action['actionTime'][row.actionTime-2],
                             action['actionTime'][row.actionTime-2]-action['actionTime'][row.actionTime-3]
                             ]).var()
            latest_4_actionTime_var.append(var)
        else:
            latest_4_actionTime_var.append(None)
        # variance over the last 5 actions (4 gaps)
        if ((row.userid == action['userid'][row.actionTime]) and (row.userid == action['userid'][row.actionTime - 1]) and
                (row.userid == action['userid'][row.actionTime - 2]) and (row.userid == action['userid'][row.actionTime - 3]) and
                (row.userid == action['userid'][row.actionTime - 4])):
            var = pd.Series([action['actionTime'][row.actionTime]-action['actionTime'][row.actionTime-1],
                             action['actionTime'][row.actionTime-1]-action['actionTime'][row.actionTime-2],
                             action['actionTime'][row.actionTime-2]-action['actionTime'][row.actionTime-3],
                             action['actionTime'][row.actionTime-3]-action['actionTime'][row.actionTime-4]
                             ]).var()
            latest_5_actionTime_var.append(var)
        else:
            latest_5_actionTime_var.append(None)
    latest_2['latest_3_actionTime_var'] = latest_3_actionTime_var
    latest_2['latest_4_actionTime_var'] = latest_4_actionTime_var
    latest_2['latest_5_actionTime_var'] = latest_5_actionTime_var
    orderFuture = pd.merge(orderFuture, latest_2[['userid', 'latest_3_actionTime_var', 'latest_4_actionTime_var',
                                                  'latest_5_actionTime_var']], on='userid', how='left')
    return orderFuture
orderFuture_train = var_actionTime(orderFuture_train, action_train)
orderFuture_test = var_actionTime(orderFuture_test, action_test)
# 对应浏览记录浏览平均时间(可以改成最近几天的)
def sum_actionType_time(orderFuture, action):
    """Add 'actionType_time_day_avg': browsing time per active day, per user.

    Only action rows not tied to an order (orderid is null) are counted.
    The feature is the user's total 'actionType_time' divided by the
    number of distinct 'action_date' values they were active on.
    Users with no such rows get NaN after the left merge.
    """
    unordered = action[action.orderid.isnull()]
    grouped = unordered.groupby('userid')
    # nunique replaces the original two-step groupby-count of
    # (userid, action_date) groups; both yield distinct active days.
    # The original debug print of the intermediate frame was removed.
    per_user = pd.DataFrame({
        'actionType_time_sum': grouped['actionType_time'].sum(),
        'days_action': grouped['action_date'].nunique(),
    }).reset_index()
    per_user['actionType_time_day_avg'] = (
        per_user['actionType_time_sum'] / per_user['days_action']
    )
    return pd.merge(
        orderFuture,
        per_user[['userid', 'actionType_time_day_avg']],
        on='userid',
        how='left',
    )
orderFuture_train = sum_actionType_time(orderFuture_train, action_train)
orderFuture_test = sum_actionType_time(orderFuture_test, action_test)
# 对应浏览记录 1-9 操作所用平均时间
def avg_time_action_c(orderFuture, action, k):
    """Add 'time_k_c': average dwell time on actionType ``k`` in order-free rows.

    For each user, sums the gap to the *next* action row (by position)
    over their type-``k`` rows with null orderid, then divides by the
    precomputed count column 'action_k_c'.  Assumes ``action`` has a
    default RangeIndex.  O(len(orderFuture) * filtering) — slow by design;
    the per-row ``print(index)`` progress output is left in place.
    """
    time_k = []
    # Only rows not attached to an order.
    select = action[action.orderid.isnull()]
    for index, row in orderFuture.iterrows():
        print(index)  # progress indicator
        action_k = select[(select['actionType'] == k) & (select['userid'] == row.userid)]
        if (len(action_k) == 0):
            time = None
        else:
            time = 0
            for index1, row1 in action_k.iterrows():
                # Gap to the next row, only if it belongs to the same user
                # and exists.
                if(((index1 + 1) < len(action)) and (row1.userid == action['userid'][index1+1])):
                    time = time + (action['actionTime'][index1+1] - row1.actionTime)
        time_k.append(time)
    orderFuture['time_'+ str(k) +'_c'] = time_k
    # Total gap / number of type-k clicks -> average time per click.
    orderFuture['time_'+ str(k) +'_c'] = orderFuture['time_'+ str(k) +'_c']/orderFuture['action_'+ str(k) +'_c']
    return orderFuture
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 1)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 2)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 3)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 4)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 5)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 6)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 7)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 8)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 9)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 1)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 2)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 3)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 4)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 5)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 6)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 7)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 8)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 9)
# 全部浏览记录 1-9 操作所用平均时间
def avg_time_action(orderFuture, action, k):
    """Per user, average time spent on actions of type *k* over the *whole*
    action log (unlike avg_time_action_c, which filters out ordered actions).

    NOTE(review): positional lookups `action[...][index1+1]` assume a
    contiguous 0..n-1 integer index on *action* — TODO confirm.
    Writes 'time_<k>' = total time / existing count column 'action_<k>'.
    Mutates *orderFuture* in place and returns it.
    """
    time_k = []
    for index, row in orderFuture.iterrows():
        print(index)  # progress trace; nested scan is O(users * actions)
        action_k = action[(action['actionType'] == k) & (action['userid'] == row.userid)]
        if (len(action_k) == 0):
            time = None  # user never did action k; becomes NaN after division
        else:
            time = 0
            for index1, row1 in action_k.iterrows():
                # Gap to the next logged action of the same user.
                if(((index1 + 1) < len(action)) and (row1.userid == action['userid'][index1+1])):
                    time = time + (action['actionTime'][index1+1] - row1.actionTime)
        time_k.append(time)
    orderFuture['time_'+ str(k)] = time_k
    # Average = accumulated time / per-user count of action k.
    orderFuture['time_'+ str(k)] = orderFuture['time_'+ str(k)]/orderFuture['action_'+ str(k)]
    return orderFuture
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 1)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 2)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 3)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 4)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 5)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 6)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 7)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 8)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 9)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 1)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 2)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 3)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 4)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 5)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 6)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 7)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 8)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 9)
############# 4.time feature #############
"""
# 1. 季节特征
"""
def season(orderFuture):
    """Add a 'season' feature derived from the 'future_month' column.

    Mapping: months 1-3 -> 1, 4-6 -> 2, 7-9 -> 3 (the default), 10-12 -> 4.
    Mutates *orderFuture* in place and returns it.
    """
    orderFuture['season'] = 3
    # Use .loc instead of chained indexing (df['season'][mask] = x): chained
    # assignment raises SettingWithCopyWarning and may silently fail to write.
    orderFuture.loc[orderFuture['future_month'] <= 3, 'season'] = 1
    orderFuture.loc[(orderFuture['future_month'] >= 4) & (orderFuture['future_month'] <= 6), 'season'] = 2
    orderFuture.loc[orderFuture['future_month'] >= 10, 'season'] = 4
    return orderFuture
# Apply the season feature to both splits, then persist the full feature
# tables.  `start` and `dire` are defined earlier in this script — presumably
# the extraction start timestamp and the output directory; TODO confirm.
orderFuture_train = season(orderFuture_train)
orderFuture_test = season(orderFuture_test)
# print(orderFuture_train)
# print(orderFuture_test)
print("开始提取:", start)
print("提取完成:", datetime.now())
orderFuture_train.to_csv(dire + 'train3.csv', index=False, encoding='utf-8')
orderFuture_test.to_csv(dire + 'test3.csv', index=False, encoding='utf-8')
4641f2457526cdb1e198f1d280179ee2937ae682 | 1,816 | py | Python | rl_traders/agents/core.py | jjakimoto/rl_traders.py | d5411c96d49ba6a54751d12cdd11974e5cc1a8aa | [
"MIT"
] | 2 | 2018-10-07T14:16:32.000Z | 2019-01-28T00:14:29.000Z | rl_traders/agents/core.py | jjakimoto/rl_traders.py | d5411c96d49ba6a54751d12cdd11974e5cc1a8aa | [
"MIT"
] | null | null | null | rl_traders/agents/core.py | jjakimoto/rl_traders.py | d5411c96d49ba6a54751d12cdd11974e5cc1a8aa | [
"MIT"
] | 1 | 2019-11-05T00:51:20.000Z | 2019-11-05T00:51:20.000Z | from abc import ABC, abstractmethod
from tqdm import tqdm, tqdm_notebook
from ..processors import Processor
class BaseAgent(ABC):
"""Abstract Agentclass
Parameters
----------
action_spec: dict
Have to define 'type' and 'shape'
state_spec: dict, optional
Have to define 'type' and 'shape'
is_debug: bool
If True, print out certain properties for debugging
Note
----
You need to define the followings:
_observe: observe and store
"""
@abstractmethod
@abstractmethod
| 29.770492 | 85 | 0.640419 | from abc import ABC, abstractmethod
from tqdm import tqdm, tqdm_notebook
from ..processors import Processor
class BaseAgent(ABC):
    """Abstract agent base class.

    Parameters
    ----------
    action_spec: dict
        Have to define 'type' and 'shape'
    state_spec: dict, optional
        Have to define 'type' and 'shape'
    processor: callable
        Preprocesses (observation, action, reward, terminal) tuples;
        presumably returns the processed 4-tuple — confirm against the
        processors module.
    is_debug: bool
        If True, print out certain properties for debugging
    is_notebook: bool
        If True, use the Jupyter-friendly tqdm progress bar.

    Note
    ----
    Subclasses must implement:
        _observe: observe and store a transition
        _predict: choose an action for a given state
    """
    def __init__(self, action_spec, state_spec=None,
                 processor=Processor,
                 is_debug=False, is_notebook=False, *args, **kwargs):
        self.action_spec = action_spec
        self.action_shape = self.action_spec["shape"]
        self.state_spec = state_spec
        self.processor = processor
        self.is_debug = is_debug
        # Progress bar: the notebook variant renders properly inside Jupyter.
        if is_notebook:
            self.pbar = tqdm_notebook()
        else:
            self.pbar = tqdm()

    def observe(self, observation, action, reward, terminal, info, is_store):
        """Preprocess a transition, tick the progress bar and delegate to the
        subclass hook ``_observe``."""
        observation, action, reward, terminal =\
            self.processor(observation, action, reward, terminal)
        self.pbar.update(1)
        return self._observe(observation, action, reward, terminal, info, is_store)

    @abstractmethod
    def _observe(self, observation, action, reward, terminal, info, is_store):
        """Subclass hook: store/react to one processed transition."""
        raise NotImplementedError("Need to define '_observe' at a subclass of Agent")

    def predict(self, observation, *args, **kwargs):
        """Return the action chosen by the subclass hook ``_predict``."""
        action = self._predict(observation, *args, **kwargs)
        return action

    @abstractmethod
    def _predict(self, state, *args, **kwargs):
        """Subclass hook: choose an action for *state*."""
        raise NotImplementedError("Need to define '_predict' at a subclass of Agent")

    def fit(self, *args, **kwargs):
        """Optional training hook; no-op by default."""
        pass
537d5b87b40494c58dc7a9d373d1124e1379f5f0 | 385 | py | Python | timeblock/time.py | w158rk/timeblock | 5b6f6382bb29217127a2387d9efc1655d22fe3a1 | [
"MIT"
] | 1 | 2021-05-03T13:13:12.000Z | 2021-05-03T13:13:12.000Z | timeblock/time.py | w158rk/timeblock | 5b6f6382bb29217127a2387d9efc1655d22fe3a1 | [
"MIT"
] | null | null | null | timeblock/time.py | w158rk/timeblock | 5b6f6382bb29217127a2387d9efc1655d22fe3a1 | [
"MIT"
] | null | null | null | from datetime import datetime,timedelta
current:datetime = None | 21.388889 | 51 | 0.722078 | from datetime import datetime,timedelta
# Module-wide simulated "now"; stays None until set_current() is called,
# so the getters below raise AttributeError if used before initialisation.
current:datetime = None

def set_current(d: datetime.date, t:datetime.time):
    """Set the module clock to date *d* combined with time *t*."""
    global current
    current = datetime.combine(d, t)

def get_current_date():
    """Return the date part of the module clock (call set_current first)."""
    return current.date()

def get_current_time():
    """Return the time part of the module clock (call set_current first)."""
    return current.time()

def inc_current(units: int):
    """Advance the module clock by *units* half-hour blocks (30 min each)."""
    delta = timedelta(minutes=units*30)
    global current
    current += delta
166799c5b0bc1df1500ef56cdabafd4e3af994d8 | 3,049 | py | Python | databases.py | Fy-Network/fysql | 9a5910601e9aa13479c9fbd05eb64e958e90dea2 | [
"MIT"
] | 1 | 2016-06-17T08:48:52.000Z | 2016-06-17T08:48:52.000Z | databases.py | Fy-/fysql | 9a5910601e9aa13479c9fbd05eb64e958e90dea2 | [
"MIT"
] | 1 | 2016-06-17T18:06:41.000Z | 2016-06-17T18:06:41.000Z | databases.py | Fy-Network/fysql | 9a5910601e9aa13479c9fbd05eb64e958e90dea2 | [
"MIT"
] | 2 | 2018-02-11T02:14:11.000Z | 2020-01-07T05:40:34.000Z | # -*- coding: utf-8 -*-
"""
fysql.databases
~~~~~~~~~~~~~~~
:copyright: (c) 2016 by Gasquez Florian
:license: MIT, see LICENSE for more details.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from warnings import filterwarnings
import mysql.connector as mysql_connector
from .exceptions import FysqlException
from .tables import Tables
from .static import Tables
| 21.935252 | 70 | 0.702525 | # -*- coding: utf-8 -*-
"""
fysql.databases
~~~~~~~~~~~~~~~
:copyright: (c) 2016 by Gasquez Florian
:license: MIT, see LICENSE for more details.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from warnings import filterwarnings
import mysql.connector as mysql_connector
from .exceptions import FysqlException
from .tables import Tables
from .static import Tables
class Database(object):
    """Abstract database backend.

    Holds connection kwargs, lazily opens the connection on first use, and
    attaches itself to every registered table.  Concrete backends implement
    `_connect`, `_close` and `_escape_string`.
    """
    def __init__(self, database, **kwargs):
        self.conn_kwargs = kwargs
        self.closed = True
        self._connection = False  # placeholder until first _connect()
        self.database = database
        # Register this database on every declared table.
        for key, value in Tables.tables.items():
            value._database = self

    @property
    def connection(self):
        """Lazily opened connection; reconnects after close()."""
        if self.closed:
            self._connection = self._connect(self.database, **self.conn_kwargs)
            self.closed = False
        return self._connection

    def create_all(self, ignore=[]):
        """Drop and re-create every registered table not listed in *ignore*.

        NOTE(review): mutable default `ignore=[]` and the stray print(table)
        debug trace — candidates for cleanup.
        """
        for key, table in Tables.tables.items():
            print(table)
            if table not in ignore:
                table.drop_table()
                table.create_table()

    def connect(self):
        """Explicitly open (or return) the connection."""
        return self.connection

    def close(self):
        """Close the connection if open; a later use reconnects."""
        if not self.closed:
            self._close()
            self.closed = True

    def _connect(self, database, **kwargs):
        """Backend hook: open and return a raw connection."""
        raise NotImplementedError

    def _close(self):
        """Backend hook: close the raw connection."""
        raise NotImplementedError

    def _escape_string(self, value):
        """Backend hook: escape *value* for SQL embedding."""
        raise NotImplementedError

    def escape_string(self, value):
        """Public wrapper around the backend escape hook."""
        return self._escape_string(value)

    def cursor(self):
        """Return a fresh cursor, reconnecting if necessary."""
        if self.closed:
            self._connection = self._connect(self.database, **self.conn_kwargs)
            self.closed = False
        return self._connection.cursor()

    def insert_id(self, cursor):
        """Return the last inserted row id via the backend hook."""
        return self._insert_id(cursor)

    def commit(self):
        self.connection.commit()

    def rollback(self):
        self.connection.rollback()

    def raw(self, sql, commit=False):
        """Alias for execute() — run a raw SQL string."""
        return self.execute(sql, commit)

    def execute(self, sql, commit=False):
        """Execute *sql* on a new cursor; commit when asked; return cursor."""
        cursor = self.cursor()
        cursor.execute(sql)
        if commit:
            self.commit()
        return cursor
class MySQLDatabase(Database):
    """MySQL implementation of :class:`Database` backed by mysql-connector."""

    # Translation table for _escape_string: identity for ASCII except the
    # characters MySQL requires to be backslash-escaped.  Built once at class
    # creation time instead of on every _escape_string() call.
    _ESCAPE_TABLE = [chr(x) for x in range(128)]
    _ESCAPE_TABLE[0] = u'\\0'
    _ESCAPE_TABLE[ord('\\')] = u'\\\\'
    _ESCAPE_TABLE[ord('\n')] = u'\\n'
    _ESCAPE_TABLE[ord('\r')] = u'\\r'
    _ESCAPE_TABLE[ord('\032')] = u'\\Z'
    _ESCAPE_TABLE[ord('"')] = u'\\"'
    _ESCAPE_TABLE[ord("'")] = u"\\'"

    def _connect(self, database, **kwargs):
        """Open a MySQL connection.

        Requires 'host', 'user' and 'password' in the connection kwargs;
        'charset' defaults to utf8mb4.
        """
        conn_kwargs = {'charset': 'utf8mb4', 'use_unicode': True}
        conn_kwargs.update(kwargs)
        return mysql_connector.connect(
            host=conn_kwargs['host'],
            user=conn_kwargs['user'],
            password=conn_kwargs['password'],
            db=database,
            charset=conn_kwargs['charset'],
        )

    def _close(self):
        """Close the underlying mysql-connector connection."""
        self.connection.close()

    def _insert_id(self, cursor):
        """Return the id generated by the last INSERT on *cursor*."""
        return cursor.lastrowid

    def _row_count(self, cursor):
        """Return the number of rows affected by the last statement."""
        return cursor.rowcount

    def _escape_string(self, value):
        """Escape *value* for embedding in SQL (no surrounding quotes).

        Characters with ordinal >= 128 are left unchanged: str.translate
        keeps any character whose ordinal is outside the table.
        """
        return value.translate(self._ESCAPE_TABLE)

    def get_tables(self):
        """Return the list of table names in the current database."""
        return [row for row, in self.execute('SHOW TABLES')]
| 2,073 | 383 | 191 |
826249957a3d8af93fc3e00b2ec89ef6f61abd92 | 8,159 | py | Python | Selenium/Opencart_windows_operations/conftest.py | BahrmaLe/otus_python_homework | 510a4f1971b35048d760fcc45098e511b81bea31 | [
"MIT"
] | 1 | 2021-02-25T15:37:21.000Z | 2021-02-25T15:37:21.000Z | Selenium/Opencart_windows_operations/conftest.py | BahrmaLe/otus_python_homework | 510a4f1971b35048d760fcc45098e511b81bea31 | [
"MIT"
] | null | null | null | Selenium/Opencart_windows_operations/conftest.py | BahrmaLe/otus_python_homework | 510a4f1971b35048d760fcc45098e511b81bea31 | [
"MIT"
] | null | null | null | """Fixtures to testing Opencart login page"""
import os
import sys
import pytest
from selenium import webdriver as WD
from Selenium.Opencart_windows_operations.models.page_objects.page_objects import LoginPage, \
ProductPage, ProductsPage, ProductManager, DownloadPage, DownloadManager, CustomMenuDesignPage, CustomMenuDesigner
image = os.path.abspath('C:/Users/60064265/PycharmProjects/Homework/Selenium/Opencart_windows_operations/1.JPG')
def pytest_addoption(parser):
    """Register the command-line options used by the Opencart test suite:
    site URLs, browser selection, credentials, webdriver timeouts, and the
    names used for test data (products, downloads, masks)."""
    options = (
        ("--address", "http://192.168.56.103/opencart/", "Opencart web address"),
        ("--address2", "http://demo23.opencart.pro/", "Opencart web address"),
        ("--browser", "chrome", "Browser name"),
        ("--username", "admin", "User Name"),
        ("--password", "admin", "User Password"),
        ("--username2", "demo", "User Name"),
        ("--password2", "demo", "User Password"),
        ("--iwait", "30000", "Implicitly wait parameter"),
        ("--pltimeout", "1000", "Page load timeout"),
        ("--productname", "New Product", "Product Name"),
        ("--keywords", "New Meta Tag Keyword", "Meta Tag Keyword"),
        ("--modelname", "New model", "Model Name"),
        ("--meta", "New meta", "Meta Tag Title"),
        ("--dname", "New File for Download", "Download name"),
        ("--filename", "New File Name", "File name"),
        ("--maskname", "New Mask", "Mask Name"),
    )
    for flag, default, help_text in options:
        parser.addoption(flag, action="store", default=default, help=help_text)
@pytest.fixture(scope="session", autouse=True)
def driver(request):
"""Launching webdriver"""
browser_name = request.config.getoption("--browser")
print(browser_name)
if browser_name == 'firefox':
capabilities = WD.DesiredCapabilities.FIREFOX.copy()
capabilities['timeouts'] = {'implicit': 300000, 'pageLoad': 300000, 'script': 30000}
capabilities['loggingPrefs'] = {'browser': 'ALL', 'client': 'ALL', 'driver': 'ALL',
'performance': 'ALL', 'server': 'ALL'}
capabilities['unexpectedAlertBehaviour'] = 'accept'
profile = WD.FirefoxProfile()
profile.set_preference('app.update.auto', False)
profile.set_preference('app.update.enabled', False)
profile.accept_untrusted_certs = True
wd = WD.Firefox(firefox_profile=profile, capabilities=capabilities)
wd.maximize_window()
elif browser_name == 'chrome':
capabilities = WD.DesiredCapabilities.CHROME.copy()
capabilities['acceptSslCerts'] = True
capabilities['acceptInsecureCerts'] = True
capabilities['unexpectedAlertBehaviour'] = 'dismiss'
wd = WD.Chrome(desired_capabilities=capabilities)
wd.fullscreen_window()
else:
print('Unsupported browser!')
sys.exit(1)
wd.implicitly_wait((request.config.getoption("--iwait")))
wd.set_page_load_timeout((request.config.getoption("--pltimeout")))
implicitly_wait = request.config.getoption("--iwait")
page_load_timeout = request.config.getoption("--pltimeout")
print(implicitly_wait)
print(page_load_timeout)
yield wd
wd.quit()
@pytest.fixture(scope="function")
def open_store_page(driver, request):
"""Get base URL and attend admin link"""
return driver.get("".join([request.config.getoption("--address")]))
@pytest.fixture(scope="function")
def open_opencart_admin_url(driver, request):
"""Get base URL and attend admin link"""
url = 'admin/'
return driver.get("".join([request.config.getoption("--address2"), url]))
@pytest.fixture(scope="function")
def login_form_operator(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return LoginPage(driver)
@pytest.fixture(scope="function")
def set_login_data(login_form_operator, request, driver):
"""Open admin login page and login in"""
login_form_operator.login(request.config.getoption("--username2"), request.config.getoption("--password2"))
@pytest.fixture(scope="function")
def products_page_opening(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return ProductsPage(driver)
@pytest.fixture(scope="function")
def downloads_page_opening(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return DownloadPage(driver)
@pytest.fixture()
def custom_menu_page_opening(driver, open_opencart_admin_url):
"""Use"""
return CustomMenuDesignPage(driver)
@pytest.fixture(scope="function")
def products_page_operator(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return ProductPage(driver)
@pytest.fixture(scope="function")
def product_manager(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return ProductManager(driver)
@pytest.fixture(scope="function")
def store_manager(driver, open_store_page):
"""Use class from page objects module for managing elements on the page"""
return ProductManager(driver)
@pytest.fixture(scope="function")
def downloads_manager(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return DownloadManager(driver)
@pytest.fixture()
def custom_menu_designer(driver, open_opencart_admin_url):
"""USe"""
return CustomMenuDesigner(driver)
# @pytest.fixture(scope="function")
# def add_new_product(driver, set_login_data, products_page_opening, product_manager, request):
# product_manager.add_new_product(request.config.getoption("--productname"),
# request.config.getoption("--modelname"))
@pytest.fixture(scope='function')
def add_product_with_image(driver, set_login_data, products_page_opening, product_manager, request):
    """Add a new product (name/meta/model from CLI options) with the test image."""
    product_manager.add_new_product_with_image(request.config.getoption("--productname"),
                                               request.config.getoption("--meta"),
                                               request.config.getoption("--modelname"),
                                               image)
@pytest.fixture(scope='function')
def find_product_image(driver, open_store_page, store_manager):
    """Locate the 'MacBook Pro' product image on the storefront and return its src link."""
    store_manager.find_product_image("MacBook Pro")
    src = store_manager.get_image_link()
    print(type(src))
    print(src)
    return src
@pytest.fixture(scope='function')
def upload_file(driver, set_login_data, downloads_page_opening, downloads_manager, request):
    """Upload the test image to the Downloads page with the configured names."""
    downloads_manager.add_file(request.config.getoption("--dname"),
                               request.config.getoption("--filename"),
                               request.config.getoption("--maskname"),
                               image)
@pytest.fixture(scope='function')
def check_uploaded_file(driver, set_login_data, downloads_page_opening, downloads_manager):
    """Return the name of the file shown on the Downloads page."""
    return downloads_manager.get_file_name()
@pytest.fixture(scope='function')
def drag_and_drop_custom(driver, set_login_data, custom_menu_page_opening, custom_menu_designer):
    """Perform the drag-and-drop on the custom menu and return the result."""
    return custom_menu_designer.drag_and_drop_menu()
| 42.717277 | 118 | 0.697022 | """Fixtures to testing Opencart login page"""
import os
import sys
import pytest
from selenium import webdriver as WD
from Selenium.Opencart_windows_operations.models.page_objects.page_objects import LoginPage, \
ProductPage, ProductsPage, ProductManager, DownloadPage, DownloadManager, CustomMenuDesignPage, CustomMenuDesigner
image = os.path.abspath('C:/Users/60064265/PycharmProjects/Homework/Selenium/Opencart_windows_operations/1.JPG')
def pytest_addoption(parser):
"""Setting base URL Openacart and parametrize command line options for select
browsers and set username or password """
parser.addoption("--address", action="store", default="http://192.168.56.103/opencart/",
help="Opencart web address")
parser.addoption("--address2", action="store", default="http://demo23.opencart.pro/",
help="Opencart web address")
parser.addoption("--browser", action="store", default="chrome", help="Browser name")
parser.addoption("--username", action="store", default="admin", help="User Name")
parser.addoption("--password", action="store", default="admin", help="User Password")
parser.addoption("--username2", action="store", default="demo", help="User Name")
parser.addoption("--password2", action="store", default="demo", help="User Password")
parser.addoption("--iwait", action="store", default="30000", help="Implicitly wait parameter")
parser.addoption("--pltimeout", action="store", default="1000", help="Page load timeout")
parser.addoption("--productname", action="store", default="New Product", help="Product Name")
parser.addoption("--keywords", action="store",
default="New Meta Tag Keyword",
help="Meta Tag Keyword")
parser.addoption("--modelname", action="store", default="New model", help="Model Name")
parser.addoption("--meta", action="store", default="New meta", help="Meta Tag Title")
parser.addoption("--dname", action="store", default="New File for Download", help="Download name")
parser.addoption("--filename", action="store", default="New File Name", help="File name")
parser.addoption("--maskname", action="store", default="New Mask", help="Mask Name")
@pytest.fixture(scope="session", autouse=True)
def driver(request):
"""Launching webdriver"""
browser_name = request.config.getoption("--browser")
print(browser_name)
if browser_name == 'firefox':
capabilities = WD.DesiredCapabilities.FIREFOX.copy()
capabilities['timeouts'] = {'implicit': 300000, 'pageLoad': 300000, 'script': 30000}
capabilities['loggingPrefs'] = {'browser': 'ALL', 'client': 'ALL', 'driver': 'ALL',
'performance': 'ALL', 'server': 'ALL'}
capabilities['unexpectedAlertBehaviour'] = 'accept'
profile = WD.FirefoxProfile()
profile.set_preference('app.update.auto', False)
profile.set_preference('app.update.enabled', False)
profile.accept_untrusted_certs = True
wd = WD.Firefox(firefox_profile=profile, capabilities=capabilities)
wd.maximize_window()
elif browser_name == 'chrome':
capabilities = WD.DesiredCapabilities.CHROME.copy()
capabilities['acceptSslCerts'] = True
capabilities['acceptInsecureCerts'] = True
capabilities['unexpectedAlertBehaviour'] = 'dismiss'
wd = WD.Chrome(desired_capabilities=capabilities)
wd.fullscreen_window()
else:
print('Unsupported browser!')
sys.exit(1)
wd.implicitly_wait((request.config.getoption("--iwait")))
wd.set_page_load_timeout((request.config.getoption("--pltimeout")))
implicitly_wait = request.config.getoption("--iwait")
page_load_timeout = request.config.getoption("--pltimeout")
print(implicitly_wait)
print(page_load_timeout)
yield wd
wd.quit()
@pytest.fixture(scope="function")
def open_store_page(driver, request):
"""Get base URL and attend admin link"""
return driver.get("".join([request.config.getoption("--address")]))
@pytest.fixture(scope="function")
def open_opencart_admin_url(driver, request):
"""Get base URL and attend admin link"""
url = 'admin/'
return driver.get("".join([request.config.getoption("--address2"), url]))
@pytest.fixture(scope="function")
def login_form_operator(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return LoginPage(driver)
@pytest.fixture(scope="function")
def set_login_data(login_form_operator, request, driver):
"""Open admin login page and login in"""
login_form_operator.login(request.config.getoption("--username2"), request.config.getoption("--password2"))
@pytest.fixture(scope="function")
def products_page_opening(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return ProductsPage(driver)
@pytest.fixture(scope="function")
def downloads_page_opening(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return DownloadPage(driver)
@pytest.fixture()
def custom_menu_page_opening(driver, open_opencart_admin_url):
"""Use"""
return CustomMenuDesignPage(driver)
@pytest.fixture(scope="function")
def products_page_operator(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return ProductPage(driver)
@pytest.fixture(scope="function")
def product_manager(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return ProductManager(driver)
@pytest.fixture(scope="function")
def store_manager(driver, open_store_page):
"""Use class from page objects module for managing elements on the page"""
return ProductManager(driver)
@pytest.fixture(scope="function")
def downloads_manager(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return DownloadManager(driver)
@pytest.fixture()
def custom_menu_designer(driver, open_opencart_admin_url):
"""USe"""
return CustomMenuDesigner(driver)
# @pytest.fixture(scope="function")
# def add_new_product(driver, set_login_data, products_page_opening, product_manager, request):
# product_manager.add_new_product(request.config.getoption("--productname"),
# request.config.getoption("--modelname"))
@pytest.fixture(scope='function')
def add_product_with_image(driver, set_login_data, products_page_opening, product_manager, request):
"""Adding new product"""
product_manager.add_new_product_with_image(request.config.getoption("--productname"),
request.config.getoption("--meta"),
request.config.getoption("--modelname"),
image)
@pytest.fixture(scope='function')
def find_product_image(driver, open_store_page, store_manager):
"""Find image"""
store_manager.find_product_image("MacBook Pro")
src = store_manager.get_image_link()
print(type(src))
print(src)
return src
@pytest.fixture(scope='function')
def upload_file(driver, set_login_data, downloads_page_opening, downloads_manager, request):
"""Upload file to Downloads page"""
downloads_manager.add_file(request.config.getoption("--dname"),
request.config.getoption("--filename"),
request.config.getoption("--maskname"),
image)
@pytest.fixture(scope='function')
def check_uploaded_file(driver, set_login_data, downloads_page_opening, downloads_manager):
"""Upload file to Downloads page"""
return downloads_manager.get_file_name()
@pytest.fixture(scope='function')
def drag_and_drop_custom(driver, set_login_data, custom_menu_page_opening, custom_menu_designer):
"""Return products list with names"""
return custom_menu_designer.drag_and_drop_menu()
| 0 | 0 | 0 |
051e4d78d4085682cbb0206e37e6ac2c0da1a589 | 1,646 | py | Python | cynpy/i2c.py | rayjhuang/cynpy | 1bf4476691f561322f6b2bfb5b2bef36f2e5e04b | [
"MIT"
] | null | null | null | cynpy/i2c.py | rayjhuang/cynpy | 1bf4476691f561322f6b2bfb5b2bef36f2e5e04b | [
"MIT"
] | null | null | null | cynpy/i2c.py | rayjhuang/cynpy | 1bf4476691f561322f6b2bfb5b2bef36f2e5e04b | [
"MIT"
] | null | null | null |
TRUE = 1 # ACK, YES
FALSE = 0 # NAK, NO
class i2c (object): # for polymorphism
'''
i2c class hierarchy
-------------------
i2c
\
aardvark_i2c
/ (aardv.py)
aardvark
'''
def choose_master (rpt=FALSE):
'''
TO CONSIDER FOLLOWING SCENARIOS
-------------------------------
1. use AARDARK in a non-Windows system
'''
from aardv import aardvark_i2c as aa_i2c
num = aa_i2c().enum (rpt)
return aa_i2c(0) # i2cmst
if __name__ == '__main__':
i2cmst = choose_master (rpt=TRUE)
from basic import *
if not no_argument ():
# if i2cmst!=0:
if sys.argv[1]=='probe' : print i2cmst.probe ()
elif sys.argv[1]=='baud' : print i2cmst.baud (argv_dec[2])
elif sys.argv[1]=='write' : print i2cmst.i2cw (argv_hex[2:])[1]
elif sys.argv[1]=='read' : print ['0x%02X' % xx for xx in i2cmst.read (argv_hex[2], argv_hex[3], argv_hex[4])]
else: print "command not recognized"
# else: print "I2C master not found"
| 27.433333 | 123 | 0.53706 |
TRUE = 1 # ACK, YES
FALSE = 0 # NAK, NO
class i2c (object): # for polymorphism
    '''
    Abstract I2C master interface (Python 2).  Concrete masters (e.g. the
    Aardvark adapter) implement enum/baud/i2cw/read.

    i2c class hierarchy
    -------------------
    i2c
       \
        aardvark_i2c
       /    (aardv.py)
    aardvark
    '''
    # Abstract hooks -- implemented by a concrete master such as aardvark_i2c.
    def enum (me): raise NotImplementedError()
    def baud (me, ask): raise NotImplementedError()
    def i2cw (me, wdat): raise NotImplementedError()
    def read (me, dev, adr, rcnt, rpt=FALSE): raise NotImplementedError()
    def write (me, dev, adr, wdat): # SMB write
        # SMBus-style write: device address, register address, then payload.
        return me.i2cw ([dev,adr]+wdat)[1]
    def probe (me):
        # Scan all 7-bit addresses; collect those that ACK an empty write.
        print 'Searching I2C slave.....'
        hit = []
        for dev in range(0x80):
            if me.i2cw ([dev]):
                print 'device 0x%02x found' % (dev)
                hit += [dev]
        return hit
def choose_master (rpt=FALSE):
    '''
    Pick an I2C master implementation; currently always the first Aardvark.

    TO CONSIDER FOLLOWING SCENARIOS
    -------------------------------
    1. use AARDARK in a non-Windows system
    '''
    from aardv import aardvark_i2c as aa_i2c
    num = aa_i2c().enum (rpt)  # enumerate attached adapters (rpt: report)
    return aa_i2c(0) # i2cmst
# Simple Python 2 CLI: python i2c.py probe|baud N|write HH..|read DEV ADR CNT
if __name__ == '__main__':
    i2cmst = choose_master (rpt=TRUE)
    from basic import *  # provides no_argument, argv_dec, argv_hex helpers
    if not no_argument ():
#   if i2cmst!=0:
        if   sys.argv[1]=='probe' : print i2cmst.probe ()
        elif sys.argv[1]=='baud'  : print i2cmst.baud (argv_dec[2])
        elif sys.argv[1]=='write' : print i2cmst.i2cw (argv_hex[2:])[1]
        elif sys.argv[1]=='read'  : print ['0x%02X' % xx for xx in i2cmst.read (argv_hex[2], argv_hex[3], argv_hex[4])]
        else: print "command not recognized"
#   else: print "I2C master not found"
| 403 | 0 | 158 |
622d3cc99f73955ecb3d5b33a08bb2260c6a5623 | 1,465 | py | Python | Calibration - Nozzle Response/callibrate_servo_tringle.py | technosap/FlyVR_TL_2E343 | bd21acf22cca8403b93862ccbcc348750f997923 | [
"MIT"
] | null | null | null | Calibration - Nozzle Response/callibrate_servo_tringle.py | technosap/FlyVR_TL_2E343 | bd21acf22cca8403b93862ccbcc348750f997923 | [
"MIT"
] | null | null | null | Calibration - Nozzle Response/callibrate_servo_tringle.py | technosap/FlyVR_TL_2E343 | bd21acf22cca8403b93862ccbcc348750f997923 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 16:35:53 2019
@author: technosap
"""
import ctypes
import time
import math
from modular_client import ModularClient # for nozzle control
if __name__ == "__main__":
main()
| 29.897959 | 112 | 0.470307 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 16:35:53 2019
@author: technosap
"""
import ctypes
import time
import math
from modular_client import ModularClient # for nozzle control
def main():
    """Sweep the servo-driven nozzle through triangle-wave heading profiles.

    For every (amplitude a, period p, repeat r) combination the servo follows
    a triangle wave centred on 180 deg for 40 s, while each commanded heading
    and the measured position are logged to 'log_a_<a>_p_<p>_r_<r>.csv'.
    Ctrl-C ends the sweep early; the device and log files are always closed.
    """
    dev = ModularClient(port='COM10') # Windows specific port
    # dev.get_device_id()
    # dev.get_methods()
    dev.velocity_max('setValue',[500]) # about 0.25 s per turn (1536/6000)
    dev.acceleration_max('setValue',[500]) # 6/8 s to accerelate to max
    dev.move_to(0,180-32)  # park at the centre position before starting
    time.sleep(5)
    a_s = [15,30,60,120,240]   # triangle amplitudes (degrees)
    p_s = [20,50,100,200,500]  # triangle periods (samples)
    n_rep = 5
    try:
        for a in a_s:
            for p in p_s:
                for r in range(n_rep):
                    start = time.time()
                    # Triangle wave centred on 180 deg, amplitude a, period p.
                    heading = [2*a/p*abs((i%p)-(p/2)) - a/2 + 180 for i in range(0,int(10000))]
                    hs = open('log_a_'+str(a)+"_p_"+str(p)+"_r_"+str(r)+'.csv','a')
                    try:
                        i = 0
                        while time.time()-start < 40:
                            # Command this sample and log the SAME heading.
                            # (The original incremented i before writing and
                            # therefore logged the *next* heading, off by one.)
                            target = heading[i]-32
                            dev.move_to(0,target)
                            t = time.time()-start
                            print(a,p,r,t,i)
                            i+=1
                            hs.write(str(t)+","+str(target)+","+str(dev.get_positions()[0]%360)+"\n")
                    finally:
                        # Close the log even on error or Ctrl-C (the original
                        # only closed it on the happy path of each repeat).
                        hs.close()
    except KeyboardInterrupt:
        # Stop the whole sweep; the original swallowed the interrupt, deleted
        # `dev`, and then crashed with NameError on the next repeat.
        pass
    finally:
        dev.close()
if __name__ == "__main__":
main()
| 1,212 | 0 | 23 |
c7e316a6cc5964fa9b30106676cb6a6534f08812 | 5,430 | py | Python | wikitablescrape.py | feedthebeat90/topos-challenge | 56842be8ffb33bed428762cc06f002f983457a4d | [
"MIT"
] | null | null | null | wikitablescrape.py | feedthebeat90/topos-challenge | 56842be8ffb33bed428762cc06f002f983457a4d | [
"MIT"
] | 1 | 2021-03-31T19:17:31.000Z | 2021-03-31T19:17:31.000Z | wikitablescrape.py | feedthebeat90/topos-challenge | 56842be8ffb33bed428762cc06f002f983457a4d | [
"MIT"
] | null | null | null | """Create CSVs from all tables on a Wikipedia article."""
import csv
import os
from bs4 import BeautifulSoup
import requests
from pprint import pprint
def scrape(url, output_name):
    """Create CSVs from all tables in a Wikipedia article.

    ARGS:
        url (str): The full URL of the Wikipedia article to scrape tables from.
        output_name (str): The base file name (without filepath) to write to.
    """
    # Fetch the article and collect its sortable/plainrowheaders tables.
    page = requests.get(url)
    parsed = BeautifulSoup(page.content, "lxml")
    tables = parsed.findAll("table", {"class": ["sortable", "plainrowheaders"]})
    # All CSVs go into a folder named after the output base name.
    os.makedirs(output_name, exist_ok=True)
    for index, table in enumerate(tables):
        # First table keeps the base name; later ones get a numeric suffix.
        stem = output_name if index == 0 else output_name + "_" + str(index)
        filepath = os.path.join(output_name, stem) + ".csv"
        with open(filepath, mode="w", newline="", encoding="utf-8") as handle:
            writer = csv.writer(handle, quoting=csv.QUOTE_ALL, lineterminator="\n")
            write_html_table_to_csv(table, writer)
def first_row_check(header):
    """Expand the header row so multi-column headers repeat per subcolumn.

    A header cell carrying a ``colspan`` attribute covers several columns;
    this returns the row's cells with each such cell repeated once per
    spanned column, so the header lines up with the data rows.

    ARGS:
        header (bs4.Tag): The bs4 Tag object being analyzed.
    RETURNS:
        cells (list): List of elements from bs4.ResultSet.
    """
    cells = []
    for elem in header.findAll(["th", "td"]):
        span = int(elem["colspan"]) if elem.has_attr("colspan") else 1
        cells.extend([elem] * span)
    return cells
def write_html_table_to_csv(table, writer):
    """Write HTML table from Wikipedia to a CSV file.

    Handles ``rowspan`` cells by carrying their value forward into the
    following rows, and ``colspan`` header cells via first_row_check().

    ARGS:
        table (bs4.Tag): The bs4 Tag object being analyzed.
        writer (csv.writer): The csv Writer object creating the output.
    """
    # Hold elements that span multiple rows in a list of
    # dictionaries that track 'rows_left' and 'value'
    first_row = True
    saved_rowspans = []
    for row in table.findAll("tr"):
        if first_row:
            # Header row: expand colspan cells so column count is correct.
            cells = first_row_check(row)
            first_row = False
        else:
            cells = row.findAll(["th", "td"])
        # If the first row, use it to define width of table
        if len(saved_rowspans) == 0:
            saved_rowspans = [None for _ in cells]
        # Insert values from cells that span into this row
        elif len(cells) != len(saved_rowspans):
            for index, rowspan_data in enumerate(saved_rowspans):
                if rowspan_data is not None:
                    # Insert the data from previous row; decrement rows left
                    value = rowspan_data["value"]
                    cells.insert(index, value)
                    if saved_rowspans[index]["rows_left"] == 1:
                        saved_rowspans[index] = None
                    else:
                        saved_rowspans[index]["rows_left"] -= 1
        # If an element with rowspan, save it for future cells
        for index, cell in enumerate(cells):
            if cell.has_attr("rowspan"):
                rowspan_data = {"rows_left": int(cell["rowspan"]), "value": cell}
                saved_rowspans[index] = rowspan_data
        if cells:
            # Clean the data of references and unusual whitespace
            cleaned = clean_data(cells)
            # Fill the row with empty columns if some are missing
            # (Some HTML tables leave final empty cells without a <td> tag)
            columns_missing = len(saved_rowspans) - len(cleaned)
            if columns_missing:
                cleaned += [None] * columns_missing
            writer.writerow(cleaned)
def clean_data(row):
    """Clean table row list from Wikipedia into a string for CSV.

    Strips citation references and sortkeys, drops footnote text
    fragments (those starting with '['), and normalizes whitespace.

    ARGS:
        row (bs4.ResultSet): The bs4 result set being cleaned for output.

    RETURNS:
        cleaned_cells (list[str]): List of cleaned text items in this row.
    """
    cleaned_cells = []
    for cell in row:
        # Strip references from the cell (iterating an empty result is a no-op,
        # so no truthiness guard is needed).
        for ref in cell.findAll("sup", {"class": "reference"}):
            ref.extract()
        # Strip sortkeys from the cell
        for sortkey in cell.findAll("span", {"class": "sortkey"}):
            sortkey.extract()
        # Strip footnotes from text and join into a single string.
        # str.startswith is safe on empty text nodes, unlike text[0]
        # which raised IndexError.
        text_items = cell.findAll(text=True)
        no_footnotes = [text for text in text_items if not text.startswith("[")]
        cleaned = (
            "".join(no_footnotes)  # Combine elements into single string
            .replace("\xa0", " ")  # Replace non-breaking spaces
            .replace("\n", " ")  # Replace newlines
            .strip()
        )
        cleaned_cells.append(cleaned)
    return cleaned_cells
| 32.710843 | 87 | 0.588214 | """Create CSVs from all tables on a Wikipedia article."""
import csv
import os
from bs4 import BeautifulSoup
import requests
from pprint import pprint
def scrape(url, output_name):
    """Create CSVs from all tables in a Wikipedia article.

    Writes one CSV per matching table into a directory named
    ``output_name``.

    ARGS:
        url (str): The full URL of the Wikipedia article to scrape tables from.
        output_name (str): The base file name (without filepath) to write to.
    """
    # Read tables from Wikipedia article into list of HTML strings
    resp = requests.get(url)
    soup = BeautifulSoup(resp.content, "lxml")
    # Filter to the CSS classes Wikipedia uses for its data tables.
    table_classes = {"class": ["sortable", "plainrowheaders"]}
    wikitables = soup.findAll("table", table_classes)
    # Create folder for output if it doesn't exist
    os.makedirs(output_name, exist_ok=True)
    for index, table in enumerate(wikitables):
        # Make a unique file name for each CSV
        if index == 0:
            filename = output_name
        else:
            filename = output_name + "_" + str(index)
        filepath = os.path.join(output_name, filename) + ".csv"
        # newline="" is the csv-module convention for output files
        with open(filepath, mode="w", newline="", encoding="utf-8") as output:
            csv_writer = csv.writer(output, quoting=csv.QUOTE_ALL, lineterminator="\n")
            write_html_table_to_csv(table, csv_writer)
def first_row_check(header):
    """Repeat colspan header cells so the header matches the data width.

    A ``colspan`` header cell represents several sub-columns; it is
    duplicated once per spanned column.

    ARGS:
        header (bs4.Tag): The header row (``<tr>``) being analyzed.

    RETURNS:
        cells (list): Header cells, colspan cells repeated per span.
    """
    cells = []
    for elem in header.findAll(["th", "td"]):
        if elem.has_attr("colspan"):
            # One copy per spanned column; replaces a manual while loop.
            cells.extend([elem] * int(elem["colspan"]))
        else:
            cells.append(elem)
    return cells
def write_html_table_to_csv(table, writer):
    """Write HTML table from Wikipedia to a CSV file.

    Carries ``rowspan`` values forward into subsequent rows and expands
    header ``colspan`` cells via first_row_check().

    ARGS:
        table (bs4.Tag): The bs4 Tag object being analyzed.
        writer (csv.writer): The csv Writer object creating the output.
    """
    # Hold elements that span multiple rows in a list of
    # dictionaries that track 'rows_left' and 'value'
    first_row = True
    saved_rowspans = []
    for row in table.findAll("tr"):
        if first_row:
            # Header row: expand colspan cells to the true column count.
            cells = first_row_check(row)
            first_row = False
        else:
            cells = row.findAll(["th", "td"])
        # If the first row, use it to define width of table
        if len(saved_rowspans) == 0:
            saved_rowspans = [None for _ in cells]
        # Insert values from cells that span into this row
        elif len(cells) != len(saved_rowspans):
            for index, rowspan_data in enumerate(saved_rowspans):
                if rowspan_data is not None:
                    # Insert the data from previous row; decrement rows left
                    value = rowspan_data["value"]
                    cells.insert(index, value)
                    if saved_rowspans[index]["rows_left"] == 1:
                        saved_rowspans[index] = None
                    else:
                        saved_rowspans[index]["rows_left"] -= 1
        # If an element with rowspan, save it for future cells
        for index, cell in enumerate(cells):
            if cell.has_attr("rowspan"):
                rowspan_data = {"rows_left": int(cell["rowspan"]), "value": cell}
                saved_rowspans[index] = rowspan_data
        if cells:
            # Clean the data of references and unusual whitespace
            cleaned = clean_data(cells)
            # Fill the row with empty columns if some are missing
            # (Some HTML tables leave final empty cells without a <td> tag)
            columns_missing = len(saved_rowspans) - len(cleaned)
            if columns_missing:
                cleaned += [None] * columns_missing
            writer.writerow(cleaned)
def clean_data(row):
    """Clean table row list from Wikipedia into a string for CSV.

    Removes citation references and sortkeys, discards footnote
    fragments, and normalizes whitespace per cell.

    ARGS:
        row (bs4.ResultSet): The bs4 result set being cleaned for output.

    RETURNS:
        cleaned_cells (list[str]): List of cleaned text items in this row.
    """
    cleaned_cells = []
    for cell in row:
        # Strip references from the cell
        for ref in cell.findAll("sup", {"class": "reference"}):
            ref.extract()
        # Strip sortkeys from the cell
        for sortkey in cell.findAll("span", {"class": "sortkey"}):
            sortkey.extract()
        # Strip footnotes from text and join into a single string.
        # startswith avoids the IndexError that text[0] raised on an
        # empty text node.
        text_items = cell.findAll(text=True)
        no_footnotes = [text for text in text_items if not text.startswith("[")]
        cleaned = (
            "".join(no_footnotes)  # Combine elements into single string
            .replace("\xa0", " ")  # Replace non-breaking spaces
            .replace("\n", " ")  # Replace newlines
            .strip()
        )
        cleaned_cells.append(cleaned)
    return cleaned_cells
| 0 | 0 | 0 |
d3ea40ac70a6361d94b605bce607160d63e360fa | 26,387 | py | Python | evillimiter/menus/main_menu.py | mostafa-yasen/evillimiter | 46d2033b022f4a51fb2f419393a5344ad3edea4a | [
"MIT"
] | 1,096 | 2019-03-27T23:21:34.000Z | 2022-03-27T12:33:21.000Z | evillimiter/menus/main_menu.py | mostafa-yasen/evillimiter | 46d2033b022f4a51fb2f419393a5344ad3edea4a | [
"MIT"
] | 116 | 2019-04-10T17:19:47.000Z | 2022-03-20T19:27:25.000Z | evillimiter/menus/main_menu.py | mostafa-yasen/evillimiter | 46d2033b022f4a51fb2f419393a5344ad3edea4a | [
"MIT"
] | 286 | 2019-03-27T23:21:52.000Z | 2022-03-25T19:02:00.000Z | import time
import socket
import curses
import netaddr
import threading
import collections
from terminaltables import SingleTable
import evillimiter.networking.utils as netutils
from .menu import CommandMenu
from evillimiter.networking.utils import BitRate
from evillimiter.console.io import IO
from evillimiter.console.chart import BarChart
from evillimiter.console.banner import get_main_banner
from evillimiter.networking.host import Host
from evillimiter.networking.limit import Limiter, Direction
from evillimiter.networking.spoof import ARPSpoofer
from evillimiter.networking.scan import HostScanner
from evillimiter.networking.monitor import BandwidthMonitor
from evillimiter.networking.watch import HostWatcher
| 36.801953 | 180 | 0.549134 | import time
import socket
import curses
import netaddr
import threading
import collections
from terminaltables import SingleTable
import evillimiter.networking.utils as netutils
from .menu import CommandMenu
from evillimiter.networking.utils import BitRate
from evillimiter.console.io import IO
from evillimiter.console.chart import BarChart
from evillimiter.console.banner import get_main_banner
from evillimiter.networking.host import Host
from evillimiter.networking.limit import Limiter, Direction
from evillimiter.networking.spoof import ARPSpoofer
from evillimiter.networking.scan import HostScanner
from evillimiter.networking.monitor import BandwidthMonitor
from evillimiter.networking.watch import HostWatcher
class MainMenu(CommandMenu):
def __init__(self, version, interface, gateway_ip, gateway_mac, netmask):
super().__init__()
self.prompt = '({}Main{}) >>> '.format(IO.Style.BRIGHT, IO.Style.RESET_ALL)
self.parser.add_subparser('clear', self._clear_handler)
hosts_parser = self.parser.add_subparser('hosts', self._hosts_handler)
hosts_parser.add_flag('--force', 'force')
scan_parser = self.parser.add_subparser('scan', self._scan_handler)
scan_parser.add_parameterized_flag('--range', 'iprange')
limit_parser = self.parser.add_subparser('limit', self._limit_handler)
limit_parser.add_parameter('id')
limit_parser.add_parameter('rate')
limit_parser.add_flag('--upload', 'upload')
limit_parser.add_flag('--download', 'download')
block_parser = self.parser.add_subparser('block', self._block_handler)
block_parser.add_parameter('id')
block_parser.add_flag('--upload', 'upload')
block_parser.add_flag('--download', 'download')
free_parser = self.parser.add_subparser('free', self._free_handler)
free_parser.add_parameter('id')
add_parser = self.parser.add_subparser('add', self._add_handler)
add_parser.add_parameter('ip')
add_parser.add_parameterized_flag('--mac', 'mac')
monitor_parser = self.parser.add_subparser('monitor', self._monitor_handler)
monitor_parser.add_parameterized_flag('--interval', 'interval')
analyze_parser = self.parser.add_subparser('analyze', self._analyze_handler)
analyze_parser.add_parameter('id')
analyze_parser.add_parameterized_flag('--duration', 'duration')
watch_parser = self.parser.add_subparser('watch', self._watch_handler)
watch_add_parser = watch_parser.add_subparser('add', self._watch_add_handler)
watch_add_parser.add_parameter('id')
watch_remove_parser = watch_parser.add_subparser('remove', self._watch_remove_handler)
watch_remove_parser.add_parameter('id')
watch_set_parser = watch_parser.add_subparser('set', self._watch_set_handler)
watch_set_parser.add_parameter('attribute')
watch_set_parser.add_parameter('value')
self.parser.add_subparser('help', self._help_handler)
self.parser.add_subparser('?', self._help_handler)
self.parser.add_subparser('quit', self._quit_handler)
self.parser.add_subparser('exit', self._quit_handler)
self.version = version # application version
self.interface = interface # specified IPv4 interface
self.gateway_ip = gateway_ip
self.gateway_mac = gateway_mac
self.netmask = netmask
# range of IP address calculated from gateway IP and netmask
self.iprange = list(netaddr.IPNetwork('{}/{}'.format(self.gateway_ip, self.netmask)))
self.host_scanner = HostScanner(self.interface, self.iprange)
self.arp_spoofer = ARPSpoofer(self.interface, self.gateway_ip, self.gateway_mac)
self.limiter = Limiter(self.interface)
self.bandwidth_monitor = BandwidthMonitor(self.interface, 1)
self.host_watcher = HostWatcher(self.host_scanner, self._reconnect_callback)
# holds discovered hosts
self.hosts = []
self.hosts_lock = threading.Lock()
self._print_help_reminder()
# start the spoof thread
self.arp_spoofer.start()
# start the bandwidth monitor thread
self.bandwidth_monitor.start()
# start the host watch thread
self.host_watcher.start()
    def interrupt_handler(self, ctrl_c=True):
        """Clean-up hook run on exit (and on Ctrl+C when ``ctrl_c`` is True).

        Stops the ARP spoofer and bandwidth monitor threads, then frees
        every known host so normal connectivity is restored.
        """
        if ctrl_c:
            IO.spacer()
        IO.ok('cleaning up... stand by...')
        self.arp_spoofer.stop()
        self.bandwidth_monitor.stop()
        # NOTE(review): iterates self.hosts without hosts_lock — presumably
        # safe because this runs at shutdown; confirm no scan is in flight.
        for host in self.hosts:
            self._free_host(host)
def _scan_handler(self, args):
"""
Handles 'scan' command-line argument
(Re)scans for hosts on the network
"""
if args.iprange:
iprange = self._parse_iprange(args.iprange)
if iprange is None:
IO.error('invalid ip range.')
return
else:
iprange = None
with self.hosts_lock:
for host in self.hosts:
self._free_host(host)
IO.spacer()
hosts = self.host_scanner.scan(iprange)
self.hosts_lock.acquire()
self.hosts = hosts
self.hosts_lock.release()
IO.ok('{}{}{} hosts discovered.'.format(IO.Fore.LIGHTYELLOW_EX, len(hosts), IO.Style.RESET_ALL))
IO.spacer()
def _hosts_handler(self, args):
"""
Handles 'hosts' command-line argument
Displays discovered hosts
"""
table_data = [[
'{}ID{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}IP address{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}MAC address{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}Hostname{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}Status{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL)
]]
with self.hosts_lock:
for host in self.hosts:
table_data.append([
'{}{}{}'.format(IO.Fore.LIGHTYELLOW_EX, self._get_host_id(host, lock=False), IO.Style.RESET_ALL),
host.ip,
host.mac,
host.name,
host.pretty_status()
])
table = SingleTable(table_data, 'Hosts')
if not args.force and not table.ok:
IO.error('table does not fit terminal. resize or decrease font size. you can also force the display (--force).')
return
IO.spacer()
IO.print(table.table)
IO.spacer()
def _limit_handler(self, args):
"""
Handles 'limit' command-line argument
Limits bandwith of host to specified rate
"""
hosts = self._get_hosts_by_ids(args.id)
if hosts is None or len(hosts) == 0:
return
try:
rate = BitRate.from_rate_string(args.rate)
except Exception:
IO.error('limit rate is invalid.')
return
direction = self._parse_direction_args(args)
for host in hosts:
self.arp_spoofer.add(host)
self.limiter.limit(host, direction, rate)
self.bandwidth_monitor.add(host)
IO.ok('{}{}{r} {} {}limited{r} to {}.'.format(IO.Fore.LIGHTYELLOW_EX, host.ip, Direction.pretty_direction(direction), IO.Fore.LIGHTRED_EX, rate, r=IO.Style.RESET_ALL))
def _block_handler(self, args):
"""
Handles 'block' command-line argument
Blocks internet communication for host
"""
hosts = self._get_hosts_by_ids(args.id)
direction = self._parse_direction_args(args)
if hosts is not None and len(hosts) > 0:
for host in hosts:
if not host.spoofed:
self.arp_spoofer.add(host)
self.limiter.block(host, direction)
self.bandwidth_monitor.add(host)
IO.ok('{}{}{r} {} {}blocked{r}.'.format(IO.Fore.LIGHTYELLOW_EX, host.ip, Direction.pretty_direction(direction), IO.Fore.RED, r=IO.Style.RESET_ALL))
def _free_handler(self, args):
"""
Handles 'free' command-line argument
Frees the host from all limitations
"""
hosts = self._get_hosts_by_ids(args.id)
if hosts is not None and len(hosts) > 0:
for host in hosts:
self._free_host(host)
def _add_handler(self, args):
"""
Handles 'add' command-line argument
Adds custom host to host list
"""
ip = args.ip
if not netutils.validate_ip_address(ip):
IO.error('invalid ip address.')
return
if args.mac:
mac = args.mac
if not netutils.validate_mac_address(mac):
IO.error('invalid mac address.')
return
else:
mac = netutils.get_mac_by_ip(self.interface, ip)
if mac is None:
IO.error('unable to resolve mac address. specify manually (--mac).')
return
name = None
try:
host_info = socket.gethostbyaddr(ip)
name = None if host_info is None else host_info[0]
except socket.herror:
pass
host = Host(ip, mac, name)
with self.hosts_lock:
if host in self.hosts:
IO.error('host does already exist.')
return
self.hosts.append(host)
IO.ok('host added.')
def _monitor_handler(self, args):
"""
Handles 'monitor' command-line argument
Monitors hosts bandwidth usage
"""
def get_bandwidth_results():
with self.hosts_lock:
return [x for x in [(y, self.bandwidth_monitor.get(y)) for y in self.hosts] if x[1] is not None]
def display(stdscr, interval):
host_results = get_bandwidth_results()
hname_max_len = max([len(x[0].name) for x in host_results])
header_off = [
('ID', 5), ('IP address', 18), ('Hostname', hname_max_len + 2),
('Current (per s)', 20), ('Total', 16), ('Packets', 0)
]
y_rst = 1
x_rst = 2
while True:
y_off = y_rst
x_off = x_rst
stdscr.clear()
for header in header_off:
stdscr.addstr(y_off, x_off, header[0])
x_off += header[1]
y_off += 2
x_off = x_rst
for host, result in host_results:
result_data = [
str(self._get_host_id(host)),
host.ip,
host.name,
'{}↑ {}↓'.format(result.upload_rate, result.download_rate),
'{}↑ {}↓'.format(result.upload_total_size, result.download_total_size),
'{}↑ {}↓'.format(result.upload_total_count, result.download_total_count)
]
for j, string in enumerate(result_data):
stdscr.addstr(y_off, x_off, string)
x_off += header_off[j][1]
y_off += 1
x_off = x_rst
y_off += 2
stdscr.addstr(y_off, x_off, 'press \'ctrl+c\' to exit.')
try:
stdscr.refresh()
time.sleep(interval)
host_results = get_bandwidth_results()
except KeyboardInterrupt:
return
interval = 0.5 # in s
if args.interval:
if not args.interval.isdigit():
IO.error('invalid interval.')
return
interval = int(args.interval) / 1000 # from ms to s
if len(get_bandwidth_results()) == 0:
IO.error('no hosts to be monitored.')
return
try:
curses.wrapper(display, interval)
except curses.error:
IO.error('monitor error occurred. maybe terminal too small?')
def _analyze_handler(self, args):
hosts = self._get_hosts_by_ids(args.id)
if hosts is None or len(hosts) == 0:
IO.error('no hosts to be analyzed.')
return
duration = 30 # in s
if args.duration:
if not args.duration.isdigit():
IO.error('invalid duration.')
return
duration = int(args.duration)
hosts_to_be_freed = set()
host_values = {}
for host in hosts:
if not host.spoofed:
hosts_to_be_freed.add(host)
self.arp_spoofer.add(host)
self.bandwidth_monitor.add(host)
host_result = self.bandwidth_monitor.get(host)
host_values[host] = {}
host_values[host]['prev'] = (host_result.upload_total_size, host_result.download_total_size)
IO.ok('analyzing traffic for {}s.'.format(duration))
time.sleep(duration)
error_occurred = False
for host in hosts:
host_result = self.bandwidth_monitor.get(host)
if host_result is None:
# host reconnected during analysis
IO.error('host reconnected during analysis.')
error_occurred = True
else:
host_values[host]['current'] = (host_result.upload_total_size, host_result.download_total_size)
IO.ok('cleaning up...')
for host in hosts_to_be_freed:
self._free_host(host)
if error_occurred:
return
upload_chart = BarChart(max_bar_length=29)
download_chart = BarChart(max_bar_length=29)
for host in hosts:
upload_value = host_values[host]['current'][0] - host_values[host]['prev'][0]
download_value = host_values[host]['current'][1] - host_values[host]['prev'][1]
prefix = '{}{}{} ({}, {})'.format(
IO.Fore.LIGHTYELLOW_EX, self._get_host_id(host), IO.Style.RESET_ALL,
host.ip,
host.name
)
upload_chart.add_value(upload_value.value, prefix, upload_value)
download_chart.add_value(download_value.value, prefix, download_value)
upload_table = SingleTable([[upload_chart.get()]], 'Upload')
download_table = SingleTable([[download_chart.get()]], 'Download')
upload_table.inner_heading_row_border = False
download_table.inner_heading_row_border = False
IO.spacer()
IO.print(upload_table.table)
IO.print(download_table.table)
IO.spacer()
def _watch_handler(self, args):
if len(args) == 0:
watch_table_data = [[
'{}ID{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}IP address{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}MAC address{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL)
]]
set_table_data = [[
'{}Attribute{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}Value{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL)
]]
hist_table_data = [[
'{}ID{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}Old IP address{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}New IP address{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}Time{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL)
]]
iprange = self.host_watcher.iprange
interval = self.host_watcher.interval
set_table_data.append([
'{}range{}'.format(IO.Fore.LIGHTYELLOW_EX, IO.Style.RESET_ALL),
'{} addresses'.format(len(iprange)) if iprange is not None else 'default'
])
set_table_data.append([
'{}interval{}'.format(IO.Fore.LIGHTYELLOW_EX, IO.Style.RESET_ALL),
'{}s'.format(interval)
])
for host in self.host_watcher.hosts:
watch_table_data.append([
'{}{}{}'.format(IO.Fore.LIGHTYELLOW_EX, self._get_host_id(host), IO.Style.RESET_ALL),
host.ip,
host.mac
])
for recon in self.host_watcher.log_list:
hist_table_data.append([
recon['old'].mac,
recon['old'].ip,
recon['new'].ip,
recon['time']
])
watch_table = SingleTable(watch_table_data, "Watchlist")
set_table = SingleTable(set_table_data, "Settings")
hist_table = SingleTable(hist_table_data, 'Reconnection History')
IO.spacer()
IO.print(watch_table.table)
IO.spacer()
IO.print(set_table.table)
IO.spacer()
IO.print(hist_table.table)
IO.spacer()
def _watch_add_handler(self, args):
"""
Handles 'watch add' command-line argument
Adds host to the reconnection watch list
"""
hosts = self._get_hosts_by_ids(args.id)
if hosts is None or len(hosts) == 0:
return
for host in hosts:
self.host_watcher.add(host)
def _watch_remove_handler(self, args):
"""
Handles 'watch remove' command-line argument
Removes host from the reconnection watch list
"""
hosts = self._get_hosts_by_ids(args.id)
if hosts is None or len(hosts) == 0:
return
for host in hosts:
self.host_watcher.remove(host)
def _watch_set_handler(self, args):
"""
Handles 'watch set' command-line argument
Modifies settings of the reconnection reconnection watcher
"""
if args.attribute.lower() in ('range', 'iprange', 'ip_range'):
iprange = self._parse_iprange(args.value)
if iprange is not None:
self.host_watcher.iprange = iprange
else:
IO.error('invalid ip range.')
elif args.attribute.lower() in ('interval'):
if args.value.isdigit():
self.host_watcher.interval = int(args.value)
else:
IO.error('invalid interval.')
else:
IO.error('{}{}{} is an invalid settings attribute.'.format(IO.Fore.LIGHTYELLOW_EX, args.attribute, IO.Style.RESET_ALL))
def _reconnect_callback(self, old_host, new_host):
"""
Callback that is called when a watched host reconnects
Method will run in a separate thread
"""
with self.hosts_lock:
if old_host in self.hosts:
self.hosts[self.hosts.index(old_host)] = new_host
else:
return
self.arp_spoofer.remove(old_host, restore=False)
self.arp_spoofer.add(new_host)
self.host_watcher.remove(old_host)
self.host_watcher.add(new_host)
self.limiter.replace(old_host, new_host)
self.bandwidth_monitor.replace(old_host, new_host)
def _clear_handler(self, args):
"""
Handler for the 'clear' command-line argument
Clears the terminal window and re-prints the banner
"""
IO.clear()
IO.print(get_main_banner(self.version))
self._print_help_reminder()
def _help_handler(self, args):
"""
Handles 'help' command-line argument
Prints help message including commands and usage
"""
spaces = ' ' * 35
IO.print(
"""
{y}scan (--range [IP range]){r}{}scans for online hosts on your network.
{s}required to find the hosts you want to limit.
{b}{s}e.g.: scan
{s} scan --range 192.168.178.1-192.168.178.50
{s} scan --range 192.168.178.1/24{r}
{y}hosts (--force){r}{}lists all scanned hosts.
{s}contains host information, including IDs.
{y}limit [ID1,ID2,...] [rate]{r}{}limits bandwith of host(s) (uload/dload).
{y} (--upload) (--download){r}{}{b}e.g.: limit 4 100kbit
{s} limit 2,3,4 1gbit --download
{s} limit all 200kbit --upload{r}
{y}block [ID1,ID2,...]{r}{}blocks internet access of host(s).
{y} (--upload) (--download){r}{}{b}e.g.: block 3,2
{s} block all --upload{r}
{y}free [ID1,ID2,...]{r}{}unlimits/unblocks host(s).
{b}{s}e.g.: free 3
{s} free all{r}
{y}add [IP] (--mac [MAC]){r}{}adds custom host to host list.
{s}mac resolved automatically.
{b}{s}e.g.: add 192.168.178.24
{s} add 192.168.1.50 --mac 1c:fc:bc:2d:a6:37{r}
{y}monitor (--interval [time in ms]){r}{}monitors bandwidth usage of limited host(s).
{b}{s}e.g.: monitor --interval 600{r}
{y}analyze [ID1,ID2,...]{r}{}analyzes traffic of host(s) without limiting
{y} (--duration [time in s]){r}{}to determine who uses how much bandwidth.
{b}{s}e.g.: analyze 2,3 --duration 120{r}
{y}watch{r}{}detects host reconnects with different IP.
{y}watch add [ID1,ID2,...]{r}{}adds host to the reconnection watchlist.
{b}{s}e.g.: watch add 3,4{r}
{y}watch remove [ID1,ID2,...]{r}{}removes host from the reconnection watchlist.
{b}{s}e.g.: watch remove all{r}
{y}watch set [attr] [value]{r}{}changes reconnect watch settings.
{b}{s}e.g.: watch set interval 120{r}
{y}clear{r}{}clears the terminal window.
{y}quit{r}{}quits the application.
""".format(
spaces[len('scan (--range [IP range])'):],
spaces[len('hosts (--force)'):],
spaces[len('limit [ID1,ID2,...] [rate]'):],
spaces[len(' (--upload) (--download)'):],
spaces[len('block [ID1,ID2,...]'):],
spaces[len(' (--upload) (--download)'):],
spaces[len('free [ID1,ID2,...]'):],
spaces[len('add [IP] (--mac [MAC])'):],
spaces[len('monitor (--interval [time in ms])'):],
spaces[len('analyze [ID1,ID2,...]'):],
spaces[len(' (--duration [time in s])'):],
spaces[len('watch'):],
spaces[len('watch add [ID1,ID2,...]'):],
spaces[len('watch remove [ID1,ID2,...]'):],
spaces[len('watch set [attr] [value]'):],
spaces[len('clear'):],
spaces[len('quit'):],
y=IO.Fore.LIGHTYELLOW_EX, r=IO.Style.RESET_ALL, b=IO.Style.BRIGHT,
s=spaces
)
)
    def _quit_handler(self, args):
        """Handles 'quit'/'exit': frees hosts, stops worker threads,
        then ends the menu loop."""
        self.interrupt_handler(False)
        self.stop()
def _get_host_id(self, host, lock=True):
ret = None
if lock:
self.hosts_lock.acquire()
for i, host_ in enumerate(self.hosts):
if host_ == host:
ret = i
break
if lock:
self.hosts_lock.release()
return ret
def _print_help_reminder(self):
IO.print('type {Y}help{R} or {Y}?{R} to show command information.'.format(Y=IO.Fore.LIGHTYELLOW_EX, R=IO.Style.RESET_ALL))
def _get_hosts_by_ids(self, ids_string):
if ids_string == 'all':
with self.hosts_lock:
return self.hosts.copy()
ids = ids_string.split(',')
hosts = set()
with self.hosts_lock:
for id_ in ids:
is_mac = netutils.validate_mac_address(id_)
is_ip = netutils.validate_ip_address(id_)
is_id_ = id_.isdigit()
if not is_mac and not is_ip and not is_id_:
IO.error('invalid identifier(s): \'{}\'.'.format(ids_string))
return
if is_mac or is_ip:
found = False
for host in self.hosts:
if host.mac == id_.lower() or host.ip == id_:
found = True
hosts.add(host)
break
if not found:
IO.error('no host matching {}{}{}.'.format(IO.Fore.LIGHTYELLOW_EX, id_, IO.Style.RESET_ALL))
return
else:
id_ = int(id_)
if len(self.hosts) == 0 or id_ not in range(len(self.hosts)):
IO.error('no host with id {}{}{}.'.format(IO.Fore.LIGHTYELLOW_EX, id_, IO.Style.RESET_ALL))
return
hosts.add(self.hosts[id_])
return hosts
def _parse_direction_args(self, args):
direction = Direction.NONE
if args.upload:
direction |= Direction.OUTGOING
if args.download:
direction |= Direction.INCOMING
return Direction.BOTH if direction == Direction.NONE else direction
    def _parse_iprange(self, range):
        """Parse an IP range string into a list of netaddr addresses.

        Accepts either a 'start-end' span or CIDR/prefix notation.
        Returns None (implicitly) when the range is malformed.
        NOTE(review): the parameter name shadows the builtin ``range``
        inside this method; kept for interface compatibility.
        """
        try:
            if '-' in range:
                return list(netaddr.iter_iprange(*range.split('-')))
            else:
                return list(netaddr.IPNetwork(range))
        except netaddr.core.AddrFormatError:
            return
    def _free_host(self, host):
        """
        Stops ARP spoofing and unlimits host
        """
        # Only spoofed hosts have active limits/monitoring/watches to undo;
        # for all others this is a no-op.
        if host.spoofed:
            self.arp_spoofer.remove(host)
            self.limiter.unlimit(host, Direction.BOTH)
            self.bandwidth_monitor.remove(host)
            self.host_watcher.remove(host)
| 13,418 | 12,214 | 25 |
84324bd99a44d53d1f1b47653ba819853cafe98f | 5,235 | py | Python | Pyrlang/gen.py | s2hc-johan/Pyrlang | cd77f44b06677d313b241078282e4cb2bd7bd7a1 | [
"Apache-2.0"
] | null | null | null | Pyrlang/gen.py | s2hc-johan/Pyrlang | cd77f44b06677d313b241078282e4cb2bd7bd7a1 | [
"Apache-2.0"
] | null | null | null | Pyrlang/gen.py | s2hc-johan/Pyrlang | cd77f44b06677d313b241078282e4cb2bd7bd7a1 | [
"Apache-2.0"
] | null | null | null | """ A helper module to assist with gen:call-style message parsing and replying.
A generic incoming message looks like ``{$gen_call, {From, Ref}, Message}``.
"""
from Pyrlang import Term
class GenBase:
    """ Base class for Gen messages, do not use directly. See
        ``GenIncomingMessage`` and ``GenIncomingCall``.
    """
    def __init__(self, sender, ref):
        """ :param sender: Pid to send replies to.
            :param ref: Unique reference generated by the caller (a
                ``term.Reference``), echoed back so the caller can match
                the reply to its request.
        """
        # These attributes were read by reply()/reply_exit() but never
        # assigned anywhere: the constructor was missing.
        self.sender_ = sender
        self.ref_ = ref

    def reply(self, local_pid, result):
        """ Reply with a gen:call result
        """
        from Pyrlang.node import Node
        Node.singleton.send(sender=local_pid,
                            receiver=self.sender_,
                            message=(self.ref_, result))

    def reply_exit(self, local_pid, reason):
        """ Reply to remote gen:call with EXIT message which causes reason to be
            re-raised as exit() on the caller side

            NOTE: The gen:call caller attempts to monitor the target first. If
            the monitor attempt fails, the exit here won't work
        """
        from Pyrlang.node import Node
        reply = ('monitor_p_exit', local_pid, self.sender_, self.ref_, reason)
        Node.singleton.dist_command(receiver_node=self.sender_.node_.text_,
                                    message=reply)
class GenIncomingMessage(GenBase):
    """ A helper class which contains elements from a generic incoming
        ``gen_server`` message.

        For those situations when gen message is not a call, or is an incoming
        ``gen_server`` call.
    """
    def __init__(self, sender, ref, message):
        """ :param message: The payload part of the incoming message. """
        # parse_gen_message() constructs this class with these keyword
        # arguments; without this constructor that call raised TypeError.
        GenBase.__init__(self, sender=sender, ref=ref)
        self.message_ = message
class GenIncomingCall(GenBase):
    """ A helper class which contains elements from the incoming
        ``gen:call`` RPC call message.
    """
    def __init__(self, mod, fun, args, group_leader, sender, ref):
        """ :param mod: Module name (atom).
            :param fun: Function name (atom).
            :param args: Call arguments (a list or a ``term.List``).
            :param group_leader: Remote group leader pid from the message.
        """
        # parse_gen_call() constructs this class with these keyword
        # arguments; the constructor was missing from this copy.
        GenBase.__init__(self, sender=sender, ref=ref)
        self.mod_ = mod
        self.fun_ = fun
        self.args_ = args
        self.group_leader_ = group_leader

    def get_args(self):
        """ Returns parsed args for the RPC call. """
        if isinstance(self.args_, list):
            return self.args_
        return self.args_.elements_

    def get_mod_str(self):
        """ Returns module name as a string. """
        return self.mod_.text_

    def get_fun_str(self):
        """ Returns function name as a string. """
        return self.fun_.text_
def parse_gen_call(msg):
    """ Determine if msg is a gen:call message

        :param msg: An Erlang tuple hopefully starting with a '$gen_call'
        :return: str with error if msg wasn't a call message, otherwise
            constructs and returns a ``GenIncomingCall`` object.
    """
    # Incoming {$gen_call, {From, Ref}, {call, Mod, Fun, Args}}
    if type(msg) != tuple:  # ignore all non-tuple messages
        return "Only {tuple} messages allowed"
    # ignore tuples with non-atom 1st, ignore non-gen_call mesages
    if not isinstance(msg[0], Term.Atom) or msg[0].text_ != '$gen_call':
        return "Only {$gen_call, _, _} messages allowed"
    (_, _sender_mref, _call_mfa_gl) = msg
    (msender, mref) = _sender_mref
    if len(_call_mfa_gl) != 5:
        return "Expecting a 5-tuple (with a 'call' atom)"
    # TODO: Maybe also check first element to be an atom 'call'
    # A gen_call call tuple has 5 elements, otherwise see below
    # (the 'call' element is currently unpacked but not validated)
    (call, m, f, args, group_leader) = _call_mfa_gl
    if not isinstance(m, Term.Atom):
        return "Module must be an atom: %s" % str(m)
    if not isinstance(f, Term.Atom):
        return "Function must be an atom: %s" % str(f)
    return GenIncomingCall(mod=m,
                           fun=f,
                           args=args,
                           group_leader=group_leader,
                           sender=msender,  # pid of the sender
                           ref=mref  # reference used in response
                           )
def parse_gen_message(msg):
    """ Might be an 'is_auth' request which is not a call

        Validates the generic {$gen_call, {From, Ref}, Message} shape
        without requiring a 5-tuple call payload.

        :return: string on error, otherwise a ``GenIncomingMessage`` object
    """
    # Incoming {$gen_call, {From, Ref}, Message}
    if type(msg) != tuple:  # ignore all non-tuple messages
        return "Only {tuple} messages allowed"
    # ignore tuples with non-atom 1st, ignore non-gen_call mesages
    if not isinstance(msg[0], Term.Atom) or msg[0].text_ != '$gen_call':
        return "Only {$gen_call, _, _} messages allowed"
    (_, _sender_mref, gcmsg) = msg
    (msender, mref) = _sender_mref
    # Wrap sender pid, caller ref and raw payload for the handler.
    return GenIncomingMessage(sender=msender,
                              ref=mref,
                              message=gcmsg)
__all__ = ['GenIncomingCall', 'GenIncomingMessage',
'parse_gen_call', 'parse_gen_message']
| 35.856164 | 80 | 0.600191 | """ A helper module to assist with gen:call-style message parsing and replying.
A generic incoming message looks like ``{$gen_call, {From, Ref}, Message}``.
"""
from Pyrlang import Term
class GenBase:
    """ Base class for Gen messages, do not use directly. See
        ``GenIncomingMessage`` and ``GenIncomingCall``.
    """

    def __init__(self, sender, ref):
        # Pid of the caller; replies are routed back to this process.
        self.sender_ = sender
        # Unique reference generated by the caller (a ``term.Reference``);
        # it is echoed back so the caller can match the response.
        self.ref_ = ref

    def reply(self, local_pid, result):
        """ Reply with a gen:call result. """
        from Pyrlang.node import Node
        response = (self.ref_, result)
        Node.singleton.send(sender=local_pid,
                            receiver=self.sender_,
                            message=response)

    def reply_exit(self, local_pid, reason):
        """ Reply to a remote gen:call with an EXIT message which causes
            ``reason`` to be re-raised as exit() on the caller side.

            NOTE: The gen:call caller attempts to monitor the target first.
            If the monitor attempt fails, the exit here won't work.
        """
        from Pyrlang.node import Node
        exit_msg = ('monitor_p_exit', local_pid, self.sender_,
                    self.ref_, reason)
        Node.singleton.dist_command(receiver_node=self.sender_.node_.text_,
                                    message=exit_msg)
class GenIncomingMessage(GenBase):
    """ A helper class which contains elements from a generic incoming
        ``gen_server`` message.

        For those situations when the gen message is not a call, or is an
        incoming ``gen_server`` call.
    """

    def __init__(self, sender, ref, message):
        super().__init__(sender=sender, ref=ref)
        # The payload: the last element of the incoming message tuple.
        self.message_ = message
class GenIncomingCall(GenBase):
    """ A helper class which contains elements from the incoming
        ``gen:call`` RPC call message.
    """

    def __init__(self, mod, fun, args, group_leader, sender, ref):
        super().__init__(sender=sender, ref=ref)
        # Module name as atom.
        self.mod_ = mod
        # Function name as atom.
        self.fun_ = fun
        # Call arguments: either a plain list or a ``term.List`` object.
        self.args_ = args
        # Remote group leader pid, comes in as a part of the message.
        self.group_leader_ = group_leader

    def get_args(self):
        """ Returns parsed args for the RPC call. """
        args = self.args_
        return args if isinstance(args, list) else args.elements_

    def get_mod_str(self):
        """ Returns module name as a string. """
        return self.mod_.text_

    def get_fun_str(self):
        """ Returns function name as a string. """
        return self.fun_.text_
def parse_gen_call(msg):
    """ Determine if ``msg`` is a gen:call message.

        :param msg: An Erlang tuple hopefully starting with a '$gen_call'
            atom, shaped like
            ``{$gen_call, {From, Ref}, {call, M, F, Args, GroupLeader}}``
        :return: ``str`` with an error description if ``msg`` wasn't a call
            message, otherwise constructs and returns a ``GenIncomingCall``
            object.
    """
    # Incoming {$gen_call, {From, Ref}, {call, Mod, Fun, Args, GroupLeader}}
    if type(msg) != tuple:  # ignore all non-tuple messages
        return "Only {tuple} messages allowed"
    # Guard the arity first so the indexing and unpacking below cannot
    # raise IndexError/ValueError on short or oversized tuples.
    if len(msg) != 3:
        return "Only {$gen_call, _, _} messages allowed"
    # ignore tuples with non-atom 1st, ignore non-gen_call messages
    if not isinstance(msg[0], Term.Atom) or msg[0].text_ != '$gen_call':
        return "Only {$gen_call, _, _} messages allowed"
    (_, _sender_mref, _call_mfa_gl) = msg
    (msender, mref) = _sender_mref
    # A gen_call call payload is a 5-tuple: (call, M, F, Args, GroupLeader).
    # Check the type too, so that an unsized payload cannot raise TypeError
    # from len() instead of returning the error string.
    if not isinstance(_call_mfa_gl, tuple) or len(_call_mfa_gl) != 5:
        return "Expecting a 5-tuple (with a 'call' atom)"
    # TODO: Maybe also check first element to be an atom 'call'
    (call, m, f, args, group_leader) = _call_mfa_gl
    if not isinstance(m, Term.Atom):
        return "Module must be an atom: %s" % str(m)
    if not isinstance(f, Term.Atom):
        return "Function must be an atom: %s" % str(f)
    return GenIncomingCall(mod=m,
                           fun=f,
                           args=args,
                           group_leader=group_leader,
                           sender=msender,  # pid of the sender
                           ref=mref  # reference used in response
                           )
def parse_gen_message(msg):
    """ Determine whether ``msg`` is a generic gen-style message (for example
        an 'is_auth' request) which is not a call.

        :param msg: expected shape is ``{$gen_call, {From, Ref}, Message}``
        :return: ``str`` with an error description on failure, otherwise a
            ``GenIncomingMessage`` object
    """
    # Incoming {$gen_call, {From, Ref}, Message}
    if type(msg) != tuple:  # ignore all non-tuple messages
        return "Only {tuple} messages allowed"
    # Guard the arity first so the indexing and unpacking below cannot
    # raise IndexError/ValueError on short or oversized tuples.
    if len(msg) != 3:
        return "Only {$gen_call, _, _} messages allowed"
    # ignore tuples with non-atom 1st, ignore non-gen_call messages
    if not isinstance(msg[0], Term.Atom) or msg[0].text_ != '$gen_call':
        return "Only {$gen_call, _, _} messages allowed"
    (_, _sender_mref, gcmsg) = msg
    (msender, mref) = _sender_mref
    return GenIncomingMessage(sender=msender,
                              ref=mref,
                              message=gcmsg)
__all__ = ['GenIncomingCall', 'GenIncomingMessage',
'parse_gen_call', 'parse_gen_message']
| 806 | 0 | 79 |
8f463170aadd58297b706c73c8ba914d8390c5e9 | 14,908 | py | Python | src/sage/geometry/relative_interior.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/geometry/relative_interior.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/geometry/relative_interior.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | r"""
Relative Interiors of Polyhedra and Cones
"""
# ****************************************************************************
# Copyright (C) 2021 Matthias Koeppe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.geometry.convex_set import ConvexSet_relatively_open
class RelativeInterior(ConvexSet_relatively_open):
r"""
The relative interior of a polyhedron or cone
This class should not be used directly. Use methods
:meth:`~sage.geometry.polyhedron.Polyhedron_base.relative_interior`,
:meth:`~sage.geometry.polyhedron.Polyhedron_base.interior`,
:meth:`~sage.geometry.cone.ConvexRationalPolyhedralCone.relative_interior`,
:meth:`~sage.geometry.cone.ConvexRationalPolyhedralCone.interior` instead.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: segment.relative_interior()
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: octant.relative_interior()
Relative interior of 3-d cone in 3-d lattice N
"""
def __init__(self, polyhedron):
r"""
Initialize ``self``.
INPUT:
- ``polyhedron`` - an instance of :class:`Polyhedron_base` or
:class:`ConvexRationalPolyhedralCone`.
TESTS::
sage: P = Polyhedron([[1, 2], [3, 4]])
sage: from sage.geometry.relative_interior import RelativeInterior
sage: TestSuite(RelativeInterior(P)).run()
"""
self._polyhedron = polyhedron
if hasattr(polyhedron, "is_mutable") and polyhedron.is_mutable():
if hasattr(polyhedron, "_add_dependent_object"):
polyhedron._add_dependent_object(self)
def __hash__(self):
r"""
TESTS::
sage: P = Polyhedron([[1, 2], [3, 4]])
sage: Q = Polyhedron([[3, 4], [1, 2]])
sage: hash(P.relative_interior()) == hash(Q.relative_interior())
True
"""
return hash(self._polyhedron) ^ 1789
def __contains__(self, point):
r"""
Return whether ``self`` contains ``point``.
EXAMPLES::
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: ri_octant = octant.relative_interior(); ri_octant
Relative interior of 3-d cone in 3-d lattice N
sage: (1, 1, 1) in ri_octant
True
sage: (1, 0, 0) in ri_octant
False
"""
return self._polyhedron.relative_interior_contains(point)
def ambient(self):
r"""
Return the ambient convex set or space.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.ambient()
Vector space of dimension 2 over Rational Field
"""
return self._polyhedron.ambient()
def ambient_vector_space(self, base_field=None):
r"""
Return the ambient vector space.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.ambient_vector_space()
Vector space of dimension 2 over Rational Field
"""
return self._polyhedron.ambient_vector_space(base_field=base_field)
def ambient_dim(self):
r"""
Return the dimension of the ambient space.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: segment.ambient_dim()
2
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.ambient_dim()
2
"""
return self._polyhedron.ambient_dim()
def an_affine_basis(self):
r"""
Return points that form an affine basis for the affine hull.
The points are guaranteed to lie in the topological closure of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 0], [0, 1]])
sage: segment.relative_interior().an_affine_basis()
[A vertex at (1, 0), A vertex at (0, 1)]
"""
return self._polyhedron.an_affine_basis()
def dim(self):
r"""
Return the dimension of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: segment.dim()
1
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.dim()
1
"""
return self._polyhedron.dim()
def interior(self):
r"""
Return the interior of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.interior()
The empty polyhedron in ZZ^2
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: ri_octant = octant.relative_interior(); ri_octant
Relative interior of 3-d cone in 3-d lattice N
sage: ri_octant.interior() is ri_octant
True
"""
return self._polyhedron.interior()
def relative_interior(self):
r"""
Return the relative interior of ``self``.
As ``self`` is already relatively open, this method just returns ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.relative_interior() is ri_segment
True
"""
return self
def closure(self):
r"""
Return the topological closure of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.closure() is segment
True
"""
return self._polyhedron
def is_universe(self):
r"""
Return whether ``self`` is the whole ambient space
OUTPUT:
Boolean.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.is_universe()
False
"""
# Relies on ``self`` not set up for polyhedra that are already
# relatively open themselves.
assert not self._polyhedron.is_universe()
return False
def is_closed(self):
r"""
Return whether ``self`` is closed.
OUTPUT:
Boolean.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.is_closed()
False
"""
# Relies on ``self`` not set up for polyhedra that are already
# relatively open themselves.
assert not self._polyhedron.is_relatively_open()
return False
def _some_elements_(self):
r"""
Generate some points of ``self``.
If ``self`` is empty, no points are generated; no exception will be raised.
EXAMPLES::
sage: P = polytopes.simplex()
sage: ri_P = P.relative_interior()
sage: ri_P.an_element() # indirect doctest
(1/4, 1/4, 1/4, 1/4)
sage: ri_P.some_elements() # indirect doctest
[(1/4, 1/4, 1/4, 1/4), (1/2, 1/4, 1/8, 1/8)]
"""
for p in self._polyhedron.some_elements():
if p in self:
yield p
def _repr_(self):
r"""
Return a description of ``self``.
EXAMPLES::
sage: P = Polyhedron(vertices = [[1,2,3,4],[2,1,3,4],[4,3,2,1]])
sage: P.relative_interior()._repr_()
'Relative interior of a 2-dimensional polyhedron in ZZ^4 defined as the convex hull of 3 vertices'
sage: P.rename('A')
sage: P.relative_interior()._repr_()
'Relative interior of A'
"""
repr_P = repr(self._polyhedron)
if repr_P.startswith('A '):
repr_P = 'a ' + repr_P[2:]
return 'Relative interior of ' + repr_P
def __eq__(self, other):
r"""
Compare ``self`` and ``other``.
INPUT:
- ``other`` -- any object
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: segment2 = Polyhedron([[1, 2], [3, 4]], base_ring=AA)
sage: ri_segment2 = segment2.relative_interior(); ri_segment2
Relative interior of
a 1-dimensional polyhedron in AA^2 defined as the convex hull of 2 vertices
sage: ri_segment == ri_segment2
True
TESTS::
sage: empty = Polyhedron(ambient_dim=2)
sage: ri_segment == empty
False
"""
if type(self) != type(other):
return False
return self._polyhedron == other._polyhedron
def __ne__(self, other):
r"""
Compare ``self`` and ``other``.
INPUT:
- ``other`` -- any object
TESTS::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: segment2 = Polyhedron([[1, 2], [3, 4]], base_ring=AA)
sage: ri_segment2 = segment2.relative_interior(); ri_segment2
Relative interior of
a 1-dimensional polyhedron in AA^2 defined as the convex hull of 2 vertices
sage: ri_segment != ri_segment2
False
"""
return not (self == other)
def dilation(self, scalar):
"""
Return the dilated (uniformly stretched) set.
INPUT:
- ``scalar`` -- A scalar
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: A = ri_segment.dilation(2); A
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: A.closure().vertices()
(A vertex at (2, 4), A vertex at (6, 8))
sage: B = ri_segment.dilation(-1/3); B
Relative interior of a
1-dimensional polyhedron in QQ^2 defined as the convex hull of 2 vertices
sage: B.closure().vertices()
(A vertex at (-1, -4/3), A vertex at (-1/3, -2/3))
sage: C = ri_segment.dilation(0); C
A 0-dimensional polyhedron in ZZ^2 defined as the convex hull of 1 vertex
sage: C.vertices()
(A vertex at (0, 0),)
"""
return self.closure().dilation(scalar).relative_interior()
def linear_transformation(self, linear_transf, **kwds):
"""
Return the linear transformation of ``self``.
By [Roc1970]_, Theorem 6.6, the linear transformation of a relative interior
is the relative interior of the linear transformation.
INPUT:
- ``linear_transf`` -- a matrix
- ``**kwds`` -- passed to the :meth:`linear_transformation` method of
the closure of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: T = matrix([[1, 1]])
sage: A = ri_segment.linear_transformation(T); A
Relative interior of a
1-dimensional polyhedron in ZZ^1 defined as the convex hull of 2 vertices
sage: A.closure().vertices()
(A vertex at (3), A vertex at (7))
"""
return self.closure().linear_transformation(linear_transf, **kwds).relative_interior()
def translation(self, displacement):
"""
Return the translation of ``self`` by a ``displacement`` vector.
INPUT:
- ``displacement`` -- a displacement vector or a list/tuple of
coordinates that determines a displacement vector
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: t = vector([100, 100])
sage: ri_segment.translation(t)
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.closure().vertices()
(A vertex at (1, 2), A vertex at (3, 4))
"""
return self.closure().translation(displacement).relative_interior()
| 34.429561 | 110 | 0.567749 | r"""
Relative Interiors of Polyhedra and Cones
"""
# ****************************************************************************
# Copyright (C) 2021 Matthias Koeppe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.geometry.convex_set import ConvexSet_relatively_open
class RelativeInterior(ConvexSet_relatively_open):
r"""
The relative interior of a polyhedron or cone
This class should not be used directly. Use methods
:meth:`~sage.geometry.polyhedron.Polyhedron_base.relative_interior`,
:meth:`~sage.geometry.polyhedron.Polyhedron_base.interior`,
:meth:`~sage.geometry.cone.ConvexRationalPolyhedralCone.relative_interior`,
:meth:`~sage.geometry.cone.ConvexRationalPolyhedralCone.interior` instead.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: segment.relative_interior()
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: octant.relative_interior()
Relative interior of 3-d cone in 3-d lattice N
"""
def __init__(self, polyhedron):
r"""
Initialize ``self``.
INPUT:
- ``polyhedron`` - an instance of :class:`Polyhedron_base` or
:class:`ConvexRationalPolyhedralCone`.
TESTS::
sage: P = Polyhedron([[1, 2], [3, 4]])
sage: from sage.geometry.relative_interior import RelativeInterior
sage: TestSuite(RelativeInterior(P)).run()
"""
self._polyhedron = polyhedron
if hasattr(polyhedron, "is_mutable") and polyhedron.is_mutable():
if hasattr(polyhedron, "_add_dependent_object"):
polyhedron._add_dependent_object(self)
def __hash__(self):
r"""
TESTS::
sage: P = Polyhedron([[1, 2], [3, 4]])
sage: Q = Polyhedron([[3, 4], [1, 2]])
sage: hash(P.relative_interior()) == hash(Q.relative_interior())
True
"""
return hash(self._polyhedron) ^ 1789
def __contains__(self, point):
r"""
Return whether ``self`` contains ``point``.
EXAMPLES::
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: ri_octant = octant.relative_interior(); ri_octant
Relative interior of 3-d cone in 3-d lattice N
sage: (1, 1, 1) in ri_octant
True
sage: (1, 0, 0) in ri_octant
False
"""
return self._polyhedron.relative_interior_contains(point)
def ambient(self):
r"""
Return the ambient convex set or space.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.ambient()
Vector space of dimension 2 over Rational Field
"""
return self._polyhedron.ambient()
def ambient_vector_space(self, base_field=None):
r"""
Return the ambient vector space.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.ambient_vector_space()
Vector space of dimension 2 over Rational Field
"""
return self._polyhedron.ambient_vector_space(base_field=base_field)
def ambient_dim(self):
r"""
Return the dimension of the ambient space.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: segment.ambient_dim()
2
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.ambient_dim()
2
"""
return self._polyhedron.ambient_dim()
def an_affine_basis(self):
r"""
Return points that form an affine basis for the affine hull.
The points are guaranteed to lie in the topological closure of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 0], [0, 1]])
sage: segment.relative_interior().an_affine_basis()
[A vertex at (1, 0), A vertex at (0, 1)]
"""
return self._polyhedron.an_affine_basis()
def dim(self):
r"""
Return the dimension of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: segment.dim()
1
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.dim()
1
"""
return self._polyhedron.dim()
def interior(self):
r"""
Return the interior of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.interior()
The empty polyhedron in ZZ^2
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: ri_octant = octant.relative_interior(); ri_octant
Relative interior of 3-d cone in 3-d lattice N
sage: ri_octant.interior() is ri_octant
True
"""
return self._polyhedron.interior()
def relative_interior(self):
r"""
Return the relative interior of ``self``.
As ``self`` is already relatively open, this method just returns ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.relative_interior() is ri_segment
True
"""
return self
def closure(self):
r"""
Return the topological closure of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.closure() is segment
True
"""
return self._polyhedron
def is_universe(self):
r"""
Return whether ``self`` is the whole ambient space
OUTPUT:
Boolean.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.is_universe()
False
"""
# Relies on ``self`` not set up for polyhedra that are already
# relatively open themselves.
assert not self._polyhedron.is_universe()
return False
def is_closed(self):
r"""
Return whether ``self`` is closed.
OUTPUT:
Boolean.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.is_closed()
False
"""
# Relies on ``self`` not set up for polyhedra that are already
# relatively open themselves.
assert not self._polyhedron.is_relatively_open()
return False
def _some_elements_(self):
r"""
Generate some points of ``self``.
If ``self`` is empty, no points are generated; no exception will be raised.
EXAMPLES::
sage: P = polytopes.simplex()
sage: ri_P = P.relative_interior()
sage: ri_P.an_element() # indirect doctest
(1/4, 1/4, 1/4, 1/4)
sage: ri_P.some_elements() # indirect doctest
[(1/4, 1/4, 1/4, 1/4), (1/2, 1/4, 1/8, 1/8)]
"""
for p in self._polyhedron.some_elements():
if p in self:
yield p
def _repr_(self):
r"""
Return a description of ``self``.
EXAMPLES::
sage: P = Polyhedron(vertices = [[1,2,3,4],[2,1,3,4],[4,3,2,1]])
sage: P.relative_interior()._repr_()
'Relative interior of a 2-dimensional polyhedron in ZZ^4 defined as the convex hull of 3 vertices'
sage: P.rename('A')
sage: P.relative_interior()._repr_()
'Relative interior of A'
"""
repr_P = repr(self._polyhedron)
if repr_P.startswith('A '):
repr_P = 'a ' + repr_P[2:]
return 'Relative interior of ' + repr_P
def __eq__(self, other):
r"""
Compare ``self`` and ``other``.
INPUT:
- ``other`` -- any object
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: segment2 = Polyhedron([[1, 2], [3, 4]], base_ring=AA)
sage: ri_segment2 = segment2.relative_interior(); ri_segment2
Relative interior of
a 1-dimensional polyhedron in AA^2 defined as the convex hull of 2 vertices
sage: ri_segment == ri_segment2
True
TESTS::
sage: empty = Polyhedron(ambient_dim=2)
sage: ri_segment == empty
False
"""
if type(self) != type(other):
return False
return self._polyhedron == other._polyhedron
def __ne__(self, other):
r"""
Compare ``self`` and ``other``.
INPUT:
- ``other`` -- any object
TESTS::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of
a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: segment2 = Polyhedron([[1, 2], [3, 4]], base_ring=AA)
sage: ri_segment2 = segment2.relative_interior(); ri_segment2
Relative interior of
a 1-dimensional polyhedron in AA^2 defined as the convex hull of 2 vertices
sage: ri_segment != ri_segment2
False
"""
return not (self == other)
def dilation(self, scalar):
"""
Return the dilated (uniformly stretched) set.
INPUT:
- ``scalar`` -- A scalar
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: A = ri_segment.dilation(2); A
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: A.closure().vertices()
(A vertex at (2, 4), A vertex at (6, 8))
sage: B = ri_segment.dilation(-1/3); B
Relative interior of a
1-dimensional polyhedron in QQ^2 defined as the convex hull of 2 vertices
sage: B.closure().vertices()
(A vertex at (-1, -4/3), A vertex at (-1/3, -2/3))
sage: C = ri_segment.dilation(0); C
A 0-dimensional polyhedron in ZZ^2 defined as the convex hull of 1 vertex
sage: C.vertices()
(A vertex at (0, 0),)
"""
return self.closure().dilation(scalar).relative_interior()
def linear_transformation(self, linear_transf, **kwds):
"""
Return the linear transformation of ``self``.
By [Roc1970]_, Theorem 6.6, the linear transformation of a relative interior
is the relative interior of the linear transformation.
INPUT:
- ``linear_transf`` -- a matrix
- ``**kwds`` -- passed to the :meth:`linear_transformation` method of
the closure of ``self``.
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: T = matrix([[1, 1]])
sage: A = ri_segment.linear_transformation(T); A
Relative interior of a
1-dimensional polyhedron in ZZ^1 defined as the convex hull of 2 vertices
sage: A.closure().vertices()
(A vertex at (3), A vertex at (7))
"""
return self.closure().linear_transformation(linear_transf, **kwds).relative_interior()
def translation(self, displacement):
"""
Return the translation of ``self`` by a ``displacement`` vector.
INPUT:
- ``displacement`` -- a displacement vector or a list/tuple of
coordinates that determines a displacement vector
EXAMPLES::
sage: segment = Polyhedron([[1, 2], [3, 4]])
sage: ri_segment = segment.relative_interior(); ri_segment
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: t = vector([100, 100])
sage: ri_segment.translation(t)
Relative interior of a
1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: ri_segment.closure().vertices()
(A vertex at (1, 2), A vertex at (3, 4))
"""
return self.closure().translation(displacement).relative_interior()
| 0 | 0 | 0 |
961c9747018bcc3a766268b8fdc55badf884779e | 17,022 | py | Python | custom_components/lennoxs30/config_flow.py | PeteRager/LennoxS30 | 5579c0f1e1a369c5c68b65d30f5bf30851442dee | [
"MIT"
] | 28 | 2021-06-04T15:57:37.000Z | 2022-03-28T22:59:58.000Z | custom_components/lennoxs30/config_flow.py | PeteRager/LennoxS30 | 5579c0f1e1a369c5c68b65d30f5bf30851442dee | [
"MIT"
] | 97 | 2021-06-04T22:23:18.000Z | 2022-03-31T14:54:18.000Z | custom_components/lennoxs30/config_flow.py | PeteRager/LennoxS30 | 5579c0f1e1a369c5c68b65d30f5bf30851442dee | [
"MIT"
] | 8 | 2021-07-22T20:48:00.000Z | 2021-11-03T12:02:51.000Z | import ipaddress
import re
from lennoxs30api.s30exception import EC_AUTHENTICATE, EC_LOGIN, S30Exception
import voluptuous as vol
from . import Manager
from .const import (
CONF_ALLERGEN_DEFENDER_SWITCH,
CONF_APP_ID,
CONF_CLOUD_CONNECTION,
CONF_CREATE_INVERTER_POWER,
CONF_CREATE_SENSORS,
CONF_FAST_POLL_INTERVAL,
CONF_INIT_WAIT_TIME,
CONF_LOG_MESSAGES_TO_FILE,
CONF_MESSAGE_DEBUG_FILE,
CONF_MESSAGE_DEBUG_LOGGING,
CONF_PII_IN_MESSAGE_LOGS,
LENNOX_DEFAULT_CLOUD_APP_ID,
LENNOX_DEFAULT_LOCAL_APP_ID,
CONF_LOCAL_CONNECTION,
)
from .util import dict_redact_fields, redact_email
from homeassistant.data_entry_flow import FlowResult
from homeassistant import config_entries
from homeassistant.core import HomeAssistant, callback
from homeassistant.const import (
CONF_HOST,
CONF_EMAIL,
CONF_PASSWORD,
CONF_PROTOCOL,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers import config_validation as cv
import logging
DEFAULT_POLL_INTERVAL: int = 10
DEFAULT_FAST_POLL_INTERVAL: float = 0.75
MAX_ERRORS = 5
RETRY_INTERVAL_SECONDS = 60
DOMAIN = "lennoxs30"
_LOGGER = logging.getLogger(__name__)
STEP_ONE = vol.Schema(
{
vol.Required(CONF_LOCAL_CONNECTION, default=True): cv.boolean,
vol.Required(CONF_CLOUD_CONNECTION, default=False): cv.boolean,
}
)
STEP_CLOUD = vol.Schema(
{
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_APP_ID, default=LENNOX_DEFAULT_CLOUD_APP_ID): cv.string,
vol.Optional(CONF_CREATE_SENSORS, default=True): cv.boolean,
vol.Optional(CONF_ALLERGEN_DEFENDER_SWITCH, default=False): cv.boolean,
}
)
STEP_LOCAL = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_APP_ID, default=LENNOX_DEFAULT_LOCAL_APP_ID): cv.string,
vol.Optional(CONF_CREATE_SENSORS, default=True): cv.boolean,
vol.Optional(CONF_ALLERGEN_DEFENDER_SWITCH, default=False): cv.boolean,
vol.Optional(CONF_CREATE_INVERTER_POWER, default=False): cv.boolean,
vol.Optional(CONF_PROTOCOL, default="https"): cv.string,
}
)
def host_valid(hostport: str) -> bool:
    """Return True if the host part of ``hostport`` is a valid hostname or
    IP address.

    A ``host:port`` syntax is allowed; only the host part is validated.
    """
    # We allow an host:port syntax.
    host = hostport.split(":")[0]
    try:
        # BUG FIX: the original compared ``version == (4 or 6)``, which
        # evaluates to ``version == 4`` and silently fell through
        # (returning None) for IPv6 addresses.
        return ipaddress.ip_address(host).version in (4, 6)
    except ValueError:
        # Not an IP literal - validate as a hostname: dot-separated,
        # non-empty labels of letters, digits and hyphens only.
        disallowed = re.compile(r"[^a-zA-Z\d\-]")
        return all(x and not disallowed.search(x) for x in host.split("."))
@callback
def lennox30_entries(hass: HomeAssistant):
    """Return the set of hosts already configured for this domain."""
    return {
        entry.data[CONF_HOST]
        for entry in hass.config_entries.async_entries(DOMAIN)
    }
class lennoxs30ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Lennox S30 configflow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def _host_in_configuration_exists(self, host) -> bool:
"""Return True if host exists in configuration."""
if host in lennox30_entries(self.hass):
return True
return False
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
self.config_input = {}
_LOGGER.debug(f"async_step_user user_input [{dict_redact_fields(user_input)}]")
if user_input is not None:
cloud_local = user_input[CONF_CLOUD_CONNECTION]
local_connection = user_input[CONF_LOCAL_CONNECTION]
if cloud_local == local_connection:
errors[CONF_LOCAL_CONNECTION] = "select_cloud_or_local"
else:
dict = {CONF_CLOUD_CONNECTION: cloud_local}
self.config_input.update(dict)
if cloud_local:
return await self.async_step_cloud()
else:
return await self.async_step_local()
return self.async_show_form(step_id="user", data_schema=STEP_ONE, errors=errors)
async def async_step_cloud(self, user_input=None):
"""Handle the initial step."""
errors = {}
_LOGGER.debug(f"async_step_cloud user_input [{dict_redact_fields(user_input)}]")
if user_input is not None:
await self.async_set_unique_id(DOMAIN + "_" + user_input[CONF_EMAIL])
self._abort_if_unique_id_configured()
try:
await self.try_to_connect(user_input)
self.config_input.update(user_input)
return await self.async_step_advanced()
except S30Exception as e:
_LOGGER.error(e.as_string())
if e.error_code == EC_LOGIN:
errors["base"] = "unable_to_connect_login"
else:
errors["base"] = "unable_to_connect_cloud"
return self.async_show_form(
step_id="cloud", data_schema=STEP_CLOUD, errors=errors
)
async def async_step_local(self, user_input=None):
"""Handle the initial step."""
errors = {}
_LOGGER.debug(f"async_step_local user_input [{dict_redact_fields(user_input)}]")
if user_input is not None:
host = user_input[CONF_HOST]
if self._host_in_configuration_exists(host):
errors[CONF_HOST] = "already_configured"
elif not host_valid(user_input[CONF_HOST]):
errors[CONF_HOST] = "invalid_hostname"
else:
await self.async_set_unique_id(DOMAIN + "_" + user_input[CONF_HOST])
self._abort_if_unique_id_configured()
try:
await self.try_to_connect(user_input)
self.config_input.update(user_input)
return await self.async_step_advanced()
except S30Exception as e:
_LOGGER.error(e.as_string())
errors[CONF_HOST] = "unable_to_connect_local"
return self.async_show_form(
step_id="local", data_schema=STEP_LOCAL, errors=errors
)
async def async_step_import(self, user_input) -> FlowResult:
"""Handle the import step."""
self.config_input = {}
_LOGGER.debug(
f"async_step_import user_input [{dict_redact_fields(user_input)}]"
)
self.config_input.update(user_input)
return await self.create_entry()
@staticmethod
@callback
| 40.918269 | 147 | 0.567148 | import ipaddress
import re
from lennoxs30api.s30exception import EC_AUTHENTICATE, EC_LOGIN, S30Exception
import voluptuous as vol
from . import Manager
from .const import (
CONF_ALLERGEN_DEFENDER_SWITCH,
CONF_APP_ID,
CONF_CLOUD_CONNECTION,
CONF_CREATE_INVERTER_POWER,
CONF_CREATE_SENSORS,
CONF_FAST_POLL_INTERVAL,
CONF_INIT_WAIT_TIME,
CONF_LOG_MESSAGES_TO_FILE,
CONF_MESSAGE_DEBUG_FILE,
CONF_MESSAGE_DEBUG_LOGGING,
CONF_PII_IN_MESSAGE_LOGS,
LENNOX_DEFAULT_CLOUD_APP_ID,
LENNOX_DEFAULT_LOCAL_APP_ID,
CONF_LOCAL_CONNECTION,
)
from .util import dict_redact_fields, redact_email
from homeassistant.data_entry_flow import FlowResult
from homeassistant import config_entries
from homeassistant.core import HomeAssistant, callback
from homeassistant.const import (
CONF_HOST,
CONF_EMAIL,
CONF_PASSWORD,
CONF_PROTOCOL,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers import config_validation as cv
import logging
DEFAULT_POLL_INTERVAL: int = 10
DEFAULT_FAST_POLL_INTERVAL: float = 0.75
MAX_ERRORS = 5
RETRY_INTERVAL_SECONDS = 60
DOMAIN = "lennoxs30"
_LOGGER = logging.getLogger(__name__)
STEP_ONE = vol.Schema(
{
vol.Required(CONF_LOCAL_CONNECTION, default=True): cv.boolean,
vol.Required(CONF_CLOUD_CONNECTION, default=False): cv.boolean,
}
)
STEP_CLOUD = vol.Schema(
{
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_APP_ID, default=LENNOX_DEFAULT_CLOUD_APP_ID): cv.string,
vol.Optional(CONF_CREATE_SENSORS, default=True): cv.boolean,
vol.Optional(CONF_ALLERGEN_DEFENDER_SWITCH, default=False): cv.boolean,
}
)
STEP_LOCAL = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_APP_ID, default=LENNOX_DEFAULT_LOCAL_APP_ID): cv.string,
vol.Optional(CONF_CREATE_SENSORS, default=True): cv.boolean,
vol.Optional(CONF_ALLERGEN_DEFENDER_SWITCH, default=False): cv.boolean,
vol.Optional(CONF_CREATE_INVERTER_POWER, default=False): cv.boolean,
vol.Optional(CONF_PROTOCOL, default="https"): cv.string,
}
)
def host_valid(hostport: str):
"""Return True if hostname or IP address is valid."""
# We allow an host:port syntax.
splits = hostport.split(":")
host = splits[0]
try:
if ipaddress.ip_address(host).version == (4 or 6):
return True
except ValueError:
disallowed = re.compile(r"[^a-zA-Z\d\-]")
return all(x and not disallowed.search(x) for x in host.split("."))
@callback
def lennox30_entries(hass: HomeAssistant):
"""Return the hosts already configured."""
return set(
entry.data[CONF_HOST] for entry in hass.config_entries.async_entries(DOMAIN)
)
class lennoxs30ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Lennox S30 configflow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def _host_in_configuration_exists(self, host) -> bool:
"""Return True if host exists in configuration."""
if host in lennox30_entries(self.hass):
return True
return False
def get_advanced_schema(self, is_cloud: bool):
if is_cloud == True:
scan_interval = 15
conf_wait_time = 60
else:
scan_interval = 1
conf_wait_time = 30
return vol.Schema(
{
vol.Optional(CONF_SCAN_INTERVAL, default=scan_interval): vol.All(
vol.Coerce(int), vol.Range(min=1, max=300)
),
vol.Optional(
CONF_FAST_POLL_INTERVAL, default=DEFAULT_FAST_POLL_INTERVAL
): vol.All(vol.Coerce(float), vol.Range(min=0.25, max=300.0)),
vol.Optional(CONF_INIT_WAIT_TIME, default=conf_wait_time): vol.All(
vol.Coerce(int), vol.Range(min=1, max=300)
),
vol.Optional(CONF_PII_IN_MESSAGE_LOGS, default=False): cv.boolean,
vol.Optional(CONF_MESSAGE_DEBUG_LOGGING, default=True): cv.boolean,
vol.Optional(CONF_LOG_MESSAGES_TO_FILE, default=False): cv.boolean,
vol.Optional(CONF_MESSAGE_DEBUG_FILE, default=""): cv.string,
}
)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
self.config_input = {}
_LOGGER.debug(f"async_step_user user_input [{dict_redact_fields(user_input)}]")
if user_input is not None:
cloud_local = user_input[CONF_CLOUD_CONNECTION]
local_connection = user_input[CONF_LOCAL_CONNECTION]
if cloud_local == local_connection:
errors[CONF_LOCAL_CONNECTION] = "select_cloud_or_local"
else:
dict = {CONF_CLOUD_CONNECTION: cloud_local}
self.config_input.update(dict)
if cloud_local:
return await self.async_step_cloud()
else:
return await self.async_step_local()
return self.async_show_form(step_id="user", data_schema=STEP_ONE, errors=errors)
async def async_step_cloud(self, user_input=None):
"""Handle the initial step."""
errors = {}
_LOGGER.debug(f"async_step_cloud user_input [{dict_redact_fields(user_input)}]")
if user_input is not None:
await self.async_set_unique_id(DOMAIN + "_" + user_input[CONF_EMAIL])
self._abort_if_unique_id_configured()
try:
await self.try_to_connect(user_input)
self.config_input.update(user_input)
return await self.async_step_advanced()
except S30Exception as e:
_LOGGER.error(e.as_string())
if e.error_code == EC_LOGIN:
errors["base"] = "unable_to_connect_login"
else:
errors["base"] = "unable_to_connect_cloud"
return self.async_show_form(
step_id="cloud", data_schema=STEP_CLOUD, errors=errors
)
async def async_step_local(self, user_input=None):
"""Handle the initial step."""
errors = {}
_LOGGER.debug(f"async_step_local user_input [{dict_redact_fields(user_input)}]")
if user_input is not None:
host = user_input[CONF_HOST]
if self._host_in_configuration_exists(host):
errors[CONF_HOST] = "already_configured"
elif not host_valid(user_input[CONF_HOST]):
errors[CONF_HOST] = "invalid_hostname"
else:
await self.async_set_unique_id(DOMAIN + "_" + user_input[CONF_HOST])
self._abort_if_unique_id_configured()
try:
await self.try_to_connect(user_input)
self.config_input.update(user_input)
return await self.async_step_advanced()
except S30Exception as e:
_LOGGER.error(e.as_string())
errors[CONF_HOST] = "unable_to_connect_local"
return self.async_show_form(
step_id="local", data_schema=STEP_LOCAL, errors=errors
)
async def async_step_advanced(self, user_input=None):
errors = {}
_LOGGER.debug(
f"async_step_advanced user_input [{dict_redact_fields(user_input)}]"
)
if user_input is not None:
self.config_input.update(user_input)
return await self.create_entry()
return self.async_show_form(
step_id="advanced",
data_schema=self.get_advanced_schema(
self.config_input[CONF_CLOUD_CONNECTION]
),
errors=errors,
)
async def create_entry(self):
if self.config_input[CONF_CLOUD_CONNECTION] == True:
title = redact_email(self.config_input[CONF_EMAIL])
else:
title = self.config_input[CONF_HOST]
await self.async_set_unique_id(DOMAIN + "_" + title)
self._abort_if_unique_id_configured()
if self.config_input[CONF_LOG_MESSAGES_TO_FILE] == False:
self.config_input[CONF_MESSAGE_DEBUG_FILE] = ""
_LOGGER.debug(
f"async_step_advanced config_input [{dict_redact_fields(self.config_input)}]"
)
return self.async_create_entry(title=title, data=self.config_input)
async def try_to_connect(self, user_input):
if self.config_input[CONF_CLOUD_CONNECTION] == True:
email = user_input[CONF_EMAIL]
password = user_input[CONF_PASSWORD]
ip_address = None
protocol = "https"
else:
email = None
password = None
ip_address = user_input[CONF_HOST]
protocol = user_input[CONF_PROTOCOL]
manager = Manager(
hass=self.hass,
config=None,
email=email,
password=password,
poll_interval=1,
fast_poll_interval=1.0,
allergenDefenderSwitch=False,
app_id=user_input[CONF_APP_ID],
conf_init_wait_time=30,
ip_address=ip_address,
create_sensors=False,
create_inverter_power=False,
protocol=protocol,
pii_message_logs=False,
message_debug_logging=True,
message_logging_file=None,
)
await manager.connect()
await manager.async_shutdown(None)
async def async_step_import(self, user_input) -> FlowResult:
"""Handle the import step."""
self.config_input = {}
_LOGGER.debug(
f"async_step_import user_input [{dict_redact_fields(user_input)}]"
)
self.config_input.update(user_input)
return await self.create_entry()
@staticmethod
@callback
def async_get_options_flow(config_entry):
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
_LOGGER.debug(
f"OptionsFlowHandler:async_step_init user_input [{dict_redact_fields(user_input)}] data [{dict_redact_fields(self.config_entry.data)}]"
)
if user_input is not None:
if CONF_HOST in self.config_entry.data:
user_input[CONF_HOST] = self.config_entry.data[CONF_HOST]
if CONF_EMAIL in self.config_entry.data:
user_input[CONF_EMAIL] = self.config_entry.data[CONF_EMAIL]
if CONF_CLOUD_CONNECTION in self.config_entry.data:
user_input[CONF_CLOUD_CONNECTION] = self.config_entry.data[
CONF_CLOUD_CONNECTION
]
if user_input[CONF_LOG_MESSAGES_TO_FILE] == False:
user_input[CONF_MESSAGE_DEBUG_FILE] = ""
self.hass.config_entries.async_update_entry(
self.config_entry, data=user_input, options=self.config_entry.options
)
return self.async_create_entry(title="", data={})
if self.config_entry.data[CONF_CLOUD_CONNECTION] == False:
# Local Connection
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_APP_ID, default=self.config_entry.data[CONF_APP_ID]
): cv.string,
vol.Optional(
CONF_CREATE_SENSORS,
default=self.config_entry.data[CONF_CREATE_SENSORS],
): cv.boolean,
vol.Optional(
CONF_ALLERGEN_DEFENDER_SWITCH,
default=self.config_entry.data[
CONF_ALLERGEN_DEFENDER_SWITCH
],
): cv.boolean,
vol.Optional(
CONF_CREATE_INVERTER_POWER,
default=self.config_entry.data[CONF_CREATE_INVERTER_POWER],
): cv.boolean,
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.data[CONF_SCAN_INTERVAL],
): vol.All(vol.Coerce(int), vol.Range(min=1, max=300)),
vol.Optional(
CONF_INIT_WAIT_TIME,
default=self.config_entry.data[CONF_INIT_WAIT_TIME],
): vol.All(vol.Coerce(int), vol.Range(min=1, max=300)),
vol.Optional(
CONF_FAST_POLL_INTERVAL,
default=self.config_entry.data[CONF_FAST_POLL_INTERVAL],
): vol.All(vol.Coerce(float), vol.Range(min=0.25, max=300.0)),
vol.Optional(
CONF_PROTOCOL, default=self.config_entry.data[CONF_PROTOCOL]
): cv.string,
vol.Optional(
CONF_PII_IN_MESSAGE_LOGS,
default=self.config_entry.data[CONF_PII_IN_MESSAGE_LOGS],
): cv.boolean,
vol.Optional(
CONF_MESSAGE_DEBUG_LOGGING,
default=self.config_entry.data[CONF_MESSAGE_DEBUG_LOGGING],
): cv.boolean,
vol.Optional(
CONF_LOG_MESSAGES_TO_FILE,
default=self.config_entry.data[CONF_LOG_MESSAGES_TO_FILE],
): cv.boolean,
vol.Optional(
CONF_MESSAGE_DEBUG_FILE,
default=self.config_entry.data[CONF_MESSAGE_DEBUG_FILE],
): cv.string,
}
),
)
else:
# Cloud Connection
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_PASSWORD,
default=self.config_entry.data[CONF_PASSWORD],
): cv.string,
vol.Optional(
CONF_APP_ID, default=self.config_entry.data[CONF_APP_ID]
): cv.string,
vol.Optional(
CONF_CREATE_SENSORS,
default=self.config_entry.data[CONF_CREATE_SENSORS],
): cv.boolean,
vol.Optional(
CONF_ALLERGEN_DEFENDER_SWITCH,
default=self.config_entry.data[
CONF_ALLERGEN_DEFENDER_SWITCH
],
): cv.boolean,
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.data[CONF_SCAN_INTERVAL],
): vol.All(vol.Coerce(int), vol.Range(min=1, max=300)),
vol.Optional(
CONF_INIT_WAIT_TIME,
default=self.config_entry.data[CONF_INIT_WAIT_TIME],
): vol.All(vol.Coerce(int), vol.Range(min=1, max=300)),
vol.Optional(
CONF_FAST_POLL_INTERVAL,
default=self.config_entry.data[CONF_FAST_POLL_INTERVAL],
): vol.All(vol.Coerce(float), vol.Range(min=0.25, max=300.0)),
vol.Optional(
CONF_PII_IN_MESSAGE_LOGS,
default=self.config_entry.data[CONF_PII_IN_MESSAGE_LOGS],
): cv.boolean,
vol.Optional(
CONF_MESSAGE_DEBUG_LOGGING,
default=self.config_entry.data[CONF_MESSAGE_DEBUG_LOGGING],
): cv.boolean,
vol.Optional(
CONF_LOG_MESSAGES_TO_FILE,
default=self.config_entry.data[CONF_LOG_MESSAGES_TO_FILE],
): cv.boolean,
vol.Optional(
CONF_MESSAGE_DEBUG_FILE,
default=self.config_entry.data[CONF_MESSAGE_DEBUG_FILE],
): cv.string,
}
),
)
| 3,390 | 6,952 | 157 |
43fc0efa71086d127093049d027710be2c64639e | 10,918 | py | Python | visualisation/generation/electricity_generation_plot.py | HauHe/OSeMBEtoREEEMdb | 4ce4cb814ecd9d65b937af51f807f0b0fd88828a | [
"Apache-2.0",
"CC-BY-4.0"
] | 2 | 2021-01-07T15:27:18.000Z | 2022-02-12T21:21:04.000Z | visualisation/generation/electricity_generation_plot.py | HauHe/OSeMBEtoREEEMdb | 4ce4cb814ecd9d65b937af51f807f0b0fd88828a | [
"Apache-2.0",
"CC-BY-4.0"
] | 7 | 2019-07-23T15:16:09.000Z | 2022-02-15T10:47:57.000Z | visualisation/generation/electricity_generation_plot.py | HauHe/OSeMBEtoREEEMdb | 4ce4cb814ecd9d65b937af51f807f0b0fd88828a | [
"Apache-2.0",
"CC-BY-4.0"
] | 3 | 2021-06-08T13:06:45.000Z | 2022-03-23T14:39:07.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 14:51:38 2020
@author: haukeh
"""
#%%Import of required packages
import numpy as np
import pandas as pd
import os
import sys
import plotly.graph_objs as go
from plotly.offline import plot
#%% Function to read results csv files
#%% Function to create dictionaries containing dictionaries for each scenario that contain the results as dataframes
#%% Function to creat a df with the production by technology annual
#%% Function to create dictionary with information
#%% Dictionary of dictionaries with colour schemes
colour_schemes = dict(
dES_colours = dict(
Coal = 'rgb(0, 0, 0)',
Oil = 'rgb(121, 43, 41)',
Gas = 'rgb(86, 108, 140)',
Nuclear = 'rgb(186, 28, 175)',
Waste = 'rgb(138, 171, 71)',
Biomass = 'rgb(172, 199, 119)',
Biofuel = 'rgb(79, 98, 40)',
Hydro = 'rgb(0, 139, 188)',
Wind = 'rgb(143, 119, 173)',
Solar = 'rgb(230, 175, 0)',
Geo = 'rgb(192, 80, 77)',
Ocean ='rgb(22, 54, 92)',
Imports = 'rgb(232, 133, 2)'),
TIMES_PanEU_colours = dict(
Coal = 'rgb(0, 0, 0)',
Oil = 'rgb(202, 171, 169)',
Gas = 'rgb(102, 77, 142)',
Nuclear = 'rgb(109, 109, 109)',
Waste = 'rgb(223, 134, 192)',
Biomass = 'rgb(80, 112, 45)',
Biofuel = 'rgb(178, 191, 225)',
Hydro = 'rgb(181, 192, 224)',
Wind = 'rgb(103, 154, 181)',
Solar = 'rgb(210, 136, 63)',
Geo = 'rgb(178, 191, 225)',
Ocean ='rgb(178, 191, 225)',
Imports = 'rgb(232, 133, 2)')
)
#%% functions for returning positives and negatives
#%% Function to create dfs with import and export of electricity for selected country
#%% Function to create figure
#%% main function to execute the script
#%% If executed as script
if __name__ == '__main__':
selec_region = sys.argv[1]
scens = sys.argv[2:]
main(selec_region,scens) | 41.356061 | 503 | 0.588111 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 14:51:38 2020
@author: haukeh
"""
#%%Import of required packages
import numpy as np
import pandas as pd
import os
import sys
import plotly.graph_objs as go
from plotly.offline import plot
#%% Function to read results csv files
def read_csv(scen, param):
df = pd.read_csv('{}/results_csv/{}.csv'.format(scen,param))
df['pathway'] = scen
return df
#%% Function to create dictionaries containing dictionaries for each scenario that contain the results as dataframes
def build_dic(scens, params):
dic = {}
for scen in scens:
dic[scen] = {}
for scen in scens:
for param in params:
dic[scen][param] = read_csv(scen, param)
return dic
#%% Function to creat a df with the production by technology annual
def build_PbTA_df(dic):
# dic = results_dic
df = pd.DataFrame(columns=['REGION','TECHNOLOGY','FUEL','YEAR','VALUE','pathway'])
for i in dic:
df_work = dic[i]['ProductionByTechnologyAnnual']
df = df.append(df_work)
df['region'] = df['TECHNOLOGY'].apply(lambda x: x[:2])
df['fuel'] = df['TECHNOLOGY'].apply(lambda x: x[2:4])
df['tech_type'] = df['TECHNOLOGY'].apply(lambda x: x[4:6])
df['tech_spec'] = df['TECHNOLOGY'].apply(lambda x: x[2:])
df = df[(df['fuel']!='OI')
&(df['tech_type']!='00')
&((df['YEAR']==2015)|(df['YEAR']==2020)|(df['YEAR']==2030)|(df['YEAR']==2040)|(df['YEAR']==2050))]
df['unit'] = 'PJ'
return df
#%% Function to create dictionary with information
def get_facts(df):
facts_dic = {}
facts_dic['pathways'] = df.loc[:,'pathway'].unique()
facts_dic['regions'] = df.loc[:,'region'].unique()
facts_dic['unit'] = df.loc[:, 'unit'].unique()
facts_dic['regions'] = np.append(facts_dic['regions'],'EU28')
return facts_dic
#%% Dictionary of dictionaries with colour schemes
colour_schemes = dict(
dES_colours = dict(
Coal = 'rgb(0, 0, 0)',
Oil = 'rgb(121, 43, 41)',
Gas = 'rgb(86, 108, 140)',
Nuclear = 'rgb(186, 28, 175)',
Waste = 'rgb(138, 171, 71)',
Biomass = 'rgb(172, 199, 119)',
Biofuel = 'rgb(79, 98, 40)',
Hydro = 'rgb(0, 139, 188)',
Wind = 'rgb(143, 119, 173)',
Solar = 'rgb(230, 175, 0)',
Geo = 'rgb(192, 80, 77)',
Ocean ='rgb(22, 54, 92)',
Imports = 'rgb(232, 133, 2)'),
TIMES_PanEU_colours = dict(
Coal = 'rgb(0, 0, 0)',
Oil = 'rgb(202, 171, 169)',
Gas = 'rgb(102, 77, 142)',
Nuclear = 'rgb(109, 109, 109)',
Waste = 'rgb(223, 134, 192)',
Biomass = 'rgb(80, 112, 45)',
Biofuel = 'rgb(178, 191, 225)',
Hydro = 'rgb(181, 192, 224)',
Wind = 'rgb(103, 154, 181)',
Solar = 'rgb(210, 136, 63)',
Geo = 'rgb(178, 191, 225)',
Ocean ='rgb(178, 191, 225)',
Imports = 'rgb(232, 133, 2)')
)
#%% functions for returning positives and negatives
def positives(value):
return max(value, 0)
def negatives(value):
return min(value, 0)
#%% Function to create dfs with import and export of electricity for selected country
def impex(data, paths, selected_country):
df_filtered = data[(data['fuel']=='EL')
&((data['region']==selected_country)|(data['tech_type']==selected_country))
&(data['tech_type']!='00')]
countries = []
countries = list(df_filtered['region'].unique())
countries.extend(df_filtered['tech_type'].unique())
countries = list(dict.fromkeys(countries))
df_filtered = df_filtered[df_filtered['FUEL'].str.contains('|'.join(countries))]
df_filtered = df_filtered[df_filtered['FUEL'].str.contains('E1')]
years = pd.Series(df_filtered['YEAR'].unique(),name='YEAR').sort_values()
#paths = list(path_names.keys())
neighbours = []
for i in countries:
if i != selected_country:
neighbours.append(i)
dict_path = {}
links = list(df_filtered['TECHNOLOGY'].unique())
label_imp = []
label_exp = []
for n in neighbours:
label_imp.append('Import from '+n)
label_exp.append('Export to '+n)
for j in paths:
i = 0
net_imp = pd.DataFrame(index=years)
for link in links:
imp = df_filtered[(df_filtered['pathway']==j)
&(df_filtered['TECHNOLOGY']==link)
&(df_filtered['FUEL']==(selected_country+'E1'))]
if len(imp.index)<5:
imp = imp.set_index('YEAR').reindex(years).reset_index().fillna(0)
imp = imp.set_index(years)
exp = df_filtered[(df_filtered['pathway']==j)
&(df_filtered['TECHNOLOGY']==link)
&(df_filtered['FUEL']==(neighbours[i]+'E1'))]
if len(exp.index)<5:
exp = exp.set_index('YEAR').reindex(years).reset_index().fillna(0)
exp = exp.set_index(years)
net_imp[link] = imp['VALUE'] - exp['VALUE']
i += 1
net_imp_pos = pd.DataFrame(index=years,columns=links)
net_imp_neg = pd.DataFrame(index=years,columns=links)
for link in links:
net_imp_pos[link] = net_imp[link].map(positives)
net_imp_neg[link] = net_imp[link].map(negatives)
net_imp_pos.columns = label_imp
net_imp_neg.columns = label_exp
dict_path[j] = {}
dict_path[j]['imports']=net_imp_pos
dict_path[j]['exports']=net_imp_neg
path_ind = []
year_ind = []
df_exports = pd.DataFrame(columns=label_exp)
df_imports = pd.DataFrame(columns=label_imp)
for year in years:
i=0
for j in paths:
df_exports = df_exports.append(dict_path[j]['exports'].loc[year])
df_imports = df_imports.append(dict_path[j]['imports'].loc[year])
path_ind.append(paths[i].upper())
i+=1
df_exports = df_exports.set_index([pd.Index(path_ind, name='paths')],append=True)
df_imports = df_imports.set_index([pd.Index(path_ind, name='paths')],append=True)
return df_exports, df_imports
#%% Function to create figure
def create_fig(data, paths, country_sel, countries_mod, fuels, colours):
fig = go.Figure()
elexp, elimp = impex(data, paths, country_sel)
elexp = elexp.sum(axis=1)
elimp = elimp.sum(axis=1)
#paths = list(path_names.keys())
years = data['YEAR'].unique()
years.sort()
coms = fuels['fuel_name']
coms = coms[(coms!='EL')&(coms!='OI')]
info_dict = {}
info_dict['Unit'] = data.loc[:,'unit'].unique()
info_dict['Y-Axis'] = ['{}'.format(*info_dict['Unit'])]
countr_el1 = country_sel + 'E1'
countr_el2 = country_sel + 'E2'
dict_path = {}
for path in paths:
filtered_df = data[
(data['pathway'] == path)
& (data['region'] == country_sel)
& ((data['FUEL']==countr_el1)|(data['FUEL']==countr_el2))
& (data['fuel']!='EL')
& (data['tech_type']!='00')]
filtered_df_p = filtered_df.pivot(index='YEAR', columns='tech_spec', values='VALUE')
df_by_com = pd.DataFrame()
for com in coms:
com_selec = filtered_df_p.filter(regex="\A"+com, axis=1)
com_sum = com_selec.sum(axis=1)
df_by_com[com] = com_sum
dict_path[path] = df_by_com
df_fig = pd.DataFrame(columns=coms)
path_ind = []
year_ind = []
for y in years:
i = 0
for p in paths:
df_fig = df_fig.append(dict_path[p].loc[y])
path_ind.append(paths[i].upper())
year_ind.append(y)
i +=1
df_fig = df_fig.set_index([pd.Index(path_ind, name='paths')],append=True)
df_fig['EL'] = elimp
coms = coms.append(pd.Series('EL'))
for c in coms:
temp = fuels.loc[fuels['fuel_name']==c,'fuel_abr']
fuel_code = temp.iloc[0]
fig.add_trace(go.Bar(
y = df_fig.loc[:,c],
x = [year_ind,path_ind],
name = fuel_code,
hovertemplate = 'Power generation: %{y}PJ',
marker_color = colours[fuel_code]
))
fig.add_trace(go.Bar(
y = elexp,
x = [year_ind,path_ind],
name = 'Exports',
hovertemplate = 'Exported electricity: %{y}PJ',
marker_color = colours['Imports'],
base=0
))
fig.update_layout(
barmode = 'stack',
plot_bgcolor='rgba(0,0,0,0)',
title={
'text':'<b>Electricity generation in {}</b>'.format(countries_mod[country_sel]),
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
xaxis = {'type': 'multicategory'},
yaxis = dict(title='Electricity in [{}]'.format(info_dict['Y-Axis'][0])),
font_family = "Arial",
font_color = "black",
title_font_size = 32,
legend_font_size = 26
)
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='Black',title_font_size=26, tickfont_size=22)
fig.update_xaxes(tickfont_size=22)
return fig
#%% main function to execute the script
def main(country,scenarios):
#scens = ['B1C0TxE0','B1C0T0E0','B1C0ToE0']
params = ['ProductionByTechnologyAnnual']
results_dic = build_dic(scenarios, params)
df_PbTA = build_PbTA_df(results_dic)
facts_dic = get_facts(df_PbTA)
#path_names = {'B1C0TxE0':'CBS','B1C0T0E0':'REF','B1C0ToE0':'OBS'}
countries_mod = {'AT':'Austria','BE':'Belgium','BG':'Bulgaria','CH':'Switzerland','CY':'Cyrpus','CZ':'Czech Republic','DE':'Germany','DK':'Denmark','EE':'Estonia','ES':'Spain','FI':'Finland','FR':'France','GR':'Greece','HR':'Croatia','HU':'Hungary','IE':'Ireland','IT':'Italy','LT':'Lithuania','LU':'Luxembourg','LV':'Latvia','MT':'Malta','NL':'Netherlands','NO':'Norway','PL':'Poland','PT':'Portugal','RO':'Romania','SE':'Sweden','SI':'Slovenia','SK':'Slovakia','UK':'United Kingdom','EU28':'EU28'}
fuels = pd.DataFrame({'fuel_name':['WI','HY','BF','CO','BM','WS','HF','NU','NG','OC','OI','GO','SO','EL'],'fuel_abr':['Wind','Hydro','Biofuel','Coal','Biomass','Waste','Oil','Nuclear','Gas','Ocean','Oil','Geo','Solar','Imports']}, columns = ['fuel_name','fuel_abr'])
fuels = fuels.sort_values(['fuel_name'])
for region in facts_dic['regions']:
print(region)
# selec_region = input('Please select a country from the above listed by typing here:')
#selec_region = 'DE'
print(list(colour_schemes.keys()))
# selec_scheme = input('Please select one of the above listed colour schemes by writing it here and confirming by enter:')
selec_scheme = 'dES_colours'
colours = colour_schemes[selec_scheme]
figure = create_fig(df_PbTA, scenarios, country, countries_mod, fuels, colours)
plot(figure)
#%% If executed as script
if __name__ == '__main__':
selec_region = sys.argv[1]
scens = sys.argv[2:]
main(selec_region,scens) | 8,772 | 0 | 198 |
086da9fd8b4bf09932d4ca7390adf3bd53271063 | 5,095 | py | Python | src/antidote/providers/lazy.py | keelerm84/antidote | a30d488cd6d3421e50a2414bc9a20af052d3b821 | [
"MIT"
] | null | null | null | src/antidote/providers/lazy.py | keelerm84/antidote | a30d488cd6d3421e50a2414bc9a20af052d3b821 | [
"MIT"
] | null | null | null | src/antidote/providers/lazy.py | keelerm84/antidote | a30d488cd6d3421e50a2414bc9a20af052d3b821 | [
"MIT"
] | null | null | null | from typing import Callable, Dict, Hashable, Optional, Tuple, Union
from .._internal.utils import SlotsReprMixin
from ..core import DependencyInstance, DependencyProvider
class LazyCall(SlotsReprMixin):
"""
Dependency which is the result of the call of the given function with the
given arguments.
.. doctest::
>>> from antidote import LazyCall, world
>>> def f(x, y):
... print("Computing {} + {}".format(x, y))
... return x + y
>>> A = LazyCall(f)(2, y=3)
>>> world.get(A)
Computing 2 + 3
5
"""
__slots__ = ('_func', '_args', '_kwargs', '_singleton')
def __init__(self, func: Callable, singleton: bool = True):
"""
Args:
func: Function to lazily call, any arguments given by calling
to the instance of :py:class:`~.LazyCall` will be passed on.
singleton: Whether or not this is a singleton or not.
"""
self._singleton = singleton
self._func = func
self._args = () # type: Tuple
self._kwargs = {} # type: Dict
def __call__(self, *args, **kwargs):
"""
All argument are passed on to the lazily called function.
"""
self._args = args
self._kwargs = kwargs
return self
class LazyMethodCall(SlotsReprMixin):
"""
Similar to :py:class:`~.LazyCall` but adapted to methods within a class
definition. The class has to be a registered service, as the class
instantiation itself is also lazy.
.. doctest::
>>> from antidote import LazyMethodCall, register, world
>>> @register
... class Constants:
... def get(self, x: str):
... return len(x)
... A = LazyMethodCall(get)('test')
>>> Constants.A
LazyMethodCallDependency(...)
>>> world.get(Constants.A)
4
>>> Constants().A
4
:py:class:`~.LazyMethodCall` has two different behaviors:
- if retrieved as a class attribute it returns a dependency which identifies
the result for Antidote.
- if retrieved as a instance attribute it returns the result for this
instance. This makes testing a lot easier as it does not require Antidote.
Check out :py:class:`~.helpers.conf.LazyConstantsMeta` for simple way
to declare multiple constants.
"""
__slots__ = ('_method_name', '_args', '_kwargs', '_singleton', '_key')
def __init__(self, method: Union[Callable, str], singleton: bool = True):
"""
Args:
method: Method to be called or the name of it.
singleton: Whether or not this is a singleton or not.
"""
self._singleton = singleton
# Retrieve the name of the method, as injection can be done after the class
# creation which is typically the case with @register.
self._method_name = method if isinstance(method, str) else method.__name__
self._args = () # type: Tuple
self._kwargs = {} # type: Dict
self._key = None
def __call__(self, *args, **kwargs):
"""
All argument are passed on to the lazily called function.
"""
self._args = args
self._kwargs = kwargs
return self
# The attribute is expected to be found in owner, as one should not call
# directly __get__.
| 34.425676 | 87 | 0.602552 | from typing import Callable, Dict, Hashable, Optional, Tuple, Union
from .._internal.utils import SlotsReprMixin
from ..core import DependencyInstance, DependencyProvider
class LazyCall(SlotsReprMixin):
"""
Dependency which is the result of the call of the given function with the
given arguments.
.. doctest::
>>> from antidote import LazyCall, world
>>> def f(x, y):
... print("Computing {} + {}".format(x, y))
... return x + y
>>> A = LazyCall(f)(2, y=3)
>>> world.get(A)
Computing 2 + 3
5
"""
__slots__ = ('_func', '_args', '_kwargs', '_singleton')
def __init__(self, func: Callable, singleton: bool = True):
"""
Args:
func: Function to lazily call, any arguments given by calling
to the instance of :py:class:`~.LazyCall` will be passed on.
singleton: Whether or not this is a singleton or not.
"""
self._singleton = singleton
self._func = func
self._args = () # type: Tuple
self._kwargs = {} # type: Dict
def __call__(self, *args, **kwargs):
"""
All argument are passed on to the lazily called function.
"""
self._args = args
self._kwargs = kwargs
return self
class LazyMethodCall(SlotsReprMixin):
"""
Similar to :py:class:`~.LazyCall` but adapted to methods within a class
definition. The class has to be a registered service, as the class
instantiation itself is also lazy.
.. doctest::
>>> from antidote import LazyMethodCall, register, world
>>> @register
... class Constants:
... def get(self, x: str):
... return len(x)
... A = LazyMethodCall(get)('test')
>>> Constants.A
LazyMethodCallDependency(...)
>>> world.get(Constants.A)
4
>>> Constants().A
4
:py:class:`~.LazyMethodCall` has two different behaviors:
- if retrieved as a class attribute it returns a dependency which identifies
the result for Antidote.
- if retrieved as a instance attribute it returns the result for this
instance. This makes testing a lot easier as it does not require Antidote.
Check out :py:class:`~.helpers.conf.LazyConstantsMeta` for simple way
to declare multiple constants.
"""
__slots__ = ('_method_name', '_args', '_kwargs', '_singleton', '_key')
def __init__(self, method: Union[Callable, str], singleton: bool = True):
"""
Args:
method: Method to be called or the name of it.
singleton: Whether or not this is a singleton or not.
"""
self._singleton = singleton
# Retrieve the name of the method, as injection can be done after the class
# creation which is typically the case with @register.
self._method_name = method if isinstance(method, str) else method.__name__
self._args = () # type: Tuple
self._kwargs = {} # type: Dict
self._key = None
def __call__(self, *args, **kwargs):
"""
All argument are passed on to the lazily called function.
"""
self._args = args
self._kwargs = kwargs
return self
def __get__(self, instance, owner):
if instance is None:
if self._singleton:
if self._key is None:
self._key = "{}_dependency".format(self._get_attribute_name(owner))
setattr(owner, self._key, LazyMethodCallDependency(self, owner))
return getattr(owner, self._key)
return LazyMethodCallDependency(self, owner)
return getattr(instance, self._method_name)(*self._args, **self._kwargs)
# The attribute is expected to be found in owner, as one should not call
# directly __get__.
def _get_attribute_name(self, owner):
for k, v in owner.__dict__.items(): # pragma: no cover
if v is self:
return k
class LazyMethodCallDependency(SlotsReprMixin):
__slots__ = ('lazy_method_call', 'owner')
def __init__(self, lazy_method_call, owner):
self.lazy_method_call = lazy_method_call
self.owner = owner
class LazyCallProvider(DependencyProvider):
bound_dependency_types = (LazyMethodCallDependency, LazyCall)
def provide(self,
dependency: Hashable
) -> Optional[DependencyInstance]:
if isinstance(dependency, LazyMethodCallDependency):
return DependencyInstance(
dependency.lazy_method_call.__get__(
self._container.get(dependency.owner),
dependency.owner
),
singleton=dependency.lazy_method_call._singleton
)
elif isinstance(dependency, LazyCall):
return DependencyInstance(
dependency._func(*dependency._args, **dependency._kwargs),
singleton=dependency._singleton
)
return None
| 1,377 | 214 | 99 |
554f324cd1b73fb5ea839a27518cc3cd2a409d84 | 606 | py | Python | scripts/model_summary.py | Dmitriuso/longformer | ff8c00bb672585d7f3a10f4a85c24e12a39c29e8 | [
"Apache-2.0"
] | null | null | null | scripts/model_summary.py | Dmitriuso/longformer | ff8c00bb672585d7f3a10f4a85c24e12a39c29e8 | [
"Apache-2.0"
] | null | null | null | scripts/model_summary.py | Dmitriuso/longformer | ff8c00bb672585d7f3a10f4a85c24e12a39c29e8 | [
"Apache-2.0"
] | null | null | null | import torch
#from pytorch_lightning import LightingModule
from transformers import AutoModelForSeq2SeqLM, MBartForConditionalGeneration
PATH = "../longformer/models/LongTinyMBART"
model = AutoModelForSeq2SeqLM.from_pretrained(PATH)
# model_torch = torch.load(PATH)
# model = model_torch
# model_lightning = MyLightingModule.load_from_checkpoint(PATH)
# model = model_lightning
print(sum([param.nelement() for param in model.parameters()]))
if __name__ == '__main__':
with open("model_summary/LongTinyMBART.txt", 'w') as f:
f.write(repr(model))
f.close()
print(repr(model)) | 25.25 | 77 | 0.754125 | import torch
#from pytorch_lightning import LightingModule
from transformers import AutoModelForSeq2SeqLM, MBartForConditionalGeneration
PATH = "../longformer/models/LongTinyMBART"
model = AutoModelForSeq2SeqLM.from_pretrained(PATH)
# model_torch = torch.load(PATH)
# model = model_torch
# model_lightning = MyLightingModule.load_from_checkpoint(PATH)
# model = model_lightning
print(sum([param.nelement() for param in model.parameters()]))
if __name__ == '__main__':
with open("model_summary/LongTinyMBART.txt", 'w') as f:
f.write(repr(model))
f.close()
print(repr(model)) | 0 | 0 | 0 |
11d5d4498d9e4821020710b9442598427cfb7f13 | 3,851 | py | Python | pyke/tests/test_kepconvert.py | ecalifornica/pyke | 6a3fcc0513cf012044e4420cc4d17064e582d142 | [
"MIT"
] | null | null | null | pyke/tests/test_kepconvert.py | ecalifornica/pyke | 6a3fcc0513cf012044e4420cc4d17064e582d142 | [
"MIT"
] | 1 | 2017-07-25T19:23:05.000Z | 2017-07-25T19:23:05.000Z | pyke/tests/test_kepconvert.py | mirca/PyKE | 6a3fcc0513cf012044e4420cc4d17064e582d142 | [
"MIT"
] | null | null | null | import pytest
import csv
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits as pyfits
from astropy.time import Time as astropyTime
from pyke import kepconvert
from pyke import kepio
from ..kepio import delete
fake_lc = get_pkg_data_filename("data/golden-lc.fits")
SUPPORTED_CONVERSION = ['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps',
'plot_date', 'datetime', 'iso', 'isot', 'yday', 'fits',
'byear', 'jyear', 'byear_str', 'jyear_str']
| 44.264368 | 79 | 0.572579 | import pytest
import csv
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits as pyfits
from astropy.time import Time as astropyTime
from pyke import kepconvert
from pyke import kepio
from ..kepio import delete
fake_lc = get_pkg_data_filename("data/golden-lc.fits")
def test_kepconvert():
with pytest.raises(Exception,
message=("ERROR -- KEPCONVERT: input file myfile.fits"
" does not exist")):
kepconvert("myfile.fits", "fits2excel",
"TIME,SAP_FLUX,SAP_FLUX_ERR,SAP_QUALITY",
outfile="output.txt", baddata=True,
overwrite=True, verbose=True)
with pytest.raises(Exception,
message=("ERROR -- KEPCONVERT: conversion not supported"
": fits2excel")):
kepconvert(fake_lc, "fits2excel",
"TIME,SAP_FLUX,SAP_FLUX_ERR,SAP_QUALITY",
outfile="output.txt",
baddata=True, overwrite=True, verbose=True)
# convert to csv
kepconvert(fake_lc, "fits2csv", "TIME,SAP_FLUX", outfile="fake_lc.csv",
baddata=True, overwrite=True, verbose=True)
with open('fake_lc.csv', 'r') as csvfile:
# check header
line = csvfile.readline()
first_line = line.split(',')
assert first_line == ['TIME', 'SAP_FLUX\n']
delete("fake_lc.csv", "kepconvert.log", False)
# convert to ascii
kepconvert(fake_lc, "fits2asc", "TIME,SAP_FLUX", outfile="fake_lc.txt",
baddata=True, overwrite=True, verbose=True)
with open('fake_lc.txt', 'r') as asciifile:
lines = asciifile.readlines()
first_line = lines[0]
first_line = first_line.split(',')
assert first_line == ['TIME', 'SAP_FLUX\n']
delete("fake_lc.txt", "kepconvert.log", False)
# time conversion
with pytest.raises(Exception,
message=("ERROR -- KEPCONVERT: error converting time to"
" nasa: format must be one of ['jd', 'mjd',"
" 'decimalyear', 'unix', 'cxcsec', 'gps',"
" 'plot_date', 'datetime', 'iso', 'isot',"
" 'yday', 'fits', 'byear', 'jyear',"
" 'byear_str', 'jyear_str']")):
kepconvert(fake_lc, "fits2csv", "TIME,SAP_FLUX", timeformat='nasa',
outfile="fake_lc.txt", baddata=True, overwrite=True,
verbose=True)
kepconvert(fake_lc, "fits2csv", "TIME,SAP_FLUX", timeformat='unix',
outfile="fake_lc.txt", baddata=True, overwrite=True,
verbose=True)
with open('fake_lc.txt', 'r') as asciifile:
lines = asciifile.readlines()
first_line = lines[0]
first_line = first_line.split(',')
assert first_line == ['TIME', 'SAP_FLUX\n']
second_line = lines[1]
second_line = second_line.split(',')
assert second_line == (['1.461334722059185982e+09',
'1.500000000000000000e+01\n'])
delete("fake_lc.txt", "kepconvert.log", False)
SUPPORTED_CONVERSION = ['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps',
'plot_date', 'datetime', 'iso', 'isot', 'yday', 'fits',
'byear', 'jyear', 'byear_str', 'jyear_str']
def test_time_conversion():
fits = pyfits.open(fake_lc, 'readonly')
tstart, _, _, _ = kepio.timekeys(fits, fake_lc, "kepconvert.log", False)
for conversion in SUPPORTED_CONVERSION:
dateobs = astropyTime(fits[1].header['DATE-OBS'])
dateobs.format = conversion
start = astropyTime(tstart, format='jd')
start.format = conversion
fits.close()
assert start.value == dateobs.value
| 3,284 | 0 | 46 |
67932b31fbfe6716b20cb211fb33ec4f13fa7a16 | 259 | py | Python | eventgen/src/config/utils.py | mtanvirulislam/infodium | b6210da6121bfb5a4fc178c8b9a6231794b30409 | [
"Apache-2.0"
] | 1 | 2021-05-26T11:28:02.000Z | 2021-05-26T11:28:02.000Z | eventgen/src/config/utils.py | mtanvirulislam/real-time-data-processing | b6210da6121bfb5a4fc178c8b9a6231794b30409 | [
"Apache-2.0"
] | null | null | null | eventgen/src/config/utils.py | mtanvirulislam/real-time-data-processing | b6210da6121bfb5a4fc178c8b9a6231794b30409 | [
"Apache-2.0"
] | null | null | null | import logging
import sys
| 28.777778 | 76 | 0.590734 | import logging
import sys
def printlog(log):
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] [%(levelname)s]: %(message)s',
handlers=[logging.StreamHandler(sys.stdout)])
logging.info(log) | 210 | 0 | 23 |
21f03fec975e47ecea6661210575c239ff1b02f1 | 8,077 | py | Python | demo.py | hukim1112/weakalign | 4d2c5a275fd50f34418734198b32de3c8ce749a8 | [
"MIT"
] | null | null | null | demo.py | hukim1112/weakalign | 4d2c5a275fd50f34418734198b32de3c8ce749a8 | [
"MIT"
] | null | null | null | demo.py | hukim1112/weakalign | 4d2c5a275fd50f34418734198b32de3c8ce749a8 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import os
import argparse
import torch
import torch.nn as nn
from os.path import exists
from torch.utils.data import Dataset, DataLoader
from model.cnn_geometric_model import CNNGeometric, TwoStageCNNGeometric
from data.pf_dataset import PFDataset, PFPascalDataset
from data.download_datasets import download_PF_willow
from image.normalization import NormalizeImageDict, normalize_image
from util.torch_util import BatchTensorToVars, str_to_bool
from geotnf.transformation import GeometricTnf
from geotnf.point_tnf import *
import matplotlib.pyplot as plt
from skimage import io
from collections import OrderedDict
import torch.nn.functional as F
# for compatibility with Python 2
try:
input = raw_input
except NameError:
pass
"""
Script to demonstrate evaluation on a trained model
"""
print('WeakAlign demo script')
# Argument parsing
parser = argparse.ArgumentParser(description='WeakAlign PyTorch implementation')
# Paths
parser.add_argument('--model', type=str, default='trained_models/weakalign_resnet101_affine_tps.pth.tar', help='Trained two-stage model filename')
parser.add_argument('--model-aff', type=str, default='', help='Trained affine model filename')
parser.add_argument('--model-tps', type=str, default='', help='Trained TPS model filename')
parser.add_argument('--pf-path', type=str, default='datasets/proposal-flow-pascal', help='Path to PF dataset')
parser.add_argument('--feature-extraction-cnn', type=str, default='resnet101', help='feature extraction CNN model architecture: vgg/resnet101')
parser.add_argument('--tps-reg-factor', type=float, default=0.0, help='regularisation factor for tps tnf')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
do_aff = not args.model_aff==''
do_tps = not args.model_tps==''
if args.pf_path=='':
args.args.pf_path='datasets/proposal-flow-pascal/'
# Download dataset if needed
if not exists(args.pf_path):
download_PF_pascal(args.pf_path)
# Create model
print('Creating CNN model...')
model = TwoStageCNNGeometric(use_cuda=use_cuda,
return_correlation=False,
feature_extraction_cnn=args.feature_extraction_cnn)
# Load trained weights
print('Loading trained model weights...')
if args.model!='':
checkpoint = torch.load(args.model, map_location=lambda storage, loc: storage)
checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])
for name, param in model.FeatureExtraction.state_dict().items():
model.FeatureExtraction.state_dict()[name].copy_(checkpoint['state_dict']['FeatureExtraction.' + name])
for name, param in model.FeatureRegression.state_dict().items():
model.FeatureRegression.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression.' + name])
for name, param in model.FeatureRegression2.state_dict().items():
model.FeatureRegression2.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression2.' + name])
else:
checkpoint_aff = torch.load(args.model_aff, map_location=lambda storage, loc: storage)
checkpoint_aff['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_aff['state_dict'].items()])
for name, param in model.FeatureExtraction.state_dict().items():
model.FeatureExtraction.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureExtraction.' + name])
for name, param in model.FeatureRegression.state_dict().items():
model.FeatureRegression.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureRegression.' + name])
checkpoint_tps = torch.load(args.model_tps, map_location=lambda storage, loc: storage)
checkpoint_tps['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_tps['state_dict'].items()])
for name, param in model.FeatureRegression2.state_dict().items():
model.FeatureRegression2.state_dict()[name].copy_(checkpoint_tps['state_dict']['FeatureRegression.' + name])
# Dataset and dataloader
dataset = PFPascalDataset(csv_file=os.path.join(args.pf_path, 'test_pairs_pf_pascal.csv'),
dataset_path=args.pf_path,
transform=NormalizeImageDict(['source_image','target_image']))
dataloader = DataLoader(dataset, batch_size=1,
shuffle=True, num_workers=4)
batchTensorToVars = BatchTensorToVars(use_cuda=use_cuda)
# Instatiate image transformers
affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda)
for i, batch in enumerate(dataloader):
# get random batch of size 1
batch = batchTensorToVars(batch)
source_im_size = batch['source_im_size']
target_im_size = batch['target_im_size']
source_points = batch['source_points']
target_points = batch['target_points']
# warp points with estimated transformations
target_points_norm = PointsToUnitCoords(target_points,target_im_size)
model.eval()
# Evaluate model
theta_aff,theta_aff_tps=model(batch)
warped_image_aff = affTnf(batch['source_image'],theta_aff.view(-1,2,3))
warped_image_aff_tps = affTpsTnf(batch['source_image'],theta_aff, theta_aff_tps)
# Un-normalize images and convert to numpy
source_image = normalize_image(batch['source_image'],forward=False)
source_image = source_image.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
target_image = normalize_image(batch['target_image'],forward=False)
target_image = target_image.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
warped_image_aff = normalize_image(warped_image_aff,forward=False)
warped_image_aff = warped_image_aff.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
warped_image_aff_tps = normalize_image(warped_image_aff_tps,forward=False)
warped_image_aff_tps = warped_image_aff_tps.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
# check if display is available
exit_val = os.system('python -c "import matplotlib.pyplot as plt;plt.figure()" > /dev/null 2>&1')
display_avail = exit_val==0
if display_avail:
N_subplots = 4
fig, axs = plt.subplots(1,N_subplots)
axs[0].imshow(source_image)
axs[0].set_title('src')
axs[1].imshow(target_image)
axs[1].set_title('tgt')
axs[2].imshow(warped_image_aff)
axs[2].set_title('aff')
axs[3].imshow(warped_image_aff_tps)
axs[3].set_title('aff+tps')
for i in range(N_subplots):
axs[i].axis('off')
print('Showing results. Close figure window to continue')
plt.show()
else:
print('No display found. Writing results to:')
fn_src = 'source.png'
print(fn_src)
io.imsave(fn_src, source_image)
fn_tgt = 'target.png'
print(fn_tgt)
io.imsave(fn_tgt, target_image)
fn_aff = 'result_aff.png'
print(fn_aff)
io.imsave(fn_aff, warped_image_aff)
fn_aff_tps = 'result_aff_tps.png'
print(fn_aff_tps)
io.imsave(fn_aff_tps,warped_image_aff_tps)
res = input('Run for another example ([y]/n): ')
if res=='n':
break
| 42.73545 | 146 | 0.710288 | from __future__ import print_function, division
import os
import argparse
import torch
import torch.nn as nn
from os.path import exists
from torch.utils.data import Dataset, DataLoader
from model.cnn_geometric_model import CNNGeometric, TwoStageCNNGeometric
from data.pf_dataset import PFDataset, PFPascalDataset
from data.download_datasets import download_PF_willow
from image.normalization import NormalizeImageDict, normalize_image
from util.torch_util import BatchTensorToVars, str_to_bool
from geotnf.transformation import GeometricTnf
from geotnf.point_tnf import *
import matplotlib.pyplot as plt
from skimage import io
from collections import OrderedDict
import torch.nn.functional as F
# for compatibility with Python 2
try:
input = raw_input
except NameError:
pass
"""
Script to demonstrate evaluation on a trained model
"""
print('WeakAlign demo script')
# Argument parsing
parser = argparse.ArgumentParser(description='WeakAlign PyTorch implementation')
# Paths
parser.add_argument('--model', type=str, default='trained_models/weakalign_resnet101_affine_tps.pth.tar', help='Trained two-stage model filename')
parser.add_argument('--model-aff', type=str, default='', help='Trained affine model filename')
parser.add_argument('--model-tps', type=str, default='', help='Trained TPS model filename')
parser.add_argument('--pf-path', type=str, default='datasets/proposal-flow-pascal', help='Path to PF dataset')
parser.add_argument('--feature-extraction-cnn', type=str, default='resnet101', help='feature extraction CNN model architecture: vgg/resnet101')
parser.add_argument('--tps-reg-factor', type=float, default=0.0, help='regularisation factor for tps tnf')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
do_aff = not args.model_aff==''
do_tps = not args.model_tps==''
if args.pf_path=='':
args.args.pf_path='datasets/proposal-flow-pascal/'
# Download dataset if needed
if not exists(args.pf_path):
download_PF_pascal(args.pf_path)
# Create model
print('Creating CNN model...')
model = TwoStageCNNGeometric(use_cuda=use_cuda,
return_correlation=False,
feature_extraction_cnn=args.feature_extraction_cnn)
# Load trained weights
print('Loading trained model weights...')
if args.model!='':
checkpoint = torch.load(args.model, map_location=lambda storage, loc: storage)
checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])
for name, param in model.FeatureExtraction.state_dict().items():
model.FeatureExtraction.state_dict()[name].copy_(checkpoint['state_dict']['FeatureExtraction.' + name])
for name, param in model.FeatureRegression.state_dict().items():
model.FeatureRegression.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression.' + name])
for name, param in model.FeatureRegression2.state_dict().items():
model.FeatureRegression2.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression2.' + name])
else:
checkpoint_aff = torch.load(args.model_aff, map_location=lambda storage, loc: storage)
checkpoint_aff['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_aff['state_dict'].items()])
for name, param in model.FeatureExtraction.state_dict().items():
model.FeatureExtraction.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureExtraction.' + name])
for name, param in model.FeatureRegression.state_dict().items():
model.FeatureRegression.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureRegression.' + name])
checkpoint_tps = torch.load(args.model_tps, map_location=lambda storage, loc: storage)
checkpoint_tps['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_tps['state_dict'].items()])
for name, param in model.FeatureRegression2.state_dict().items():
model.FeatureRegression2.state_dict()[name].copy_(checkpoint_tps['state_dict']['FeatureRegression.' + name])
# Dataset and dataloader
dataset = PFPascalDataset(csv_file=os.path.join(args.pf_path, 'test_pairs_pf_pascal.csv'),
dataset_path=args.pf_path,
transform=NormalizeImageDict(['source_image','target_image']))
dataloader = DataLoader(dataset, batch_size=1,
shuffle=True, num_workers=4)
batchTensorToVars = BatchTensorToVars(use_cuda=use_cuda)
# Instatiate image transformers
affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda)
def affTpsTnf(source_image, theta_aff, theta_aff_tps, use_cuda=use_cuda):
tpstnf = GeometricTnf(geometric_model = 'tps',use_cuda=use_cuda)
sampling_grid = tpstnf(image_batch=source_image,
theta_batch=theta_aff_tps,
return_sampling_grid=True)[1]
X = sampling_grid[:,:,:,0].unsqueeze(3)
Y = sampling_grid[:,:,:,1].unsqueeze(3)
Xp = X*theta_aff[:,0].unsqueeze(1).unsqueeze(2)+Y*theta_aff[:,1].unsqueeze(1).unsqueeze(2)+theta_aff[:,2].unsqueeze(1).unsqueeze(2)
Yp = X*theta_aff[:,3].unsqueeze(1).unsqueeze(2)+Y*theta_aff[:,4].unsqueeze(1).unsqueeze(2)+theta_aff[:,5].unsqueeze(1).unsqueeze(2)
sg = torch.cat((Xp,Yp),3)
warped_image_batch = F.grid_sample(source_image, sg)
return warped_image_batch
for i, batch in enumerate(dataloader):
# get random batch of size 1
batch = batchTensorToVars(batch)
source_im_size = batch['source_im_size']
target_im_size = batch['target_im_size']
source_points = batch['source_points']
target_points = batch['target_points']
# warp points with estimated transformations
target_points_norm = PointsToUnitCoords(target_points,target_im_size)
model.eval()
# Evaluate model
theta_aff,theta_aff_tps=model(batch)
warped_image_aff = affTnf(batch['source_image'],theta_aff.view(-1,2,3))
warped_image_aff_tps = affTpsTnf(batch['source_image'],theta_aff, theta_aff_tps)
# Un-normalize images and convert to numpy
source_image = normalize_image(batch['source_image'],forward=False)
source_image = source_image.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
target_image = normalize_image(batch['target_image'],forward=False)
target_image = target_image.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
warped_image_aff = normalize_image(warped_image_aff,forward=False)
warped_image_aff = warped_image_aff.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
warped_image_aff_tps = normalize_image(warped_image_aff_tps,forward=False)
warped_image_aff_tps = warped_image_aff_tps.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
# check if display is available
exit_val = os.system('python -c "import matplotlib.pyplot as plt;plt.figure()" > /dev/null 2>&1')
display_avail = exit_val==0
if display_avail:
N_subplots = 4
fig, axs = plt.subplots(1,N_subplots)
axs[0].imshow(source_image)
axs[0].set_title('src')
axs[1].imshow(target_image)
axs[1].set_title('tgt')
axs[2].imshow(warped_image_aff)
axs[2].set_title('aff')
axs[3].imshow(warped_image_aff_tps)
axs[3].set_title('aff+tps')
for i in range(N_subplots):
axs[i].axis('off')
print('Showing results. Close figure window to continue')
plt.show()
else:
print('No display found. Writing results to:')
fn_src = 'source.png'
print(fn_src)
io.imsave(fn_src, source_image)
fn_tgt = 'target.png'
print(fn_tgt)
io.imsave(fn_tgt, target_image)
fn_aff = 'result_aff.png'
print(fn_aff)
io.imsave(fn_aff, warped_image_aff)
fn_aff_tps = 'result_aff_tps.png'
print(fn_aff_tps)
io.imsave(fn_aff_tps,warped_image_aff_tps)
res = input('Run for another example ([y]/n): ')
if res=='n':
break
| 763 | 0 | 22 |
71a21e4a40918501a27b23e93ce27aedb909ef4f | 890 | py | Python | tests/backend/tarantool/test_schema.py | liviu-lesan/planetmint | 54cf8e45108947aa8282ddaaf127d08e06ce842e | [
"Apache-2.0"
] | null | null | null | tests/backend/tarantool/test_schema.py | liviu-lesan/planetmint | 54cf8e45108947aa8282ddaaf127d08e06ce842e | [
"Apache-2.0"
] | null | null | null | tests/backend/tarantool/test_schema.py | liviu-lesan/planetmint | 54cf8e45108947aa8282ddaaf127d08e06ce842e | [
"Apache-2.0"
] | null | null | null | # Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from planetmint.backend.tarantool.connection import TarantoolDB
| 28.709677 | 102 | 0.720225 | # Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from planetmint.backend.tarantool.connection import TarantoolDB
def _check_spaces_by_list(conn, space_names):
_exists = []
for name in space_names:
try:
conn.space(name)
_exists.append(name)
except:
pass
return _exists
def test_create_tables(db_conn):
db_conn.drop_database()
db_conn.init_database()
assert db_conn.SPACE_NAMES == _check_spaces_by_list(conn=db_conn, space_names=db_conn.SPACE_NAMES)
def test_drop(db_conn): # remove dummy_db as argument
db_conn.drop_database()
actual_spaces = _check_spaces_by_list(conn=db_conn, space_names=db_conn.SPACE_NAMES)
assert [] == actual_spaces
| 548 | 0 | 69 |
8e08988ea4cf640459c26fd2030ac36550909e58 | 5,320 | py | Python | main.py | biomadeira/gae_pubmed2rss | c033c8088249fcb67ed5ef5d1c0f236bd7cbf57a | [
"MIT"
] | null | null | null | main.py | biomadeira/gae_pubmed2rss | c033c8088249fcb67ed5ef5d1c0f236bd7cbf57a | [
"MIT"
] | null | null | null | main.py | biomadeira/gae_pubmed2rss | c033c8088249fcb67ed5ef5d1c0f236bd7cbf57a | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
-------
main.py
-------
Main methods (views + routes) implemented in the API.
.. moduleauthor:: Fabio Madeira
:module_version: 1.0
:created_on: 28-02-2015
"""
import webapp2
import logging
import os
import jinja2
import urllib
from tools import *
from google.appengine.ext.webapp import template
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader('templates'),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
default_search = '"PLoS One"[jour]'
default_feeds = 10
default_rssguid= "1h9kEWSfxImUd3q0TuDX7eLhEJoM4-k3pB8scCPrUmcSn3lkLl"
class RssPubmed(webapp2.RequestHandler):
"""Generate a rss feed from Pubmed - based on the main page search."""
class RssBot(webapp2.RequestHandler):
"""
Consumes a feed and checks if there are new entries in db.
If so, gets a shortened url and tweets the new status.
"""
debug = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
app = webapp2.WSGIApplication(routes=[
webapp2.Route(r'/', handler='main.MainPage', name='home'),
webapp2.Route(r'/search_output=<search_output:[^/]+>', handler='main.MainPage', name='search_output'),
webapp2.Route(r'/rssguid_output=<rssguid_output:[^/]+>', handler='main.MainPage', name='rssguid_output'),
webapp2.Route(r'/search', handler='main.Search'),
webapp2.Route(r'/rss', handler='main.Rss'),
webapp2.Route(r'/twitter', handler='main.Twitter'),
webapp2.Route(r'/search/pubmed/string=<string:[^/]+>', handler='main.SearchPubmed', name='string'),
webapp2.Route(r'/search/pubmed/<string:[^/]+>', handler='main.SearchPubmed', name='string'),
webapp2.Route(r'/rss/pubmed/string=<string:[^/]+>&feeds=<feeds:[^/]+>', handler='main.RssPubmed', name='string'),
webapp2.Route(r'/rss/pubmed/<string:[^/]+>&<feeds:[^/]+>', handler='main.RssPubmed', name='string'),
webapp2.Route(r'/rss/pubmed/string=<string:[^/]+>', handler='main.RssPubmed', name='string'),
webapp2.Route(r'/rss/pubmed/<string:[^/]+>', handler='main.RssPubmed', name='string'),
webapp2.Route(r'/twitter_bot&rss_guid=<rss_guid:[^/]+>', handler='main.RssBot', name='rss_guid'),
webapp2.Route(r'/twitter_bot&<rss_guid:[^/]+>', handler='main.RssBot', name='rss_guid'),
webapp2.Route(r'/twitter_bot', handler='main.RssBot', name='rss_guid'),
], debug=debug)
app.error_handlers[404] = handle_404
app.error_handlers[500] = handle_500
| 34.771242 | 117 | 0.664098 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
-------
main.py
-------
Main methods (views + routes) implemented in the API.
.. moduleauthor:: Fabio Madeira
:module_version: 1.0
:created_on: 28-02-2015
"""
import webapp2
import logging
import os
import jinja2
import urllib
from tools import *
from google.appengine.ext.webapp import template
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader('templates'),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
default_search = '"PLoS One"[jour]'
default_feeds = 10
default_rssguid= "1h9kEWSfxImUd3q0TuDX7eLhEJoM4-k3pB8scCPrUmcSn3lkLl"
class MainPage(webapp2.RequestHandler):
def get(self, search_output="", rssguid_output="", twitter_output=""):
"""Renders a simple api doc with the implemented methods."""
# template = JINJA_ENVIRONMENT.get_template('api.html')
template_values = {}
template_values['baseurl'] = ""
template_values['default_search'] = default_search
template_values['default_feeds'] = str(default_feeds)
template_values['default_rssguid'] = default_rssguid
template_values['search_output'] = search_output
template_values['rssguid_output'] = rssguid_output
template_values['twitter_output'] = twitter_output
path = os.path.join(os.path.dirname(__file__), 'api.html')
self.response.write(template.render(path, template_values))
class Search(webapp2.RequestHandler):
def post(self):
search = self.request.get("search", default_search)
search = urllib.quote_plus(search)
return webapp2.redirect('/search/pubmed/string=%s' % search)
class Rss(webapp2.RequestHandler):
def post(self):
search = self.request.get("search", default_search)
feeds = self.request.get("feeds", default_feeds)
return webapp2.redirect('/rss/pubmed/string=%s&feeds=%s' % (search, feeds))
class Twitter(webapp2.RequestHandler):
def post(self):
rssguid = self.request.get("rssguid", default_rssguid)
return webapp2.redirect('/twitter_bot&rss_guid=%s' % (rssguid))
class SearchPubmed(webapp2.RequestHandler):
def get(self, string):
"""Return output from Pubmed - based on eutils API."""
if string:
return webapp2.redirect('/search_output=%s' % string)
else:
self.abort(500)
class RssPubmed(webapp2.RequestHandler):
"""Generate a rss feed from Pubmed - based on the main page search."""
def get(self, string, feeds=50):
if string:
rss_guid = generate_rss_from_pubmed(string, feeds=feeds)
return webapp2.redirect('/rssguid_output=%s' % rss_guid)
else:
self.abort(500)
class RssBot(webapp2.RequestHandler):
"""
Consumes a feed and checks if there are new entries in db.
If so, gets a shortened url and tweets the new status.
"""
def get(self, rss_guid=None):
try:
tweets = twitter_bot(rss_guid=rss_guid)
# template = JINJA_ENVIRONMENT.get_template('papers.html')
template_values = {}
template_values['baseurl'] = ""
template_values['twitter_output'] = tweets
path = os.path.join(os.path.dirname(__file__), 'papers.html')
self.response.write(template.render(path, template_values))
except:
self.abort(500)
def handle_404(request, response, exception):
logging.exception(exception)
response.write('Sorry, nothing at this URL!')
response.set_status(404)
def handle_500(request, response, exception):
logging.exception(exception)
response.write('A server error occurred!')
response.set_status(500)
debug = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
app = webapp2.WSGIApplication(routes=[
webapp2.Route(r'/', handler='main.MainPage', name='home'),
webapp2.Route(r'/search_output=<search_output:[^/]+>', handler='main.MainPage', name='search_output'),
webapp2.Route(r'/rssguid_output=<rssguid_output:[^/]+>', handler='main.MainPage', name='rssguid_output'),
webapp2.Route(r'/search', handler='main.Search'),
webapp2.Route(r'/rss', handler='main.Rss'),
webapp2.Route(r'/twitter', handler='main.Twitter'),
webapp2.Route(r'/search/pubmed/string=<string:[^/]+>', handler='main.SearchPubmed', name='string'),
webapp2.Route(r'/search/pubmed/<string:[^/]+>', handler='main.SearchPubmed', name='string'),
webapp2.Route(r'/rss/pubmed/string=<string:[^/]+>&feeds=<feeds:[^/]+>', handler='main.RssPubmed', name='string'),
webapp2.Route(r'/rss/pubmed/<string:[^/]+>&<feeds:[^/]+>', handler='main.RssPubmed', name='string'),
webapp2.Route(r'/rss/pubmed/string=<string:[^/]+>', handler='main.RssPubmed', name='string'),
webapp2.Route(r'/rss/pubmed/<string:[^/]+>', handler='main.RssPubmed', name='string'),
webapp2.Route(r'/twitter_bot&rss_guid=<rss_guid:[^/]+>', handler='main.RssBot', name='rss_guid'),
webapp2.Route(r'/twitter_bot&<rss_guid:[^/]+>', handler='main.RssBot', name='rss_guid'),
webapp2.Route(r'/twitter_bot', handler='main.RssBot', name='rss_guid'),
], debug=debug)
app.error_handlers[404] = handle_404
app.error_handlers[500] = handle_500
| 1,450 | 1,083 | 309 |
0ac9376d1d6f2acf015536f7964222f793b50596 | 10,774 | py | Python | doc/TRexDataAnalysis.py | timgates42/trex-core | efe94752fcb2d0734c83d4877afe92a3dbf8eccd | [
"Apache-2.0"
] | 956 | 2015-06-24T15:04:55.000Z | 2022-03-30T06:25:04.000Z | doc/TRexDataAnalysis.py | angelyouyou/trex-core | fddf78584cae285d9298ef23f9f5c8725e16911e | [
"Apache-2.0"
] | 782 | 2015-09-20T15:19:00.000Z | 2022-03-31T23:52:05.000Z | doc/TRexDataAnalysis.py | angelyouyou/trex-core | fddf78584cae285d9298ef23f9f5c8725e16911e | [
"Apache-2.0"
] | 429 | 2015-06-27T19:34:21.000Z | 2022-03-23T11:02:51.000Z | #!/scratch/Anaconda2.4.0/bin/python
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import dates as matdates
from matplotlib import lines as matlines
import os
import time
from datetime import datetime
"""
This Module is structured to work with a raw data at the following JSON format:
{'setup_name': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
'setup_name2': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
}
The Query structure is set (currently) to this:
(test_name,state, date,hour,minute,mpps_result,mpps_min,mpps_max,build_id) example:
["syn attack - 64 bytes, single CPU", "stl", "20161226", "01", "39", "9.631898", "9.5", "11.5", "54289"]
it can be changed to support other formats of queries, simply change the query class to support your desired structure
the query class specify the indexes of the data within the query tuple
"""
| 45.268908 | 121 | 0.621589 | #!/scratch/Anaconda2.4.0/bin/python
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import dates as matdates
from matplotlib import lines as matlines
import os
import time
from datetime import datetime
"""
This Module is structured to work with a raw data at the following JSON format:
{'setup_name': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
'setup_name2': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
}
The Query structure is set (currently) to this:
(test_name,state, date,hour,minute,mpps_result,mpps_min,mpps_max,build_id) example:
["syn attack - 64 bytes, single CPU", "stl", "20161226", "01", "39", "9.631898", "9.5", "11.5", "54289"]
it can be changed to support other formats of queries, simply change the query class to support your desired structure
the query class specify the indexes of the data within the query tuple
"""
class TestQuery(object):
    """Field layout of a single raw result tuple.

    NOTE(review): the module docstring describes an older 9-field layout
    (name, state, date, hour, minute, ...); the indexes below match a
    4-field (name, timestamp, mpps, build_id) tuple -- confirm which
    format the data source actually emits.
    """
    QUERY_TIMEFORMAT = "%Y/%m/%d %H:%M:%S"  # date format in the query
    QUERY_TIMESTAMP = 1  # index of the timestamp string
    QUERY_MPPS_RESULT = 2  # index of the MPPS measurement
    QUERY_BUILD_ID = 3  # index of the build identifier
class Test:
    """Holds and analyzes every recorded result of one benchmark test."""

    def __init__(self, name, setup_name, end_date):
        self.name = name
        self.setup_name = setup_name
        self.end_date = end_date
        self.stats = []  # tuple
        self.results_df = []  # dataFrame
        self.latest_result = []  # float
        self.latest_result_date = ''  # string

    def analyze_all_test_data(self, raw_test_data):
        """Parse the raw queries into ``results_df`` and derive the summary
        statistics tuple plus the latest result/date attributes."""
        test_results = []
        test_dates = []
        test_build_ids = []
        for query in raw_test_data:
            # date_formatted = time.strftime("%d-%m-%Y",
            #                                time.strptime(query[int(TestQuery.QUERY_DATE)], TestQuery.query_dateformat))
            # time_of_res = date_formatted + '-' + query[int(TestQuery.QUERY_HOUR)] + ':' + query[
            #     int(TestQuery.QUERY_MINUTE)]
            time_of_query = time.strptime(query[TestQuery.QUERY_TIMESTAMP], TestQuery.QUERY_TIMEFORMAT)
            time_formatted = time.strftime("%d-%m-%Y-%H:%M", time_of_query)
            test_dates.append(time_formatted)
            test_results.append(float(query[int(TestQuery.QUERY_MPPS_RESULT)]))
            test_build_ids.append(query[int(TestQuery.QUERY_BUILD_ID)])
        # NOTE(review): dtype='str' stores everything as strings; the numeric
        # aggregations below rely on pandas coercing them back -- confirm
        # against the pandas version in use.
        test_results_df = pd.DataFrame({self.name: test_results, self.name + ' Date': test_dates,
                                        "Setup": ([self.setup_name] * len(test_results)), "Build Id": test_build_ids},
                                       dtype='str')
        stats_avg = float(test_results_df[self.name].mean())
        stats_min = float(test_results_df[self.name].min())
        stats_max = float(test_results_df[self.name].max())
        stats = tuple(
            [stats_avg, stats_min, stats_max,
             float(test_results_df[self.name].std()),
             float(((stats_max - stats_min) / stats_avg) * 100),
             len(test_results)])  # stats = (avg_mpps,min,max,std,error, no of test_results) error = ((max-min)/avg)*100
        self.latest_result = float(test_results_df[self.name].iloc[-1])
        # NOTE(review): columns[3] assumes a specific column ordering of the
        # DataFrame constructed above (dict ordering varies across pandas /
        # Python versions) -- verify this really selects the date column.
        self.latest_result_date = str(test_results_df[test_results_df.columns[3]].iloc[-1])
        self.results_df = test_results_df
        self.stats = stats
class Setup:
    """Aggregates all test results of a single benchmark setup.

    Builds one ``Test`` object per test name found in ``raw_setup_data``,
    then derives summary tables (latest results, trend statistics, merged
    trend data) and renders the matplotlib figures / CSV files.
    """

    def __init__(self, name, end_date, raw_setup_data):
        self.name = name
        self.end_date = end_date  # string of date
        self.tests = []  # list of test objects
        self.all_tests_data_table = pd.DataFrame()  # dataframe
        self.setup_trend_stats = pd.DataFrame()  # dataframe
        self.latest_test_results = pd.DataFrame()  # dataframe
        self.raw_setup_data = raw_setup_data  # dictionary
        self.test_names = raw_setup_data.keys()  # list of names

    def analyze_all_tests(self):
        """Create and analyze a Test object for every test of this setup."""
        for test_name in self.test_names:
            t = Test(test_name, self.name, self.end_date)
            t.analyze_all_test_data(self.raw_setup_data[test_name])
            self.tests.append(t)

    def analyze_latest_test_results(self):
        """Build the (name | MPPS | date) table of each test's newest result."""
        test_names = []
        test_dates = []
        test_latest_results = []
        for test in self.tests:
            test_names.append(test.name)
            test_dates.append(test.latest_result_date)
            test_latest_results.append(test.latest_result)
        self.latest_test_results = pd.DataFrame(
            {'Date': test_dates, 'Test Name': test_names, 'MPPS\Core (Norm)': test_latest_results},
            index=range(1, len(test_latest_results) + 1))
        # NOTE(review): selecting columns with an integer list on string
        # labels relies on legacy pandas behaviour -- verify against the
        # pandas version in use.
        self.latest_test_results = self.latest_test_results[[2, 1, 0]]  # re-order columns to name|MPPS|date

    def analyze_all_tests_stats(self):
        """Collect every test's stats tuple into one indexed DataFrame."""
        test_names = []
        all_test_stats = []
        for test in self.tests:
            test_names.append(test.name)
            all_test_stats.append(test.stats)
        self.setup_trend_stats = pd.DataFrame(all_test_stats, index=test_names,
                                              columns=['Avg MPPS/Core (Norm)', 'Min', 'Max', 'Std', 'Error (%)',
                                                       'Total Results'])
        self.setup_trend_stats.index.name = 'Test Name'

    def analyze_all_tests_trend(self):
        """Outer-merge every test's result frame into one wide trend table."""
        # 'reduce' is a builtin only on Python 2; importing it from functools
        # keeps this module working unchanged on both Python 2 and 3.
        from functools import reduce
        all_tests_trend_data = []
        for test in self.tests:
            all_tests_trend_data.append(test.results_df)
        self.all_tests_data_table = reduce(lambda x, y: pd.merge(x, y, how='outer'), all_tests_trend_data)

    def plot_trend_graph_all_tests(self, save_path='', file_name='_trend_graph.png'):
        """Plot every test's trend over time; optionally save PNG + stats CSV."""
        time_format1 = '%d-%m-%Y-%H:%M'
        time_format2 = '%Y-%m-%d-%H:%M'
        for test in self.tests:
            test_data = test.results_df[test.results_df.columns[2]].tolist()
            test_time_stamps = test.results_df[test.results_df.columns[3]].tolist()
            start_date = test_time_stamps[0]
            # Extend the series to the end of the reporting period so the
            # line reaches the right edge of the plot.
            test_time_stamps.append(self.end_date + '-23:59')
            test_data.append(test_data[-1])
            float_test_time_stamps = []
            for ts in test_time_stamps:
                try:
                    float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format1)))
                except:
                    float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format2)))
            plt.plot_date(x=float_test_time_stamps, y=test_data, label=test.name, fmt='.-', xdate=True)
            plt.legend(fontsize='small', loc='best')
        plt.ylabel('MPPS/Core (Norm)')
        plt.title('Setup: ' + self.name)
        plt.tick_params(
            axis='x',
            which='both',
            bottom='off',
            top='off',
            labelbottom='off')
        plt.xlabel('Time Period: ' + start_date[:-6] + ' - ' + self.end_date)
        if save_path:
            plt.savefig(os.path.join(save_path, self.name + file_name))
            if not self.setup_trend_stats.empty:
                (self.setup_trend_stats.round(2)).to_csv(os.path.join(save_path, self.name +
                                                                      '_trend_stats.csv'))
        plt.close('all')

    def plot_latest_test_results_bar_chart(self, save_path='', img_file_name='_latest_test_runs.png',
                                           stats_file_name='_latest_test_runs_stats.csv'):
        """Bar chart of the latest run of every test; optionally save PNG + CSV."""
        plt.figure()
        colors_for_bars = ['b', 'g', 'r', 'c', 'm', 'y']
        self.latest_test_results[[1]].plot(kind='bar', legend=False,
                                           color=colors_for_bars)  # plot only mpps data, which is in column 1
        plt.xticks(rotation='horizontal')
        plt.xlabel('Index of Tests')
        plt.ylabel('MPPS/Core (Norm)')
        plt.title("Test Runs for Setup: " + self.name)
        if save_path:
            plt.savefig(os.path.join(save_path, self.name + img_file_name))
            (self.latest_test_results.round(2)).to_csv(
                os.path.join(save_path, self.name + stats_file_name))
        plt.close('all')

    def analyze_all_setup_data(self):
        """Run every analysis step in the required order."""
        self.analyze_all_tests()
        self.analyze_latest_test_results()
        self.analyze_all_tests_stats()
        self.analyze_all_tests_trend()

    def plot_all(self, save_path=''):
        """Render both figures for this setup."""
        self.plot_latest_test_results_bar_chart(save_path)
        self.plot_trend_graph_all_tests(save_path)
def latest_runs_comparison_bar_chart(setup_name1, setup_name2, setup1_latest_result, setup2_latest_result,
                                     save_path=''
                                     ):
    """Plot a grouped bar chart comparing the latest results of two setups.

    ``setup*_latest_result`` are ``Setup.latest_test_results`` DataFrames
    (columns: test name | MPPS | date).  When ``save_path`` is given, the
    figure and a rounded stats CSV are written there.
    """
    s1_res = setup1_latest_result[[0, 1]]  # column0 is test name, column1 is MPPS\Core
    s2_res = setup2_latest_result[[0, 1, 2]]  # column0 is test name, column1 is MPPS\Core, column2 is Date
    s1_res.columns = ['Test Name', setup_name1]
    s2_res.columns = ['Test Name', setup_name2, 'Date']
    # Inner join on test name keeps only the tests present in both setups.
    compare_dframe = pd.merge(s1_res, s2_res, on='Test Name')
    compare_dframe.plot(kind='bar')
    plt.legend(fontsize='small', loc='best')
    plt.xticks(rotation='horizontal')
    plt.xlabel('Index of Tests')
    plt.ylabel('MPPS/Core (Norm)')
    plt.title("Comparison between " + setup_name1 + " and " + setup_name2)
    if save_path:
        plt.savefig(os.path.join(save_path, "_comparison.png"))
        compare_dframe = compare_dframe.round(2)
        compare_dframe.to_csv(os.path.join(save_path, '_comparison_stats_table.csv'))
# WARNING: if the file _detailed_table.csv already exists, this script deletes it, to prevent overflowing of data
def create_all_data(ga_data, end_date, save_path='', detailed_test_stats=False):
    """Analyze and plot every setup found in *ga_data*.

    *ga_data* maps setup name -> {test name: [query, ...]} (see the module
    docstring for the query layout).  Per-setup graphs and CSVs are written
    under *save_path*; when *detailed_test_stats* is set, the combined
    ``_detailed_table.csv`` is rebuilt from scratch.
    """
    all_setups = {}
    all_setups_data = []
    setup_names = ga_data.keys()
    for setup_name in setup_names:
        s = Setup(setup_name, end_date, ga_data[setup_name])
        s.analyze_all_setup_data()
        s.plot_all(save_path)
        all_setups_data.append(s.all_tests_data_table)
        all_setups[setup_name] = s
    if detailed_test_stats:
        # Start from a clean file so stale rows never accumulate.
        if os.path.exists(os.path.join(save_path, '_detailed_table.csv')):
            os.remove(os.path.join(save_path, '_detailed_table.csv'))
        if all_setups_data:
            all_setups_data_dframe = pd.DataFrame().append(all_setups_data)
            all_setups_data_dframe.to_csv(os.path.join(save_path, '_detailed_table.csv'))
    # Robustness fix: only draw the hard-coded comparison when both setups
    # are actually present in the input, instead of raising KeyError.
    if 'trex19' in all_setups and 'trex08' in all_setups:
        trex19setup = all_setups['trex19']
        trex08setup = all_setups['trex08']
        latest_runs_comparison_bar_chart('Mellanox ConnectX-5',
                                         'Intel XL710', trex19setup.latest_test_results,
                                         trex08setup.latest_test_results,
                                         save_path=save_path)
| 9,165 | 128 | 410 |
8564c60bc6da704fa49bb24a6f7e2132c38d2509 | 11,722 | py | Python | beyond/orbits/forms.py | priyatharsan/beyond | 1061b870407d316d43e4d1351a7ec026629685ae | [
"MIT"
] | null | null | null | beyond/orbits/forms.py | priyatharsan/beyond | 1061b870407d316d43e4d1351a7ec026629685ae | [
"MIT"
] | null | null | null | beyond/orbits/forms.py | priyatharsan/beyond | 1061b870407d316d43e4d1351a7ec026629685ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module declares the different meanings that the Orbit 6 components can take
and their conversions
"""
from numpy import cos, arccos, sin, arcsin, arctan2, sqrt, arctanh, sinh, cosh
import numpy as np
from ..errors import UnknownFormError
from ..utils.node import Node
class Form(Node):
"""Base class for orbital form definition
"""
alt = {
"theta": "θ",
"phi": "φ",
"raan": "Ω",
"Omega": "Ω",
"omega": "ω",
"nu": "ν",
"theta_dot": "θ_dot",
"phi_dot": "φ_dot",
"aol": "u",
"H": "E", # The hyperbolic anomaly is available under the eccentric anomaly
}
def __call__(self, orbit, new_form):
"""Gives the result of the transformation without in-place modifications
Args:
orbit (Orbit):
new_form (str or Form):
Returns:
Coord
"""
if isinstance(new_form, Form):
new_form = new_form.name
coord = orbit.copy()
if new_form != orbit.form.name:
for a, b in self.steps(new_form):
coord = getattr(
self, "_{}_to_{}".format(a.name.lower(), b.name.lower())
)(coord, orbit.frame.center)
return coord
@classmethod
def _cartesian_to_keplerian(cls, coord, center):
"""Conversion from cartesian (position and velocity) to keplerian
The keplerian form is
* a : semi-major axis
* e : eccentricity
* i : inclination
* Ω : right-ascension of ascending node
* ω : Argument of perigee
* ν : True anomaly
"""
r, v = coord[:3], coord[3:]
h = np.cross(r, v) # angular momentum vector
h_norm = np.linalg.norm(h)
r_norm = np.linalg.norm(r)
v_norm = np.linalg.norm(v)
K = v_norm ** 2 / 2 - center.µ / r_norm # specific energy
a = -center.µ / (2 * K) # semi-major axis
e = sqrt(1 - h_norm ** 2 / (a * center.µ)) # eccentricity
p = a * (1 - e ** 2) # semi parameter
i = arccos(h[2] / h_norm) # inclination
Ω = arctan2(h[0], -h[1]) % (2 * np.pi) # right ascension of the ascending node
ω_ν = arctan2(r[2] / sin(i), r[0] * cos(Ω) + r[1] * sin(Ω))
ν = arctan2(sqrt(p / center.µ) * np.dot(v, r), p - r_norm) % (2 * np.pi)
ω = (ω_ν - ν) % (2 * np.pi) # argument of the perigee
return np.array([a, e, i, Ω, ω, ν], dtype=float)
@classmethod
def _keplerian_to_cartesian(cls, coord, center):
"""Conversion from Keplerian to Cartesian coordinates
"""
a, e, i, Ω, ω, ν = coord
p = a * (1 - e ** 2)
r = p / (1 + e * cos(ν))
h = sqrt(center.µ * p)
x = r * (cos(Ω) * cos(ω + ν) - sin(Ω) * sin(ω + ν) * cos(i))
y = r * (sin(Ω) * cos(ω + ν) + cos(Ω) * sin(ω + ν) * cos(i))
z = r * sin(i) * sin(ω + ν)
vx = x * h * e / (r * p) * sin(ν) - h / r * (
cos(Ω) * sin(ω + ν) + sin(Ω) * cos(ω + ν) * cos(i)
)
vy = y * h * e / (r * p) * sin(ν) - h / r * (
sin(Ω) * sin(ω + ν) - cos(Ω) * cos(ω + ν) * cos(i)
)
vz = z * h * e / (r * p) * sin(ν) + h / r * sin(i) * cos(ω + ν)
return np.array([x, y, z, vx, vy, vz], dtype=float)
@classmethod
def _keplerian_to_keplerian_eccentric(cls, coord, center):
"""Conversion from Keplerian to Keplerian Eccentric
"""
a, e, i, Ω, ω, ν = coord
if e < 1:
# Elliptic case
cos_E = (e + cos(ν)) / (1 + e * cos(ν))
sin_E = (sin(ν) * sqrt(1 - e ** 2)) / (1 + e * cos(ν))
E = arctan2(sin_E, cos_E) % (2 * np.pi)
else:
# Hyperbolic case, E usually marked as H
cosh_E = (e + cos(ν)) / (1 + e * cos(ν))
sinh_E = (sin(ν) * sqrt(e ** 2 - 1)) / (1 + e * cos(ν))
E = arctanh(sinh_E / cosh_E)
return np.array([a, e, i, Ω, ω, E], dtype=float)
@classmethod
def _keplerian_eccentric_to_keplerian_mean(cls, coord, center):
"""Conversion from Keplerian Eccentric to Keplerian Mean
"""
a, e, i, Ω, ω, E = coord
if e < 1:
M = E - e * sin(E)
else:
# Hyperbolic case, E usually marked as H
M = e * sinh(E) - E
return np.array([a, e, i, Ω, ω, M], dtype=float)
@classmethod
def _keplerian_mean_to_keplerian_eccentric(cls, coord, center):
"""Conversion from Mean Keplerian to Keplerian Eccentric
"""
a, e, i, Ω, ω, M = coord
E = cls.M2E(e, M)
return np.array([a, e, i, Ω, ω, E], dtype=float)
@classmethod
def _keplerian_eccentric_to_keplerian(cls, coord, center):
"""Conversion from Mean Keplerian to True Keplerian
"""
a, e, i, Ω, ω, E = coord
if e < 1:
cos_ν = (cos(E) - e) / (1 - e * cos(E))
sin_ν = (sin(E) * sqrt(1 - e ** 2)) / (1 - e * cos(E))
else:
# Hyperbolic case, E usually marked as H
cos_ν = (cosh(E) - e) / (1 - e * cosh(E))
sin_ν = -(sinh(E) * sqrt(e ** 2 - 1)) / (1 - e * cosh(E))
ν = arctan2(sin_ν, cos_ν) % (np.pi * 2)
return np.array([a, e, i, Ω, ω, ν], dtype=float)
@classmethod
def M2E(cls, e, M):
"""Conversion from Mean Anomaly to Eccentric anomaly,
or Hyperbolic anomaly.
from Vallado
"""
tol = 1e-8
if e < 1:
# Ellipse
if -np.pi < M < 0 or M > np.pi:
E = M - e
else:
E = M + e
E1 = next_E(E, e, M)
while abs(E1 - E) >= tol:
E = E1
E1 = next_E(E, e, M)
return E1
else:
# Hyperbolic
if e < 1.6:
if -np.pi < M < 0 or M > np.pi:
H = M - e
else:
H = M + e
else:
if e < 3.6 and abs(M) > np.pi:
H = M - np.sign(M) * e
else:
H = M / (e - 1)
H1 = next_H(H, e, M)
while abs(H1 - H) >= tol:
H = H1
H1 = next_H(H, e, M)
return H1
@classmethod
@classmethod
def _keplerian_circular_to_keplerian(cls, coord, center):
"""Conversion from Keplerian near-circular elements to Mean Keplerian
"""
a, ex, ey, i, Ω, u = coord
e = sqrt(ex ** 2 + ey ** 2)
ω = arctan2(ey / e, ex / e)
ν = u - ω
return np.array([a, e, i, Ω, ω, ν], dtype=float)
@classmethod
def _keplerian_to_keplerian_circular(cls, coord, center):
"""Conversion from Mean Keplerian to Keplerian near-circular elements
"""
a, e, i, Ω, ω, ν = coord
ex = e * cos(ω)
ey = e * sin(ω)
u = (ω + ν) % (np.pi * 2)
return np.array([a, ex, ey, i, Ω, u], dtype=float)
@classmethod
def _tle_to_keplerian_mean(cls, coord, center):
"""Conversion from the TLE standard format to the Mean Keplerian
see :py:class:`Tle` for more information.
"""
i, Ω, e, ω, M, n = coord
a = (center.µ / n ** 2) ** (1 / 3)
return np.array([a, e, i, Ω, ω, M], dtype=float)
@classmethod
def _keplerian_mean_to_tle(cls, coord, center):
"""Mean Keplerian to TLE format conversion
"""
a, e, i, Ω, ω, M = coord
n = sqrt(center.µ / a ** 3)
return np.array([i, Ω, e, ω, M, n], dtype=float)
@classmethod
def _cartesian_to_spherical(cls, coord, center):
"""Cartesian to Spherical conversion
.. warning:: The spherical form is equatorial, not zenithal
"""
x, y, z, vx, vy, vz = coord
r = np.linalg.norm(coord[:3])
phi = arcsin(z / r)
theta = arctan2(y, x)
r_dot = (x * vx + y * vy + z * vz) / r
phi_dot = (vz * (x ** 2 + y ** 2) - z * (x * vx + y * vy)) / (
r ** 2 * sqrt(x ** 2 + y ** 2)
)
theta_dot = (x * vy - y * vx) / (x ** 2 + y ** 2)
return np.array([r, theta, phi, r_dot, theta_dot, phi_dot], dtype=float)
@classmethod
def _spherical_to_cartesian(cls, coord, center):
"""Spherical to cartesian conversion
"""
r, theta, phi, r_dot, theta_dot, phi_dot = coord
x = r * cos(phi) * cos(theta)
y = r * cos(phi) * sin(theta)
z = r * sin(phi)
vx = r_dot * x / r - y * theta_dot - z * phi_dot * cos(theta)
vy = r_dot * y / r + x * theta_dot - z * phi_dot * sin(theta)
vz = r_dot * z / r + r * phi_dot * cos(phi)
return np.array([x, y, z, vx, vy, vz], dtype=float)
TLE = Form("tle", ["i", "Ω", "e", "ω", "M", "n"])
"""TLE special form
* i : inclination
* Ω : right-ascension of ascending node
* e : eccentricity
* ω : argument of perigee
* M : mean anomaly
* n : mean motion
see :py:class:`~beyond.orbits.tle.Tle` for details
"""
KEPL_C = Form("keplerian_circular", ["a", "ex", "ey", "i", "Ω", "u"])
"""Special case for near-circular orbits
* a : semi-major axis
* ex : e * cos(ω)
* ey : e * sin(ω)
* i : inclination
* Ω : right-ascension of ascending node
* u : argument of latitude (ω + ν)
"""
KEPL_E = Form("keplerian_eccentric", ["a", "e", "i", "Ω", "ω", "E"])
"""Same as Keplerian, but replaces True anomaly with
`Eccentric anomaly <https://en.wikipedia.org/wiki/Eccentric_anomaly>`__
"""
KEPL_M = Form("keplerian_mean", ["a", "e", "i", "Ω", "ω", "M"])
"""Same as Keplerian, but replaces True anomaly with
`Mean anomaly <https://en.wikipedia.org/wiki/Mean_anomaly>`__
"""
KEPL = Form("keplerian", ["a", "e", "i", "Ω", "ω", "ν"])
"""The keplerian form is
* a : semi-major axis
* e : eccentricity
* i : inclination
* Ω : right-ascension of ascending node
* ω : Argument of perigee
* ν : True anomaly
see `wikipedia <https://en.wikipedia.org/wiki/Orbital_elements>`__ for details
"""
SPHE = Form("spherical", ["r", "θ", "φ", "r_dot", "θ_dot", "φ_dot"])
"""Spherical form
* r : radial distance / altitude
* θ : azimuth / longitude
* φ : elevation / latitude
* r_dot : first derivative of radial distance / altitude
* θ_dot : first derivative of azimuth / longitude
* φ_dot : first derivative of elevation / latitude
"""
CART = Form("cartesian", ["x", "y", "z", "vx", "vy", "vz"])
"""Cartesian form"""
SPHE + CART + KEPL + KEPL_E + KEPL_M + TLE
KEPL + KEPL_C
_cache = {
"tle": TLE,
"keplerian_circular": KEPL_C,
"keplerian_mean": KEPL_M,
"keplerian_eccentric": KEPL_E,
"keplerian": KEPL,
"spherical": SPHE,
"cartesian": CART,
}
| 29.086849 | 87 | 0.494711 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module declares the different meanings that the Orbit 6 components can take
and their conversions
"""
from numpy import cos, arccos, sin, arcsin, arctan2, sqrt, arctanh, sinh, cosh
import numpy as np
from ..errors import UnknownFormError
from ..utils.node import Node
class Form(Node):
    """Base class for orbital form definition

    A *form* is one representation of the 6 orbital parameters (cartesian,
    keplerian, spherical, ...).  Instances are linked together in a graph
    (see Node) so conversions can be chained one hop at a time.
    """

    # ASCII aliases for the greek-letter parameter names
    alt = {
        "theta": "θ",
        "phi": "φ",
        "raan": "Ω",
        "Omega": "Ω",
        "omega": "ω",
        "nu": "ν",
        "theta_dot": "θ_dot",
        "phi_dot": "φ_dot",
        "aol": "u",
        "H": "E",  # The hyperbolic anomaly is available under the eccentric anomaly
    }

    def __init__(self, name, param_names):
        super().__init__(name)
        self.param_names = param_names

    def __str__(self):  # pragma: no cover
        return self.name

    def __call__(self, orbit, new_form):
        """Gives the result of the transformation without in-place modifications

        Args:
            orbit (Orbit):
            new_form (str or Form):
        Returns:
            Coord
        """
        if isinstance(new_form, Form):
            new_form = new_form.name

        coord = orbit.copy()
        if new_form != orbit.form.name:
            # Walk the conversion graph one hop at a time; each hop is
            # implemented by a ``_<src>_to_<dst>`` classmethod below.
            for a, b in self.steps(new_form):
                coord = getattr(
                    self, "_{}_to_{}".format(a.name.lower(), b.name.lower())
                )(coord, orbit.frame.center)

        return coord

    @classmethod
    def _cartesian_to_keplerian(cls, coord, center):
        """Conversion from cartesian (position and velocity) to keplerian

        The keplerian form is

        * a : semi-major axis
        * e : eccentricity
        * i : inclination
        * Ω : right-ascension of ascending node
        * ω : Argument of perigee
        * ν : True anomaly
        """
        r, v = coord[:3], coord[3:]
        h = np.cross(r, v)  # angular momentum vector
        h_norm = np.linalg.norm(h)
        r_norm = np.linalg.norm(r)
        v_norm = np.linalg.norm(v)

        K = v_norm ** 2 / 2 - center.µ / r_norm  # specific energy
        a = -center.µ / (2 * K)  # semi-major axis
        e = sqrt(1 - h_norm ** 2 / (a * center.µ))  # eccentricity
        p = a * (1 - e ** 2)  # semi parameter
        i = arccos(h[2] / h_norm)  # inclination
        Ω = arctan2(h[0], -h[1]) % (2 * np.pi)  # right ascension of the ascending node

        ω_ν = arctan2(r[2] / sin(i), r[0] * cos(Ω) + r[1] * sin(Ω))
        ν = arctan2(sqrt(p / center.µ) * np.dot(v, r), p - r_norm) % (2 * np.pi)
        ω = (ω_ν - ν) % (2 * np.pi)  # argument of the perigee

        return np.array([a, e, i, Ω, ω, ν], dtype=float)

    @classmethod
    def _keplerian_to_cartesian(cls, coord, center):
        """Conversion from Keplerian to Cartesian coordinates
        """
        a, e, i, Ω, ω, ν = coord

        p = a * (1 - e ** 2)
        r = p / (1 + e * cos(ν))
        h = sqrt(center.µ * p)
        x = r * (cos(Ω) * cos(ω + ν) - sin(Ω) * sin(ω + ν) * cos(i))
        y = r * (sin(Ω) * cos(ω + ν) + cos(Ω) * sin(ω + ν) * cos(i))
        z = r * sin(i) * sin(ω + ν)
        vx = x * h * e / (r * p) * sin(ν) - h / r * (
            cos(Ω) * sin(ω + ν) + sin(Ω) * cos(ω + ν) * cos(i)
        )
        vy = y * h * e / (r * p) * sin(ν) - h / r * (
            sin(Ω) * sin(ω + ν) - cos(Ω) * cos(ω + ν) * cos(i)
        )
        vz = z * h * e / (r * p) * sin(ν) + h / r * sin(i) * cos(ω + ν)

        return np.array([x, y, z, vx, vy, vz], dtype=float)

    @classmethod
    def _keplerian_to_keplerian_eccentric(cls, coord, center):
        """Conversion from Keplerian to Keplerian Eccentric
        """
        a, e, i, Ω, ω, ν = coord

        if e < 1:
            # Elliptic case
            cos_E = (e + cos(ν)) / (1 + e * cos(ν))
            sin_E = (sin(ν) * sqrt(1 - e ** 2)) / (1 + e * cos(ν))
            E = arctan2(sin_E, cos_E) % (2 * np.pi)
        else:
            # Hyperbolic case, E usually marked as H
            cosh_E = (e + cos(ν)) / (1 + e * cos(ν))
            sinh_E = (sin(ν) * sqrt(e ** 2 - 1)) / (1 + e * cos(ν))
            E = arctanh(sinh_E / cosh_E)

        return np.array([a, e, i, Ω, ω, E], dtype=float)

    @classmethod
    def _keplerian_eccentric_to_keplerian_mean(cls, coord, center):
        """Conversion from Keplerian Eccentric to Keplerian Mean
        """
        a, e, i, Ω, ω, E = coord

        if e < 1:
            # Kepler's equation
            M = E - e * sin(E)
        else:
            # Hyperbolic case, E usually marked as H
            M = e * sinh(E) - E

        return np.array([a, e, i, Ω, ω, M], dtype=float)

    @classmethod
    def _keplerian_mean_to_keplerian_eccentric(cls, coord, center):
        """Conversion from Mean Keplerian to Keplerian Eccentric
        """
        a, e, i, Ω, ω, M = coord

        E = cls.M2E(e, M)

        return np.array([a, e, i, Ω, ω, E], dtype=float)

    @classmethod
    def _keplerian_eccentric_to_keplerian(cls, coord, center):
        """Conversion from Mean Keplerian to True Keplerian
        """
        a, e, i, Ω, ω, E = coord

        if e < 1:
            cos_ν = (cos(E) - e) / (1 - e * cos(E))
            sin_ν = (sin(E) * sqrt(1 - e ** 2)) / (1 - e * cos(E))
        else:
            # Hyperbolic case, E usually marked as H
            cos_ν = (cosh(E) - e) / (1 - e * cosh(E))
            sin_ν = -(sinh(E) * sqrt(e ** 2 - 1)) / (1 - e * cosh(E))

        ν = arctan2(sin_ν, cos_ν) % (np.pi * 2)

        return np.array([a, e, i, Ω, ω, ν], dtype=float)

    @classmethod
    def M2E(cls, e, M):
        """Conversion from Mean Anomaly to Eccentric anomaly,
        or Hyperbolic anomaly.

        Newton iteration, from Vallado.
        """
        tol = 1e-8

        if e < 1:
            # Ellipse
            if -np.pi < M < 0 or M > np.pi:
                E = M - e
            else:
                E = M + e

            def next_E(E, e, M):
                return E + (M - E + e * sin(E)) / (1 - e * cos(E))

            E1 = next_E(E, e, M)
            while abs(E1 - E) >= tol:
                E = E1
                E1 = next_E(E, e, M)

            return E1
        else:
            # Hyperbolic
            if e < 1.6:
                if -np.pi < M < 0 or M > np.pi:
                    H = M - e
                else:
                    H = M + e
            else:
                if e < 3.6 and abs(M) > np.pi:
                    H = M - np.sign(M) * e
                else:
                    H = M / (e - 1)

            def next_H(H, e, M):
                return H + (M - e * sinh(H) + H) / (e * cosh(H) - 1)

            H1 = next_H(H, e, M)
            while abs(H1 - H) >= tol:
                H = H1
                H1 = next_H(H, e, M)

            return H1

    @classmethod
    def _e_e_sin_e(cls, e, E):
        # Evaluates E - e*sin(E) as (1-e)*sin(E) plus the series expansion of
        # E - sin(E), limiting the cancellation error for small E.
        # NOTE(review): not referenced by the conversions in this class.
        x = (1 - e) * sin(E)
        term = float(E)
        d = 0
        x0 = np.nan
        while x != x0:
            d += 2
            term *= -(E ** 2) / (d * (d + 1))
            x0 = x
            x = x - term
        return x

    @classmethod
    def _keplerian_circular_to_keplerian(cls, coord, center):
        """Conversion from Keplerian near-circular elements to Mean Keplerian
        """
        a, ex, ey, i, Ω, u = coord
        e = sqrt(ex ** 2 + ey ** 2)
        # arctan2 is invariant under positive scaling, so dividing the
        # components by e is unnecessary; this also keeps the conversion
        # defined for exactly circular orbits (the former ey / e, ex / e
        # produced NaN when e == 0).
        ω = arctan2(ey, ex)
        ν = u - ω

        return np.array([a, e, i, Ω, ω, ν], dtype=float)

    @classmethod
    def _keplerian_to_keplerian_circular(cls, coord, center):
        """Conversion from Mean Keplerian to Keplerian near-circular elements
        """
        a, e, i, Ω, ω, ν = coord
        ex = e * cos(ω)
        ey = e * sin(ω)
        u = (ω + ν) % (np.pi * 2)

        return np.array([a, ex, ey, i, Ω, u], dtype=float)

    @classmethod
    def _tle_to_keplerian_mean(cls, coord, center):
        """Conversion from the TLE standard format to the Mean Keplerian

        see :py:class:`Tle` for more information.
        """
        i, Ω, e, ω, M, n = coord
        a = (center.µ / n ** 2) ** (1 / 3)

        return np.array([a, e, i, Ω, ω, M], dtype=float)

    @classmethod
    def _keplerian_mean_to_tle(cls, coord, center):
        """Mean Keplerian to TLE format conversion
        """
        a, e, i, Ω, ω, M = coord
        n = sqrt(center.µ / a ** 3)

        return np.array([i, Ω, e, ω, M, n], dtype=float)

    @classmethod
    def _cartesian_to_spherical(cls, coord, center):
        """Cartesian to Spherical conversion

        .. warning:: The spherical form is equatorial, not zenithal
        """
        x, y, z, vx, vy, vz = coord
        r = np.linalg.norm(coord[:3])
        phi = arcsin(z / r)
        theta = arctan2(y, x)

        # Velocity conversions
        r_dot = (x * vx + y * vy + z * vz) / r
        phi_dot = (vz * (x ** 2 + y ** 2) - z * (x * vx + y * vy)) / (
            r ** 2 * sqrt(x ** 2 + y ** 2)
        )
        theta_dot = (x * vy - y * vx) / (x ** 2 + y ** 2)

        return np.array([r, theta, phi, r_dot, theta_dot, phi_dot], dtype=float)

    @classmethod
    def _spherical_to_cartesian(cls, coord, center):
        """Spherical to cartesian conversion
        """
        r, theta, phi, r_dot, theta_dot, phi_dot = coord
        x = r * cos(phi) * cos(theta)
        y = r * cos(phi) * sin(theta)
        z = r * sin(phi)

        # Velocity conversions
        vx = r_dot * x / r - y * theta_dot - z * phi_dot * cos(theta)
        vy = r_dot * y / r + x * theta_dot - z * phi_dot * sin(theta)
        vz = r_dot * z / r + r * phi_dot * cos(phi)

        return np.array([x, y, z, vx, vy, vz], dtype=float)
TLE = Form("tle", ["i", "Ω", "e", "ω", "M", "n"])
"""TLE special form
* i : inclination
* Ω : right-ascension of ascending node
* e : eccentricity
* ω : argument of perigee
* M : mean anomaly
* n : mean motion
see :py:class:`~beyond.orbits.tle.Tle` for details
"""
KEPL_C = Form("keplerian_circular", ["a", "ex", "ey", "i", "Ω", "u"])
"""Special case for near-circular orbits
* a : semi-major axis
* ex : e * cos(ω)
* ey : e * sin(ω)
* i : inclination
* Ω : right-ascension of ascending node
* u : argument of latitude (ω + ν)
"""
KEPL_E = Form("keplerian_eccentric", ["a", "e", "i", "Ω", "ω", "E"])
"""Same as Keplerian, but replaces True anomaly with
`Eccentric anomaly <https://en.wikipedia.org/wiki/Eccentric_anomaly>`__
"""
KEPL_M = Form("keplerian_mean", ["a", "e", "i", "Ω", "ω", "M"])
"""Same as Keplerian, but replaces True anomaly with
`Mean anomaly <https://en.wikipedia.org/wiki/Mean_anomaly>`__
"""
KEPL = Form("keplerian", ["a", "e", "i", "Ω", "ω", "ν"])
"""The keplerian form is
* a : semi-major axis
* e : eccentricity
* i : inclination
* Ω : right-ascension of ascending node
* ω : Argument of perigee
* ν : True anomaly
see `wikipedia <https://en.wikipedia.org/wiki/Orbital_elements>`__ for details
"""
SPHE = Form("spherical", ["r", "θ", "φ", "r_dot", "θ_dot", "φ_dot"])
"""Spherical form
* r : radial distance / altitude
* θ : azimuth / longitude
* φ : elevation / latitude
* r_dot : first derivative of radial distance / altitude
* θ_dot : first derivative of azimuth / longitude
* φ_dot : first derivative of elevation / latitude
"""
CART = Form("cartesian", ["x", "y", "z", "vx", "vy", "vz"])
"""Cartesian form"""
SPHE + CART + KEPL + KEPL_E + KEPL_M + TLE
KEPL + KEPL_C
_cache = {
"tle": TLE,
"keplerian_circular": KEPL_C,
"keplerian_mean": KEPL_M,
"keplerian_eccentric": KEPL_E,
"keplerian": KEPL,
"spherical": SPHE,
"cartesian": CART,
}
def get_form(form):  # pragma: no cover
    """Return the registered Form matching *form* (case-insensitive).

    Raises:
        UnknownFormError: when no form is registered under that name
    """
    try:
        return _cache[form.lower()]
    except KeyError:
        raise UnknownFormError(form)
| 627 | 0 | 173 |
181dd2ebd78938c6289753f8e6243a7398061b6f | 2,664 | py | Python | chroma_core/services/__init__.py | beevans/integrated-manager-for-lustre | 6b7e49b8a58058e6139ad815a4388f21a581dfa0 | [
"MIT"
] | 52 | 2018-09-13T03:26:23.000Z | 2022-03-25T16:51:37.000Z | chroma_core/services/__init__.py | beevans/integrated-manager-for-lustre | 6b7e49b8a58058e6139ad815a4388f21a581dfa0 | [
"MIT"
] | 1,264 | 2018-06-15T19:50:57.000Z | 2022-03-28T08:19:04.000Z | chroma_core/services/__init__.py | beevans/integrated-manager-for-lustre | 6b7e49b8a58058e6139ad815a4388f21a581dfa0 | [
"MIT"
] | 27 | 2018-06-18T08:51:59.000Z | 2022-03-16T15:35:34.000Z | # Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import threading
from kombu.connection import BrokerConnection
from kombu.messaging import Exchange
from kombu.entity import TRANSIENT_DELIVERY_MODE
import os
import sys
import traceback
from chroma_core.services.log import log_register, trace
import settings
class ChromaService(object):
"""Define a subclass of this for each service. Must implement `start` and `stop`
methods: typically starting a server/thread in `start` and tearing it down in `stop`.
Use the `log` instance attribute for all logging, this is set up with a logger that
tags messages with the service name.
"""
@property
class ServiceThread(threading.Thread):
"""Sometimes a single service may have multiple threads of execution. Use this
class rather than the bare threading.Thread to help Chroma keep track of your threads.
This wraps a Thread-like object which has a `run` and `stop` method, passed in at
construction time`
"""
| 27.75 | 111 | 0.650526 | # Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import threading
from kombu.connection import BrokerConnection
from kombu.messaging import Exchange
from kombu.entity import TRANSIENT_DELIVERY_MODE
import os
import sys
import traceback
from chroma_core.services.log import log_register, trace
import settings
class ChromaService(object):
    """Define a subclass of this for each service.  Must implement `run` and `stop`
    methods: typically starting a server/thread in `run` and tearing it down in `stop`.

    Use the `log` instance attribute for all logging; it is set up with a logger
    tagged with the service name.
    """

    def __init__(self):
        # Populated with a service-tagged logger by the service runner.
        self.log = None

    @property
    def name(self):
        # The service name is the last component of the defining module's path.
        return type(self).__module__.rsplit(".", 1)[-1]

    def run(self):
        return None

    def stop(self):
        return None
class ServiceThread(threading.Thread):
    """Sometimes a single service may have multiple threads of execution.  Use this
    class rather than the bare threading.Thread to help Chroma keep track of your threads.

    This wraps a Thread-like object which has a `run` and `stop` method, passed in at
    construction time.
    """

    def __init__(self, service):
        super(ServiceThread, self).__init__()
        self.service = service  # wrapped object exposing run()/stop()
        self.log = log_register("service_thread")
        self._started = False  # guards against stop() before start()

    def start(self):
        super(ServiceThread, self).start()
        self._started = True

    def run(self):
        # Prefer the wrapped service's declared name for logging/thread naming.
        if hasattr(self.service, "name"):
            name = self.service.name
        else:
            name = self.service.__class__.__name__
        self.log.debug("running ServiceThread '%s'" % name)
        self.name = name

        if trace:
            # Optional tracing hook (see chroma_core.services.log.trace).
            sys.settrace(trace)

        try:
            self.service.run()
        except Exception:
            exc_info = sys.exc_info()
            backtrace = "\n".join(traceback.format_exception(*(exc_info or sys.exc_info())))
            self.log.error("Exception in main loop. backtrace: %s" % backtrace)
            # Hard-exit the whole process so a dead service thread cannot
            # leave the daemon running half-alive.
            os._exit(-1)

    def stop(self):
        if not self._started:
            self.log.error(
                "Attempted to stop ServiceThread '%s' before it was started." % self.service.__class__.__name__
            )
            os._exit(-1)
        else:
            self.service.stop()
def _amqp_connection():
    """Return a new kombu BrokerConnection to the configured AMQP broker."""
    return BrokerConnection(settings.BROKER_URL)
def _amqp_exchange():
    """Return the transient (non-persistent, non-durable) topic exchange used for RPC."""
    return Exchange("rpc", type="topic", delivery_mode=TRANSIENT_DELIVERY_MODE, durable=False)
| 1,292 | 0 | 261 |
a61c895bde5c35a6c85ab9707bf4e5d78c16b929 | 312 | py | Python | my_awesome_project/users/migrations/0003_remove_user_bio.py | SocratesAncient/new | b8ffcb85b3691389d6a2627df1301643667a0de3 | [
"MIT"
] | null | null | null | my_awesome_project/users/migrations/0003_remove_user_bio.py | SocratesAncient/new | b8ffcb85b3691389d6a2627df1301643667a0de3 | [
"MIT"
] | null | null | null | my_awesome_project/users/migrations/0003_remove_user_bio.py | SocratesAncient/new | b8ffcb85b3691389d6a2627df1301643667a0de3 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.12 on 2021-06-23 17:25
from django.db import migrations
| 17.333333 | 48 | 0.570513 | # Generated by Django 3.1.12 on 2021-06-23 17:25
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration dropping the ``bio`` field from the user model."""

    dependencies = [
        ("users", "0002_user_bio"),
    ]

    operations = [
        migrations.RemoveField(model_name="user", name="bio"),
    ]
| 0 | 205 | 23 |
83b834e0a575aa2751c0d31ab4252ffc669e1430 | 4,513 | py | Python | tf2onnx/convert.py | duli2012/tensorflow-onnx | 32f7264e81fa69ebc36c204c7a606e2e8be90d80 | [
"MIT"
] | null | null | null | tf2onnx/convert.py | duli2012/tensorflow-onnx | 32f7264e81fa69ebc36c204c7a606e2e8be90d80 | [
"MIT"
] | null | null | null | tf2onnx/convert.py | duli2012/tensorflow-onnx | 32f7264e81fa69ebc36c204c7a606e2e8be90d80 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
python -m tf2onnx.convert : tool to convert a frozen tensorflow to onnx
"""
from __future__ import division
from __future__ import print_function
import argparse
import sys
import onnx
from onnx import helper
import tensorflow as tf
import tf2onnx.utils
from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
from tf2onnx.tfonnx import process_tf_graph, tf_optimize, DEFAULT_TARGET, POSSIBLE_TARGETS
_TENSORFLOW_DOMAIN = "ai.onnx.converters.tensorflow"
# pylint: disable=unused-argument
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True, help="input model file")
parser.add_argument("--output", help="output model file")
parser.add_argument("--inputs", required=True, help="model input_names")
parser.add_argument("--outputs", required=True, help="model output_names")
parser.add_argument("--opset", type=int, default=None, help="highest opset to use")
parser.add_argument("--custom-ops", help="list of custom ops")
parser.add_argument("--target", default=",".join(DEFAULT_TARGET), help="target platform")
parser.add_argument("--continue_on_error", help="continue_on_error", action="store_true")
parser.add_argument("--verbose", help="verbose output", action="store_true")
parser.add_argument("--fold_const", help="enable tf constant_folding transformation before conversion",
action="store_true")
# experimental
parser.add_argument("--inputs-as-nchw", help="transpose inputs as from nhwc to nchw")
# depreciated, going to be removed some time in the future
parser.add_argument("--unknown-dim", type=int, default=-1, help="default for unknown dimensions")
args = parser.parse_args()
args.shape_override = None
if args.inputs:
args.inputs, args.shape_override = tf2onnx.utils.split_nodename_and_shape(args.inputs)
if args.outputs:
args.outputs = args.outputs.split(",")
if args.inputs_as_nchw:
args.inputs_as_nchw = args.inputs_as_nchw.split(",")
if args.target:
args.target = args.target.split(",")
for target in args.target:
if target not in POSSIBLE_TARGETS:
print("unknown target ", target)
sys.exit(1)
return args
main()
| 37.297521 | 107 | 0.674053 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
python -m tf2onnx.convert : tool to convert a frozen tensorflow to onnx
"""
from __future__ import division
from __future__ import print_function
import argparse
import sys
import onnx
from onnx import helper
import tensorflow as tf
import tf2onnx.utils
from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
from tf2onnx.tfonnx import process_tf_graph, tf_optimize, DEFAULT_TARGET, POSSIBLE_TARGETS
_TENSORFLOW_DOMAIN = "ai.onnx.converters.tensorflow"
# pylint: disable=unused-argument
def get_args():
    """Parse the command line.

    Returns the argparse namespace with three post-processing steps applied:
    ``inputs`` is split into names + shape overrides, ``outputs`` and
    ``inputs_as_nchw`` are split on commas, and ``target`` is validated
    against POSSIBLE_TARGETS (exiting with status 1 on an unknown entry).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True, help="input model file")
    parser.add_argument("--output", help="output model file")
    parser.add_argument("--inputs", required=True, help="model input_names")
    parser.add_argument("--outputs", required=True, help="model output_names")
    parser.add_argument("--opset", type=int, default=None, help="highest opset to use")
    parser.add_argument("--custom-ops", help="list of custom ops")
    parser.add_argument("--target", default=",".join(DEFAULT_TARGET), help="target platform")
    parser.add_argument("--continue_on_error", help="continue_on_error", action="store_true")
    parser.add_argument("--verbose", help="verbose output", action="store_true")
    parser.add_argument("--fold_const", help="enable tf constant_folding transformation before conversion",
                        action="store_true")
    # experimental
    parser.add_argument("--inputs-as-nchw", help="transpose inputs as from nhwc to nchw")
    # depreciated, going to be removed some time in the future
    parser.add_argument("--unknown-dim", type=int, default=-1, help="default for unknown dimensions")
    args = parser.parse_args()

    args.shape_override = None
    if args.inputs:
        # e.g. "input:0[1,28,28]" -> names plus an explicit shape override map
        args.inputs, args.shape_override = tf2onnx.utils.split_nodename_and_shape(args.inputs)
    if args.outputs:
        args.outputs = args.outputs.split(",")
    if args.inputs_as_nchw:
        args.inputs_as_nchw = args.inputs_as_nchw.split(",")
    if args.target:
        args.target = args.target.split(",")
        for t in args.target:
            if t in POSSIBLE_TARGETS:
                continue
            print("unknown target ", t)
            sys.exit(1)
    return args
def default_custom_op_handler(ctx, node, name, args):
    """Fallback handler for user-listed custom ops.

    Leaves the node unchanged apart from tagging it with the TensorFlow
    custom-op domain so consumers can tell it is not a standard ONNX op.
    """
    node.domain = _TENSORFLOW_DOMAIN
    return node
def main():
    """Command-line entry point: convert a frozen TF graph to ONNX.

    Reads the frozen GraphDef named by --input, runs the tf2onnx conversion
    pipeline plus the transpose optimizer, and (optionally) serializes the
    resulting ONNX model to --output.
    """
    args = get_args()
    opset = tf2onnx.utils.find_opset(args.opset)
    print("using tensorflow={}, onnx={}, opset={}, tfonnx={}/{}".format(
        tf.__version__, onnx.__version__, opset,
        tf2onnx.__version__, tf2onnx.version.git_version[:6]))

    # override unknown dimensions from -1 to 1 (aka batchsize 1) since not every runtime does
    # support unknown dimensions.
    tf2onnx.utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {op: default_custom_op_handler for op in args.custom_ops.split(",")}
        extra_opset = [helper.make_opsetid(_TENSORFLOW_DOMAIN, 1)]
    else:
        custom_ops = {}
        extra_opset = None

    # Load the frozen graph from disk.
    graph_def = tf.GraphDef()
    with tf.gfile.FastGFile(args.input, 'rb') as f:
        graph_def.ParseFromString(f.read())

    # todo: consider to enable const folding by default?
    graph_def = tf_optimize(args.inputs, args.outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             verbose=args.verbose,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override)

    # NOTE(review): args.verbose is a store_true bool, so `is not None` is
    # always True -- this probably meant plain `args.verbose`; confirm intent.
    optimizer = TransposeOptimizer(g, args.verbose is not None)
    optimizer.optimize()

    model_proto = g.make_model(
        "converted from {}".format(args.input), args.outputs,
        optimize=not args.continue_on_error)

    # write onnx graph
    if args.output:
        with open(args.output, "wb") as f:
            f.write(model_proto.SerializeToString())
if __name__ == "__main__":
    # BUGFIX: the CLI was invoked unconditionally at module level, so merely
    # importing this module ran the converter. `python -m tf2onnx.convert`
    # still works because -m sets __name__ to "__main__".
    main()
| 2,044 | 0 | 46 |
82d84c7d806db8f3bd318b5491ca898dbaeae65b | 317 | py | Python | model/http/__init__.py | dc-avasilev/testing-api | 3f46406daa7e9145352e8cfba0878b2b3df510c6 | [
"MIT"
] | 1 | 2021-09-13T21:37:58.000Z | 2021-09-13T21:37:58.000Z | model/http/__init__.py | dc-avasilev/testing-api | 3f46406daa7e9145352e8cfba0878b2b3df510c6 | [
"MIT"
] | null | null | null | model/http/__init__.py | dc-avasilev/testing-api | 3f46406daa7e9145352e8cfba0878b2b3df510c6 | [
"MIT"
] | null | null | null | from .compare import (
CompareABC,
CompareDicts,
CompareEndswith,
CompareIgnore,
CompareIgnoreOrder
)
from .message import Message
from .request import Request
from .response import (
BaseBodyParser,
JSONBodyParser,
Response,
XMLBodyParser,
get_schema,
validate_response
)
| 17.611111 | 28 | 0.716088 | from .compare import (
CompareABC,
CompareDicts,
CompareEndswith,
CompareIgnore,
CompareIgnoreOrder
)
from .message import Message
from .request import Request
from .response import (
BaseBodyParser,
JSONBodyParser,
Response,
XMLBodyParser,
get_schema,
validate_response
)
| 0 | 0 | 0 |
b2ba1ceb08ae0521e02059a609e9d4fd255d96f0 | 518 | py | Python | from_base64.py | jabbalaci/Bash-Utils | c6fb115834a221c4aaba8eaa37f650beea45ef29 | [
"MIT"
] | 73 | 2015-03-31T01:12:26.000Z | 2021-07-10T19:45:04.000Z | from_base64.py | doc22940/Bash-Utils | c6fb115834a221c4aaba8eaa37f650beea45ef29 | [
"MIT"
] | 2 | 2017-01-06T17:17:42.000Z | 2017-08-23T18:35:55.000Z | from_base64.py | doc22940/Bash-Utils | c6fb115834a221c4aaba8eaa37f650beea45ef29 | [
"MIT"
] | 27 | 2015-01-03T18:51:23.000Z | 2020-11-15T11:49:51.000Z | #!/usr/bin/env python3
"""
Convert a base64 string back to a normal string (decode).
"""
import readline # to overcome the 4k input limit
from lib.jhash import base64_to_str as back
##############################################################################
if __name__ == "__main__":
main()
| 19.185185 | 78 | 0.515444 | #!/usr/bin/env python3
"""
Convert a base64 string back to a normal string (decode).
"""
import readline # to overcome the 4k input limit
from lib.jhash import base64_to_str as back
def main():
    """Read one base64 string from the user and print the decoded text."""
    try:
        encoded = input("base64 string> ")
    except (EOFError, KeyboardInterrupt):
        print()
        return
    print("Input: '{}'".format(encoded))
    print()
    print(back(encoded))
##############################################################################
if __name__ == "__main__":
main()
| 187 | 0 | 23 |
850808f1f3d839b5e00f3b457fc33d1c95a55e51 | 6,538 | py | Python | ai_economist/foundation/components/education.py | CornellDataScience/AI-Economist | 1203eccb64b8f744229f738408e6b44ff5aaf21a | [
"BSD-3-Clause"
] | 1 | 2021-11-11T03:29:00.000Z | 2021-11-11T03:29:00.000Z | ai_economist/foundation/components/education.py | CornellDataScience/AI-Economist | 1203eccb64b8f744229f738408e6b44ff5aaf21a | [
"BSD-3-Clause"
] | 1 | 2021-11-06T22:29:27.000Z | 2021-11-06T22:29:27.000Z | ai_economist/foundation/components/education.py | CornellDataScience/ai-economist | 1203eccb64b8f744229f738408e6b44ff5aaf21a | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from ai_economist.foundation.base.base_component import BaseComponent, component_registry
@component_registry.add
class GetEducated(BaseComponent):
"""
Environments expand the agents' state/action spaces by querying:
get_n_actions
get_additional_state_fields
Environments expand their dynamics by querying:
component_step
generate_observations
generate_masks
Environments expand logging behavior by querying:
get_metrics
get_dense_log
Because they are built as Python objects, component instances can also be
stateful. Stateful attributes are reset via calls to:
additional_reset_steps
"""
name = "GetEducated"
required_entities = ["Coin", "Labor", "build_skill"]
agent_subclasses = ["BasicMobileAgent"]
def agent_can_get_educated(self, agent):
"""Return True if agent can actually get educated."""
# See if the agent has the resources necessary to complete the action
if agent.state["inventory"]["Coin"] < self.tuition:
return False
# # Do nothing if skill is already max
# if True: # TODO see how to get skill
# return False
# If we made it here, the agent can go to college.
return True
def get_additional_state_fields(self, agent_cls_name):
"""
Return a dictionary of {state_field: reset_val} managed by this Component
class for agents of type agent_cls_name. This also partially controls reset
behavior.
Args:
agent_cls_name (str): name of the Agent class for which additional states
are being queried. For example, "BasicMobileAgent".
Returns:
extra_state_dict (dict): A dictionary of {"state_field": reset_val} for
each extra state field that this component adds/manages to agents of
type agent_cls_name. This extra_state_dict is incorporated into
agent.state for each agent of this type. Note that the keyed fields
will be reset to reset_val when the environment is reset.
"""
if agent_cls_name not in self.agent_subclasses:
return {}
if agent_cls_name == "BasicMobileAgent":
return {"tuition_payment": float(self.tuition)} # check
raise NotImplementedError
def get_n_actions(self, agent_cls_name):
"""
Args:
agent_cls_name (str): name of the Agent class for which number of actions
is being queried. For example, "BasicMobileAgent".
Returns:
action_space (None, int, or list): If the component does not add any
actions for agents of type agent_cls_name, return None. If it adds a
single action space, return an integer specifying the number of
actions in the action space. If it adds multiple action spaces,
return a list of tuples ("action_set_name", num_actions_in_set).
See below for further detail.
"""
if agent_cls_name == "BasicMobileAgent":
return 1
return None
def component_step(self):
"""
See base_component.py for detailed description.
Convert coin to skill for agents that choose to go to school and can.
"""
world = self.world
build = []
# Apply any go_to_school actions taken by the mobile agents
for agent in world.get_random_order_agents():
action = agent.get_component_action(self.name)
# This component doesn't apply to this agent!
if action is None:
continue
# NO-OP!
if action == 0:
pass
# Learn! (If you can.)
elif action == 1:
if self.agent_can_get_educated(agent):
# Remove the resources
agent.state["inventory"]["Coin"] -= self.tuition
# Receive skills for going to school
agent.state["build_skill"] += self.skill_gain
# self.payment_max_skill_multiplier += self.skill_gain
# Incur the labor cost for going to school
agent.state["endogenous"]["Labor"] += self.education_labor
# self.number_times_educated += 1
else:
raise ValueError
# self.builds.append(build) | 37.574713 | 105 | 0.599113 | import numpy as np
from ai_economist.foundation.base.base_component import BaseComponent, component_registry
@component_registry.add
class GetEducated(BaseComponent):
    """
    Lets agents pay tuition (Coin) in exchange for a permanent build-skill gain.

    Environments expand the agents' state/action spaces by querying:
        get_n_actions
        get_additional_state_fields
    Environments expand their dynamics by querying:
        component_step
        generate_observations
        generate_masks
    Environments expand logging behavior by querying:
        get_metrics
        get_dense_log
    Because they are built as Python objects, component instances can also be
    stateful. Stateful attributes are reset via calls to:
        additional_reset_steps
    """

    name = "GetEducated"
    required_entities = ["Coin", "Labor", "build_skill"]
    agent_subclasses = ["BasicMobileAgent"]

    def __init__(
        self,
        *base_component_args,
        tuition=100,  # same tuition cost as building 10 houses <- tweak later
        education_labor=100.0,
        skill_gain=1,  # BUGFIX: the comma after this default was missing,
                       # which made the whole signature a SyntaxError.
        skill_dist="none",  # added (backward compatible): previously read in
                            # additional_reset_steps but never set anywhere.
        **base_component_kwargs
    ):
        """
        Args:
            tuition: Coin cost of one education action (must be >= 0).
            education_labor: Labor incurred by one education action (>= 0).
            skill_gain: build_skill added per completed education action.
            skill_dist: distribution used to re-sample skills on reset
                ("none", "pareto", or "lognormal").
        """
        super().__init__(*base_component_args, **base_component_kwargs)

        self.tuition = int(tuition)
        assert self.tuition >= 0

        self.skill_gain = float(skill_gain)

        self.education_labor = float(education_labor)
        assert self.education_labor >= 0

        self.skill_dist = skill_dist
        # BUGFIX: sampled_skills was written by additional_reset_steps and read
        # by generate_observations without ever being initialized.
        self.sampled_skills = {}
        self.educates = []

    def agent_can_get_educated(self, agent):
        """Return True if agent can actually get educated."""
        # See if the agent has the resources necessary to complete the action
        if agent.state["inventory"]["Coin"] < self.tuition:
            return False
        # # Do nothing if skill is already max
        # if True: # TODO see how to get skill
        #     return False
        # If we made it here, the agent can go to college.
        return True

    def get_additional_state_fields(self, agent_cls_name):
        """
        Return a dictionary of {state_field: reset_val} managed by this Component
        class for agents of type agent_cls_name. The keyed fields are merged into
        agent.state and reset to reset_val when the environment is reset.
        """
        if agent_cls_name not in self.agent_subclasses:
            return {}
        if agent_cls_name == "BasicMobileAgent":
            return {"tuition_payment": float(self.tuition)}  # check
        raise NotImplementedError

    def additional_reset_steps(self):
        """Re-sample each agent's build skill according to self.skill_dist."""
        world = self.world
        for agent in world.agents:
            if self.skill_dist == "none":
                sampled_skill = 1
                pay_rate = 1
            elif self.skill_dist == "pareto":
                sampled_skill = np.random.pareto(4)
                # NOTE(review): PMSM is not defined anywhere in this module --
                # presumably a module-level "payment max skill multiplier"
                # constant; confirm before enabling pareto/lognormal modes.
                pay_rate = np.minimum(PMSM, (PMSM - 1) * sampled_skill + 1)
            elif self.skill_dist == "lognormal":
                sampled_skill = np.random.lognormal(-1, 0.5)
                pay_rate = np.minimum(PMSM, (PMSM - 1) * sampled_skill + 1)
            else:
                raise NotImplementedError
            agent.state["build_skill"] = float(sampled_skill)
            self.sampled_skills[agent.idx] = sampled_skill

    def get_n_actions(self, agent_cls_name):
        """
        Return the number of actions this component adds for agents of type
        agent_cls_name (1 for BasicMobileAgent: "get educated"), or None if the
        component adds no actions for that class.
        """
        if agent_cls_name == "BasicMobileAgent":
            return 1
        return None

    def generate_masks(self, completions=0):
        """Mask the education action for agents that cannot afford tuition."""
        masks = {}
        for agent in self.world.agents:
            # BUGFIX: this previously tested undefined attributes
            # (self.widget_price / self.available_widget_units), apparently
            # copy-pasted from a widget-marketplace component, and would have
            # raised AttributeError. The mask now mirrors the affordability
            # rule actually enforced in component_step.
            masks[agent.idx] = np.array([self.agent_can_get_educated(agent)])
        return masks

    def component_step(self):
        """
        See base_component.py for detailed description.
        Convert coin to skill for agents that choose to go to school and can.
        """
        world = self.world
        build = []
        # Apply any go_to_school actions taken by the mobile agents
        for agent in world.get_random_order_agents():
            action = agent.get_component_action(self.name)
            # This component doesn't apply to this agent!
            if action is None:
                continue
            # NO-OP!
            if action == 0:
                pass
            # Learn! (If you can.)
            elif action == 1:
                if self.agent_can_get_educated(agent):
                    # Remove the resources
                    agent.state["inventory"]["Coin"] -= self.tuition
                    # Receive skills for going to school
                    agent.state["build_skill"] += self.skill_gain
                    # self.payment_max_skill_multiplier += self.skill_gain
                    # Incur the labor cost for going to school
                    agent.state["endogenous"]["Labor"] += self.education_labor
                    # self.number_times_educated += 1
            else:
                raise ValueError
        # self.builds.append(build)

    def generate_observations(self):
        """Expose tuition, skill gain, and each agent's sampled skill."""
        obs_dict = dict()
        for agent in self.world.agents:
            obs_dict[agent.idx] = {
                "skill_gain": self.skill_gain,
                "tuition": self.tuition,
                "build_skill": self.sampled_skills[agent.idx]
            }
        return obs_dict
1f0f718e8960427f619ae5a29798356a0df97955 | 2,754 | py | Python | benchmark/qutip_benchmarks.py | trxw/qutip | b923c973edd9a071d86eb849650661549f73585f | [
"BSD-3-Clause"
] | 1 | 2015-11-06T06:35:06.000Z | 2015-11-06T06:35:06.000Z | benchmark/qutip_benchmarks.py | trxw/qutip | b923c973edd9a071d86eb849650661549f73585f | [
"BSD-3-Clause"
] | null | null | null | benchmark/qutip_benchmarks.py | trxw/qutip | b923c973edd9a071d86eb849650661549f73585f | [
"BSD-3-Clause"
] | null | null | null | # This file is part of QuTiP.
#
# QuTiP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QuTiP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuTiP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011 and later, Paul D. Nation & Robert J. Johansson
#
###########################################################################
import platform
import json
import numpy as np
from scipy import *
from qutip import *
from tests import *
#
# command-line parsing
#
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--run-profiler",
help="run profiler on qutip benchmarks",
action='store_true')
parser.add_argument("-o", "--output-file",
help="file name for benchmark output",
default="qutip-benchmarks.json", type=str)
parser.add_argument("-N", "--runs",
help="number of times to perform each benchmark",
default=1, type=int)
args = parser.parse_args()
qutip_info = [{'label': 'QuTiP', 'value': qutip.__version__},
{'label': 'Python', 'value': platform.python_version()},
{'label': 'NumPy', 'value': numpy.__version__},
{'label': 'SciPy', 'value': scipy.__version__}]
#---------------------
# Run Python Benchmarks
#---------------------
if args.run_profiler:
import cProfile
cProfile.run('run_tests(1)', 'qutip_benchmarks_profiler')
import pstats
p = pstats.Stats('qutip_benchmarks_profiler')
p.sort_stats('cumulative').print_stats(50)
else:
times, names = run_tests(args.runs)
data = [{'name': names[n], 'time': times[n]} for n in range(len(names))]
qutip_info.append({'label': 'Acc. time', 'value': "%.2f s" % sum(times)})
qutip_bm = {"info": qutip_info, "data": data}
with open(args.output_file, "w") as outfile:
json.dump(qutip_bm, outfile, sort_keys=True, indent=4)
| 32.4 | 77 | 0.612564 | # This file is part of QuTiP.
#
# QuTiP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QuTiP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuTiP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011 and later, Paul D. Nation & Robert J. Johansson
#
###########################################################################
import platform
import json
import numpy as np
from scipy import *
from qutip import *
from tests import *
#
# command-line parsing
#
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--run-profiler",
help="run profiler on qutip benchmarks",
action='store_true')
parser.add_argument("-o", "--output-file",
help="file name for benchmark output",
default="qutip-benchmarks.json", type=str)
parser.add_argument("-N", "--runs",
help="number of times to perform each benchmark",
default=1, type=int)
args = parser.parse_args()
qutip_info = [{'label': 'QuTiP', 'value': qutip.__version__},
{'label': 'Python', 'value': platform.python_version()},
{'label': 'NumPy', 'value': numpy.__version__},
{'label': 'SciPy', 'value': scipy.__version__}]
#---------------------
# Run Python Benchmarks
#---------------------
def run_tests(N):
    """Run every registered benchmark N times.

    Returns (times, names): the per-benchmark timings and their labels.
    Benchmarks that raise are recorded as "unsupported" with a 0.0 time.
    """
    # setup list for python times
    python_times = []
    test_names = []
    for test_function in test_function_list():
        try:
            out = test_function(N)
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt, making the benchmark run un-interruptible.
            out = [["unsupported"], [0.0]]
        test_names += out[0]
        python_times += out[1]
    return python_times, test_names
if args.run_profiler:
import cProfile
cProfile.run('run_tests(1)', 'qutip_benchmarks_profiler')
import pstats
p = pstats.Stats('qutip_benchmarks_profiler')
p.sort_stats('cumulative').print_stats(50)
else:
times, names = run_tests(args.runs)
data = [{'name': names[n], 'time': times[n]} for n in range(len(names))]
qutip_info.append({'label': 'Acc. time', 'value': "%.2f s" % sum(times)})
qutip_bm = {"info": qutip_info, "data": data}
with open(args.output_file, "w") as outfile:
json.dump(qutip_bm, outfile, sort_keys=True, indent=4)
| 324 | 0 | 22 |
456c5a74ca205e6d431e1daf8aae33ca461c59d9 | 452 | py | Python | Python_Exercicios/angulos.py | thalles-dreissig20/Quebra_Cabeca | eeb9458dbabac72d9867e5ec5d7f1aa9b5993d79 | [
"MIT"
] | null | null | null | Python_Exercicios/angulos.py | thalles-dreissig20/Quebra_Cabeca | eeb9458dbabac72d9867e5ec5d7f1aa9b5993d79 | [
"MIT"
] | 1 | 2021-11-29T18:37:14.000Z | 2021-11-29T18:37:14.000Z | Python_Exercicios/angulos.py | thalles-dreissig20/Quebra_Cabeca | eeb9458dbabac72d9867e5ec5d7f1aa9b5993d79 | [
"MIT"
] | null | null | null | from math import radians, sin, cos, tan
angulo = float(input('Digita o angulo que voce deseja: '))
seno = sin(radians(angulo))
print('O angulo de {} tem o seno de {:.2f}'.format(angulo, seno))
coseno = cos(radians(angulo))
print('O angulo de {} tem o cosseno de {:.2f}'.format(angulo, coseno))
tangente = tan(radians(angulo))
print('O angulo de {} tem a tangente de {:.2f}'. format(angulo, tangente))
""" DESCOBRIR O SENO COSSENO E A TANGENTE """ | 41.090909 | 74 | 0.681416 | from math import radians, sin, cos, tan
# Read an angle in degrees and report its sine, cosine and tangent.
angulo = float(input('Digita o angulo que voce deseja: '))
rad = radians(angulo)  # math's trig functions expect radians

seno = sin(rad)
print('O angulo de {} tem o seno de {:.2f}'.format(angulo, seno))

coseno = cos(rad)
print('O angulo de {} tem o cosseno de {:.2f}'.format(angulo, coseno))

tangente = tan(rad)
print('O angulo de {} tem a tangente de {:.2f}'.format(angulo, tangente))

""" DESCOBRIR O SENO COSSENO E A TANGENTE """
34770d8a031ecc93e3549ae52b9ba394afc22181 | 603 | py | Python | pkgs/anaconda-navigator-1.1.0-py27_0/lib/python2.7/site-packages/anaconda_navigator/templates/__init__.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/anaconda-navigator-1.1.0-py27_0/lib/python2.7/site-packages/anaconda_navigator/templates/__init__.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/anaconda-navigator-1.1.0-py27_0/lib/python2.7/site-packages/anaconda_navigator/templates/__init__.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2016 Continuum Analytics, Inc.
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
#
"""
This folder contains jinja2 templates used by Anaconda Navigator.
This folder is defined as a python module so that some convenience global
variables can be defined.
"""
# Standard library imports
from __future__ import absolute_import, division
import os.path as osp
DATA_PATH = osp.dirname(osp.realpath(__file__))
EMPTY_TEMPLATE_PATH = osp.join(DATA_PATH, 'empty.html')
VIDEO_TEMPLATE_PATH = osp.join(DATA_PATH, 'video.html')
| 27.409091 | 73 | 0.772803 | # -*- coding: utf-8 -*-
#
# Copyright 2016 Continuum Analytics, Inc.
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
#
"""
This folder contains jinja2 templates used by Anaconda Navigator.
This folder is defined as a python module so that some convenience global
variables can be defined.
"""
# Standard library imports
from __future__ import absolute_import, division
import os.path as osp
# Directory containing this module's jinja2 templates, as an absolute path.
DATA_PATH = osp.dirname(osp.realpath(__file__))

# Convenience absolute paths to the individual templates shipped alongside.
EMPTY_TEMPLATE_PATH, VIDEO_TEMPLATE_PATH = (
    osp.join(DATA_PATH, name) for name in ('empty.html', 'video.html')
)
| 0 | 0 | 0 |
e7068a485f2ca1549f7033e2b99f4576733becd0 | 710 | py | Python | flask_wiki/frontend/frontend.py | gcavalcante8808/flask-wiki | a2c0af2e7fa6ce64faeb38e678a2e207ff63f3a6 | [
"BSD-2-Clause"
] | null | null | null | flask_wiki/frontend/frontend.py | gcavalcante8808/flask-wiki | a2c0af2e7fa6ce64faeb38e678a2e207ff63f3a6 | [
"BSD-2-Clause"
] | 35 | 2015-10-08T21:00:22.000Z | 2021-06-25T15:29:41.000Z | flask_wiki/frontend/frontend.py | gcavalcante8808/flask-wiki | a2c0af2e7fa6ce64faeb38e678a2e207ff63f3a6 | [
"BSD-2-Clause"
] | 1 | 2019-07-09T14:17:48.000Z | 2019-07-09T14:17:48.000Z | from flask import Flask, render_template, abort, redirect, url_for
from flask_script import Manager
from jinja2 import TemplateNotFound
app = Flask(__name__)
app.config['TESTING'] = True
manager = Manager(app)
@app.route('/', endpoint='frontend-index')
@app.route('/<page>', endpoint='frontend-pages')
def show(page='index'):
"""
Try to Deliver a page.
:param page: name of the page
:return: template.
"""
try:
return render_template('pages/index.html')
except (TemplateNotFound,):
abort(404)
if __name__ == '__main__':
manager.run()
| 23.666667 | 66 | 0.676056 | from flask import Flask, render_template, abort, redirect, url_for
from flask_script import Manager
from jinja2 import TemplateNotFound
app = Flask(__name__)
app.config['TESTING'] = True
manager = Manager(app)
@app.route('/', endpoint='frontend-index')
def root():
    """Redirect the bare base URL to the real index page."""
    # Redirect Base URL for the real Index Page.
    return redirect(url_for('frontend-pages', page='index'))
@app.route('/<page>', endpoint='frontend-pages')
def show(page='index'):
    """
    Try to Deliver a page.

    :param page: name of the page
    :return: template.
    """
    try:
        # BUGFIX: the requested page name was previously ignored and
        # 'pages/index.html' was rendered for every URL; render the template
        # matching <page> instead. Jinja's loader rejects path escapes by
        # raising TemplateNotFound, which the handler below turns into a 404.
        return render_template('pages/{}.html'.format(page))
    except (TemplateNotFound,):
        abort(404)
if __name__ == '__main__':
manager.run()
| 100 | 0 | 22 |
7adeca7d93d4713767194c4859d3d9beb08c2e15 | 2,855 | py | Python | melodic-manatees/early_internet/main/views.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | melodic-manatees/early_internet/main/views.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | melodic-manatees/early_internet/main/views.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | import requests
from decouple import config
from django.shortcuts import render, redirect
from django.core.exceptions import ObjectDoesNotExist
from users.models import UserPreferences
from diary.models import DiaryEntry
from diary.forms import DiaryEntryForm
| 33.988095 | 86 | 0.582837 | import requests
from decouple import config
from django.shortcuts import render, redirect
from django.core.exceptions import ObjectDoesNotExist
from users.models import UserPreferences
from diary.models import DiaryEntry
from diary.forms import DiaryEntryForm
def _fetch_top_stories(api_key):
    """Return the first four NYT US top stories as template-ready dicts."""
    news_url = 'https://api.nytimes.com/svc/topstories/v2/us.json?api-key=' + api_key
    nyt_usnews = requests.get(news_url).json()
    stories = []
    for top_story in range(0, 4):
        stories.append(
            {
                'title': nyt_usnews['results'][top_story]['title'],
                'abstract': nyt_usnews['results'][top_story]['abstract'],
                'url': nyt_usnews['results'][top_story]['url'],
                'img_small': nyt_usnews['results'][top_story]['multimedia'][0]['url']
            }
        )
    return stories


def _fetch_weather(weather_url):
    """Query OpenWeatherMap and flatten the response into the template dict."""
    city_weather = requests.get(weather_url).json()
    return {
        'city': city_weather['name'],
        'country': city_weather['sys']['country'],
        'temperature': city_weather['main']['temp'],
        'description': city_weather['weather'][0]['description']
    }


def main(request):
    """Render the dashboard: NYT top stories plus local weather.

    Authenticated users additionally get their diary entries, a diary form
    (handling POST submissions), and weather for their saved city; anonymous
    users get the default London, UK weather.

    Refactored: the news-fetching and the previously duplicated weather-dict
    construction now live in the _fetch_* helpers above.
    """
    news_key = config('NEWS_KEY')
    weather_key = config('WEATHER_KEY')
    nyt_usnews_list = _fetch_top_stories(news_key)

    if request.user.is_authenticated:
        try:
            entries = DiaryEntry.objects.filter(creator=request.user)
        except ObjectDoesNotExist:
            # NOTE(review): .filter() never raises ObjectDoesNotExist (it
            # returns an empty queryset), so this branch looks dead; kept to
            # preserve behavior -- confirm and simplify.
            entries = None
        if request.method == 'POST':
            form = DiaryEntryForm(request.POST)
            if form.is_valid():
                instance = form.save(commit=False)
                instance.creator = request.user
                instance.save()
                return redirect('/')
        else:
            form = DiaryEntryForm()
        pref = UserPreferences.objects.get(user=request.user)
        weather_url = 'https://api.openweathermap.org/data/2.5/weather?q='\
            f'{pref.city_name},{pref.country_name}&appid={weather_key}&units=metric'
        context = {
            'pref': pref,
            'news': nyt_usnews_list,
            'weather': _fetch_weather(weather_url),
            'entries': entries,
            'form': form
        }
        return render(request, 'main/dashboard.html', context)

    # Anonymous users: fall back to a fixed London, UK forecast.
    weather_url = 'https://api.openweathermap.org/data/2.5/weather?q=' + \
        'london' + ',' + 'uk' + '&appid=' + weather_key + '&units=metric'
    context = {
        'news': nyt_usnews_list,
        'weather': _fetch_weather(weather_url),
    }
    return render(request, 'main/dashboard.html', context)
| 2,571 | 0 | 23 |
02c479119c63e0d1cf589aba3f5923da1cb34526 | 18,242 | py | Python | Logo/Logo.py | huky643/manim_sandbox | 397c8857145f371aeeca611e5e65c60083758d0c | [
"MIT"
] | 2 | 2020-11-01T05:34:28.000Z | 2020-11-01T05:34:31.000Z | Logo/Logo.py | huky643/manim_sandbox | 397c8857145f371aeeca611e5e65c60083758d0c | [
"MIT"
] | null | null | null | Logo/Logo.py | huky643/manim_sandbox | 397c8857145f371aeeca611e5e65c60083758d0c | [
"MIT"
] | null | null | null |
"""
关于logo创意:
1. 基础元素为M和K的负空间设计
2. 白色部分创意来自弦图
3. 整体图案本身可生成一个不错的分形
4. 配色致敬3B1B(具体的蓝色和棕色还得再微调一下)
logo主要创意由@GrakePCH提供,@GZTime、@cigar666、@鹤翔万里都提供了不少宝贵意见。目前设计工作还在继续完善,希望大家多提意见和建议
"""
from manimlib.imports import *
# final
| 35.8389 | 148 | 0.591821 |
"""
关于logo创意:
1. 基础元素为M和K的负空间设计
2. 白色部分创意来自弦图
3. 整体图案本身可生成一个不错的分形
4. 配色致敬3B1B(具体的蓝色和棕色还得再微调一下)
logo主要创意由@GrakePCH提供,@GZTime、@cigar666、@鹤翔万里都提供了不少宝贵意见。目前设计工作还在继续完善,希望大家多提意见和建议
"""
from manimlib.imports import *
class Logo(VGroup):
    """Manim Kindergarten logo.

    Four trios of triangles (inner / mid / outer), one per quadrant, built by
    rotating the upper-right trio around the origin.  The upper-right trio is
    recolored with the brown palette; the remaining trios use the blue
    palette.  Works on both black and white backgrounds.
    """
    CONFIG = {
        'color_1': [WHITE, BLUE_B, BLUE_D],
        'color_2': [WHITE, '#C59978', '#8D5630'],
        # 'color_3': [average_color("#CCCCCC", BLUE_C), BLUE_C, BLUE_D],
        # 'color_4': [average_color("#CCCCCC", "#C59978"), '#C59978', '#8D5630'],
        'color_3': [average_color(WHITE, BLUE_C), BLUE_C, BLUE_D],
        'color_4': [average_color(WHITE, "#C59978"), '#C59978', '#8D5630'],
        'center': ORIGIN,
        'size': 2,
        'shift_out': ORIGIN,
        'black_bg': True,
        'add_bg_square': False,
    }
    def __init__(self, **kwargs):
        # VGroup.__init__ digests CONFIG into attributes (size, colors, ...).
        VGroup.__init__(self, **kwargs)
        self.create_logo()
    def create_logo(self):
        """Build the four triangle trios and the optional background square."""
        # Upper-right trio: inner (p1), mid (p2) and outer (p3) triangle.
        p1 = Polygon(ORIGIN, RIGHT, 2 * UP, stroke_width=0).set_fill(self.color_1[0], 1)
        p2 = Polygon(1.5 * RIGHT, 3 * UR, 3 * UP, stroke_width=0).set_fill(self.color_1[1], 1)
        p3 = Polygon(2 * RIGHT, 3 * RIGHT, 3 * RIGHT + 2 * UP, stroke_width=0).set_fill(self.color_1[2], 1)
        if not self.black_bg:
            # On a white background use the lighter blue variants instead.
            p1.set_fill(self.color_3[0], 1), p2.set_fill(self.color_3[1], 1), p3.set_fill(self.color_3[2], 1)
        self.bg = Square(stroke_width=0, fill_color=BLACK if self.black_bg else WHITE, fill_opacity=1).set_height(self.size * 2.5)
        if self.add_bg_square:
            self.add(self.bg)
        # The other three quadrants are rotated copies of the upper-right trio.
        self.part_ur = VGroup(p1, p2, p3).move_to([2.5, 1., 0] + self.shift_out)
        self.part_ul = self.part_ur.copy().rotate(PI / 2, about_point=ORIGIN)
        self.part_dl = self.part_ur.copy().rotate(PI, about_point=ORIGIN)
        self.part_dr = self.part_ur.copy().rotate(3 * PI / 2, about_point=ORIGIN)
        self.add(self.part_ur, self.part_ul, self.part_dl, self.part_dr)
        self.set_height(self.size).move_to(self.center)
        # Recolor the upper-right trio with the brown palette.
        if self.black_bg:
            self.part_ur[0].set_fill(self.color_2[0], 1), self.part_ur[1].set_fill(self.color_2[1], 1), self.part_ur[2].set_fill(self.color_2[2], 1)
        else:
            self.part_ur[0].set_fill(self.color_4[0], 1), self.part_ur[1].set_fill(self.color_4[1], 1), self.part_ur[2].set_fill(self.color_4[2], 1)
        # Cross-quadrant groups addressing one ring of triangles at a time.
        self.inner_triangles = VGroup(self.part_ur[0], self.part_ul[0], self.part_dl[0], self.part_dr[0])
        self.mid_triangles = VGroup(self.part_ur[1], self.part_ul[1], self.part_dl[1], self.part_dr[1])
        self.outer_triangles = VGroup(self.part_ur[2], self.part_ul[2], self.part_dl[2], self.part_dr[2])
class Logo_image(Scene):
    """Static frame showing the logo on a black and a white background side by side."""
    CONFIG = {
        'camera_config': {
            'background_color': GRAY,
        }
    }
    def construct(self):
        """Place both logo variants and hold the frame for two seconds."""
        dark_variant = Logo(size=4.5, add_bg_square=True)
        dark_variant.shift(LEFT * 3)
        light_variant = Logo(size=4.5, black_bg=False, add_bg_square=True)
        light_variant.shift(RIGHT * 3)
        self.add(light_variant, dark_variant)
        self.wait(2)
class Logo_01(Scene):
    """Intro animation: a square morphs into the logo, which then slides aside
    to reveal the "Manim Kindergarten" wordmark."""
    def construct(self):
        logo = Logo(size=3.2)
        # Four triangles forming a square, matched to the inner ring's size.
        square = VGroup(*[Polygon(ORIGIN, UR, UL), Polygon(ORIGIN, UL, DL), Polygon(ORIGIN, DL, DR), Polygon(ORIGIN, DR, UR),])
        square.set_fill(WHITE, 1).set_stroke(width=0.5, color=WHITE).rotate(np.arctan(0.5)).set_height(logo.inner_triangles.get_height())
        self.add(square)
        self.wait(0.5)
        # Square -> inner ring, then copy the inner ring out to mid and outer rings.
        self.play(ReplacementTransform(square, logo.inner_triangles), run_time=1.5)
        self.wait(0.4)
        self.play(TransformFromCopy(logo.inner_triangles, logo.mid_triangles),
                  TransformFromCopy(logo.inner_triangles, logo.outer_triangles), run_time=2)
        self.wait(0.6)
        # Masks that hide the text until the logo slides left.
        big_black_rect = Rectangle(stroke_width=0, fill_color=BLACK, fill_opacity=1).scale(100).align_to(LEFT * 1.4, RIGHT)
        big_black_rect_02 = big_black_rect.copy()
        big_black_rect_02.add_updater(lambda b: b.align_to(logo, RIGHT).shift(RIGHT * 0.15))
        text_font = '思源黑体 Bold'
        text_manim = Text('Manim', font=text_font, size=1.15).align_to(LEFT * 1.4, LEFT).align_to(logo.part_ur, DOWN)
        text_manim.set_color_by_t2c({'M': logo.color_2[2]})
        text_kindergarten = Text('Kindergarten', font=text_font, size=1.15).align_to(logo.part_dr, UP).align_to(text_manim, LEFT)
        text_kindergarten.set_color_by_t2c({'K': logo.color_1[2]})
        text = VGroup(text_manim, text_kindergarten).shift(LEFT * 8).set_plot_depth(-2)
        self.add(text, big_black_rect, big_black_rect_02)
        # Slide logo left and text right into their final positions.
        self.play(logo.shift, LEFT * 3.6, text.shift, RIGHT * 8, run_time=2)
        self.wait(4)
class Logo_02(Scene):
    """Variant intro: the logo's rings cascade in via MyTransform, then logo
    and wordmark slide to their final layout."""
    CONFIG = {
        "font": "Orbitron Bold",
    }
    def construct(self):
        logo = Logo(size=8/3)
        text = VGroup(
            Text("Manim", font=self.font),
            Text("Kindergarten", font=self.font)
        ).arrange(DOWN, aligned_edge=LEFT, buff=0.3).set_height(2).next_to(logo, buff=1.2).shift(DOWN*0.2)
        # Color the leading letters K (brown) and M (blue).
        text[1][0].set_color(logo.color_2[2])
        text[0][0].set_color(logo.color_1[2])
        # .center() moves logo and text together to the screen center.
        all_logo = VGroup(logo, text).center()
        line = Line(UP, DOWN, stroke_width=8).move_to(mid(logo.get_right(), text.get_left()))
        line.set_length(1.4)
        text.add(line)
        # Mask that keeps the text hidden to the logo's right.
        bg = Rectangle(height=10, width=10, fill_color=BLACK, fill_opacity=1, stroke_width=0)
        bg.add_updater(lambda m: m.move_to(logo, aligned_edge=RIGHT).shift(RIGHT*0.2))
        # Remember final positions, then start enlarged/centered.
        text.save_state()
        text.shift((text.get_right()[0]-bg.get_right()[0]+0.2)*LEFT)
        logo.save_state()
        logo.move_to(ORIGIN)
        logo.scale(1.2)
        logo.rotate(TAU, axis=IN)
        self.add(text, bg)
        self.play(FadeIn(logo[0]))
        self.wait(0.25)
        # Spin each quadrant into the next one (logo[i] -> logo[i+1]).
        for i in range(3):
            self.play(MyTransform(logo[i], logo[i+1], about_point=logo.get_center()), run_time=0.2, rate_func=smooth)
        self.wait(0.5)
        self.play(
            text.restore, logo.restore,
            rate_func=smooth, run_time=1
        )
        self.wait()
class Fractal_by_logo(Scene):
    """Fractal generated by repeatedly replacing every triangle with a scaled
    copy of the whole logo (5 iterations)."""
    def construct(self):
        logo = Logo(size=5)
        start = logo.part_ur.copy().move_to(ORIGIN).set_height(5)
        to_be_replaced = VGroup(start)
        self.add(start)
        self.wait()
        # Total time budget per iteration, shared among its n replacements.
        time_01 = 2.5
        for i in range(5):
            n = len(to_be_replaced)
            to_be_replaced_new = VGroup()
            s = 0  # counts instant replacements since the last wait
            for m in to_be_replaced:
                to_replace_m = logo.copy().move_to(m.get_center()).set_height(m.get_height() * 1.335)
                if time_01/n < 1/self.camera.frame_rate:
                    # Per-item time is below one frame: swap instantly and
                    # batch the accumulated time into occasional waits.
                    self.remove(m)
                    self.add(to_replace_m)
                    s+=1
                    if s * time_01/n > 1/self.camera.frame_rate:
                        self.wait(s * time_01 / n)
                        s = 0
                else:
                    # Enough time per item: animate the replacement.
                    self.play(ReplacementTransform(m, to_replace_m), run_time=time_01/n)
                to_be_replaced_new.add(*to_replace_m)
            self.wait(0.8)
            to_be_replaced = to_be_replaced_new
        self.wait(4)
class MyTransform(Animation):
    """Rotate a copy of `mobject` about a point while cross-fading the colors
    of its first three submobjects toward `target`'s colors; on cleanup the
    rotated copy is swapped for `target` in the scene."""
    CONFIG = {
        "radians": PI/2,      # total rotation angle over the animation
        "axis": OUT,
        "about_point": None,
        "remover": True,      # remove the animated copy when done
    }
    def __init__(self, mobject, target, **kwargs):
        # NOTE(review): Animation.__init__ is deliberately not called here;
        # only digest_config runs -- confirm the base-class setup is not needed.
        digest_config(self, kwargs)
        self.mobject = mobject.copy()
        self.target = target
    def clean_up_from_scene(self, scene):
        # Replace the animated copy with the real target mobject.
        if self.is_remover():
            scene.remove(self.mobject)
            scene.add(self.target)
    def interpolate_mobject(self, alpha):
        now = self.starting_mobject.copy()
        now.rotate(
            alpha * self.radians,
            axis=self.axis,
            about_point=self.about_point,
        )
        # Blend the colors of the three triangles toward the target's colors.
        for i in range(3):
            now[i].set_color(interpolate_color(self.starting_mobject[i].get_color(), self.target[i].get_color(), alpha))
        self.mobject.become(now)
# final
class Logo_black(Scene):
    """Final intro on a black background: the inner-triangle ring flickers in
    with an accelerating strobe, morphs via squares into the full logo, then
    logo and wordmark slide to their final layout."""
    CONFIG = {
        "font": "Orbitron Bold",
    }
    def construct(self):
        logo = Logo(size=8/3)
        # Intermediate square shapes used between triangles and final logo.
        squares = VGroup(*[Polygon(ORIGIN, UR, UL), Polygon(ORIGIN, UL, DL), Polygon(ORIGIN, DL, DR), Polygon(ORIGIN, DR, UR),])
        squares.set_fill(WHITE, 1).set_stroke(width=0.5, color=WHITE).rotate(np.arctan(0.5)).set_height(logo.inner_triangles.get_height())
        for s in squares:
            s.scale(0.8)
        text = VGroup(
            Text("Manim", font=self.font),
            Text("Kindergarten", font=self.font)
        ).arrange(DOWN, aligned_edge=LEFT, buff=0.3).set_height(2.1).next_to(logo, buff=1.5).shift(DOWN*0.2)
        # Color the leading letters K (brown) and M (blue).
        text[1][0].set_color(logo.color_2[2])
        text[0][0].set_color(logo.color_1[2])
        # .center() moves logo and text together to the screen center.
        all_logo = VGroup(logo, text).center()
        line = Line(UP, DOWN, stroke_width=8).move_to(mid(logo.get_right(), text.get_left()))
        line.set_length(1.4)
        text.add(line)
        # Mask that keeps the text hidden to the logo's right.
        bg = Rectangle(height=10, width=10, fill_color=BLACK, fill_opacity=1, stroke_width=0)
        bg.add_updater(lambda m: m.move_to(logo, aligned_edge=RIGHT).shift(RIGHT*0.2))
        # Remember final positions, then start enlarged/centered.
        text.save_state()
        text.shift((text.get_right()[0]-bg.get_right()[0]+0.2)*LEFT)
        logo.save_state()
        logo.move_to(ORIGIN)
        logo.scale(1.5)
        tris = logo.inner_triangles.copy().rotate(-PI)
        self.add(text, bg)
        self.wait(0.3)
        # Accelerating strobe: alternately show/hide the triangle ring with
        # shrinking pauses (same timings as the original add/remove ladder).
        for k, pause in enumerate((0.3, 0.2, 0.15, 0.1, 0.1, 0.075, 0.075, 0.05, 0.05)):
            if k % 2 == 0:
                self.add(tris)
            else:
                self.remove(tris)
            self.wait(pause)
        self.remove(tris)
        self.wait(0.2)
        self.play(ShowSubmobjectsOneByOne(tris), rate_func=linear, run_time=0.4)
        for i in tris:
            self.add(i)
        self.wait(0.1)
        # Triangles -> squares -> final logo quadrants.
        self.play(*[ReplacementTransform(tris[i], squares[i]) for i in range(4)],
                  rate_func=rush_from, run_time=0.6)
        self.wait(0.1)
        self.play(*[ReplacementTransform(squares[i], logo[i]) for i in range(4)],
                  rate_func=rush_from, run_time=0.6)
        self.wait(0.1)
        self.play(
            text.restore, logo.restore,
            rate_func=rush_from, run_time=0.8
        )
        self.wait(1)
        self.play(FadeOut(VGroup(*self.mobjects)))
class Logo_white(Scene):
    """Same intro as Logo_black but rendered on a white background with the
    lighter color palette."""
    CONFIG = {
        "font": "Orbitron Bold",
        "camera_config": {
            "background_color": WHITE,
        },
    }
    def construct(self):
        logo = Logo(size=8/3, black_bg=False)
        # Intermediate square shapes used between triangles and final logo.
        squares = VGroup(*[Polygon(ORIGIN, UR, UL), Polygon(ORIGIN, UL, DL), Polygon(ORIGIN, DL, DR), Polygon(ORIGIN, DR, UR),])
        squares.set_fill(BLUE_C, 1).set_stroke(width=0.5, color=BLUE_C).rotate(np.arctan(0.5)).set_height(logo.inner_triangles.get_height())
        squares[0].set_fill('#C59978', 1).set_stroke(width=0.5, color='#C59978')
        for s in squares:
            s.scale(0.8)
        text = VGroup(
            Text("Manim", font=self.font, color=BLACK),
            Text("Kindergarten", font=self.font, color=BLACK)
        ).arrange(DOWN, aligned_edge=LEFT, buff=0.3).set_height(2.1).next_to(logo, buff=1.5).shift(DOWN*0.2)
        # Color the leading letters K (brown) and M (blue).
        text[1][0].set_color(logo.color_2[2])
        text[0][0].set_color(logo.color_1[2])
        # .center() moves logo and text together to the screen center.
        all_logo = VGroup(logo, text).center()
        line = Line(UP, DOWN, stroke_width=8, color=BLACK).move_to(mid(logo.get_right(), text.get_left()))
        line.set_length(1.4)
        text.add(line)
        # Mask that keeps the text hidden to the logo's right.
        bg = Rectangle(height=10, width=10, fill_color=WHITE, fill_opacity=1, stroke_width=0)
        bg.add_updater(lambda m: m.move_to(logo, aligned_edge=RIGHT).shift(RIGHT*0.2))
        # Remember final positions, then start enlarged/centered.
        text.save_state()
        text.shift((text.get_right()[0]-bg.get_right()[0]+0.2)*LEFT)
        logo.save_state()
        logo.move_to(ORIGIN)
        logo.scale(1.5)
        tris = logo.inner_triangles.copy().rotate(-PI)
        tris.set_color(BLUE_C)
        tris[0].set_color('#C59978')
        self.add(text, bg)
        self.wait(0.3)
        # Accelerating strobe: alternately show/hide the triangle ring with
        # shrinking pauses (same timings as the original add/remove ladder).
        for k, pause in enumerate((0.3, 0.2, 0.15, 0.1, 0.1, 0.075, 0.075, 0.05, 0.05)):
            if k % 2 == 0:
                self.add(tris)
            else:
                self.remove(tris)
            self.wait(pause)
        self.remove(tris)
        self.wait(0.2)
        self.play(ShowSubmobjectsOneByOne(tris), rate_func=linear, run_time=0.4)
        for i in tris:
            self.add(i)
        self.wait(0.1)
        # Triangles -> squares -> final logo quadrants.
        self.play(*[ReplacementTransform(tris[i], squares[i]) for i in range(4)],
                  rate_func=rush_from, run_time=0.6)
        self.wait(0.1)
        self.play(*[ReplacementTransform(squares[i], logo[i]) for i in range(4)],
                  rate_func=rush_from, run_time=0.6)
        self.wait(0.1)
        self.play(
            text.restore, logo.restore,
            rate_func=rush_from, run_time=0.8
        )
        self.wait(1)
        self.play(FadeOut(VGroup(*self.mobjects)))
class Logo_Rotate_Out(Scene):
    """Intro like Logo_black (per-letter wordmark), followed by an outro where
    a sweeping line makes every element spin away out of the plane."""
    CONFIG = {
        "font": "Orbitron",
    }
    def construct(self):
        logo = Logo(size=8/3)
        # Intermediate square shapes used between triangles and final logo.
        squares = VGroup(*[Polygon(ORIGIN, UR, UL), Polygon(ORIGIN, UL, DL), Polygon(ORIGIN, DL, DR), Polygon(ORIGIN, DR, UR),])
        squares.set_fill(WHITE, 1).set_stroke(width=0.5, color=WHITE).rotate(np.arctan(0.5)).set_height(logo.inner_triangles.get_height())
        for s in squares:
            s.scale(0.8)
        # Build each word letter-by-letter so letters can rotate out individually.
        text = VGroup(
            VGroup(*[Text(t, font=self.font) for t in 'Manim']).arrange(direction=RIGHT * 0.5, aligned_edge=DOWN),
            VGroup(*[Text(t, font=self.font) for t in 'Kindergarten']).arrange(direction=RIGHT * 0.5, aligned_edge=DOWN)
        ).arrange(DOWN, aligned_edge=LEFT, buff=0.3).set_height(2.1).next_to(logo, buff=1.5).shift(DOWN*0.2)
        text[1][6].align_to(text[1][5], UP)
        # Color the leading letters K (brown) and M (blue).
        text[1][0].set_color(logo.color_2[2])
        text[0][0].set_color(logo.color_1[2])
        # .center() moves logo and text together to the screen center.
        all_logo = VGroup(logo, text).center()
        line = Line(UP, DOWN, stroke_width=8).move_to(mid(logo.get_right(), text.get_left()))
        line.set_length(1.4)
        text.add(line)
        # Mask that keeps the text hidden to the logo's right.
        bg = Rectangle(height=10, width=10, fill_color=BLACK, fill_opacity=1, stroke_width=0)
        bg.add_updater(lambda m: m.move_to(logo, aligned_edge=RIGHT).shift(RIGHT*0.2))
        # Remember final positions, then start enlarged/centered.
        text.save_state()
        text.shift((text.get_right()[0]-bg.get_right()[0]+0.2)*LEFT)
        logo.save_state()
        logo.move_to(ORIGIN)
        logo.scale(1.5)
        tris = logo.inner_triangles.copy().rotate(-PI)
        self.add(text, bg)
        self.wait(0.3)
        # Accelerating strobe: alternately show/hide the triangle ring with
        # shrinking pauses (same timings as the original add/remove ladder).
        for k, pause in enumerate((0.3, 0.2, 0.15, 0.1, 0.1, 0.075, 0.075, 0.05, 0.05)):
            if k % 2 == 0:
                self.add(tris)
            else:
                self.remove(tris)
            self.wait(pause)
        self.remove(tris)
        self.wait(0.2)
        self.play(ShowSubmobjectsOneByOne(tris), rate_func=linear, run_time=0.4)
        for i in tris:
            self.add(i)
        self.wait(0.1)
        # Triangles -> squares -> final logo quadrants.
        self.play(*[ReplacementTransform(tris[i], squares[i]) for i in range(4)],
                  rate_func=rush_from, run_time=0.6)
        self.wait(0.1)
        self.play(*[ReplacementTransform(squares[i], logo[i]) for i in range(4)],
                  rate_func=rush_from, run_time=0.6)
        self.wait(0.1)
        self.play(
            text.restore, logo.restore,
            rate_func=rush_from, run_time=0.8
        )
        self.wait(0.75)
        self.play(VGroup(*self.mobjects).shift, UP * 1.2)
        self.wait(0.5)
        # x-coordinate of the invisible sweep line that triggers the rotation.
        # (Renamed from `s`, which shadowed the loop variable above.)
        sweep = ValueTracker(-7)
        def rotate_out(a, dt):
            # Once the sweep line has passed a mobject (or it already left the
            # z=0 plane), keep rotating it about an axis through the line.
            w = 1.
            if a.get_center()[0] < sweep.get_value() or a.get_center()[-1] != 0:
                a.rotate(w * (1 + 2.5 * np.random.random()) * DEGREES, axis=UR, about_point=RIGHT * sweep.get_value() + OUT * 0.25)
        for i in range(4):
            for mob in logo[i]:
                mob.add_updater(rotate_out)
        for tex in text[0]:
            tex.add_updater(rotate_out)
        for tex in text[1]:
            tex.add_updater(rotate_out)
        line.add_updater(rotate_out)
        self.remove(bg)
        # Sweep left-to-right across the screen, rotating everything away.
        self.play(sweep.set_value, 10, rate_func=rush_into, run_time=3.6)
        self.wait(1)
| 16,392 | 1,358 | 261 |
a1109e345f71fa4c087beb4cc1cd05af8bbcaa78 | 3,110 | py | Python | quickbooks/batch.py | varunbheemaiah/python-quickbooks | f5459a07619ae220e24099c6e0c8e8db890bb66b | [
"MIT"
] | 234 | 2015-08-25T02:41:33.000Z | 2020-03-30T15:30:23.000Z | quickbooks/batch.py | varunbheemaiah/python-quickbooks | f5459a07619ae220e24099c6e0c8e8db890bb66b | [
"MIT"
] | 170 | 2015-09-12T07:02:32.000Z | 2020-03-20T13:34:34.000Z | quickbooks/batch.py | varunbheemaiah/python-quickbooks | f5459a07619ae220e24099c6e0c8e8db890bb66b | [
"MIT"
] | 142 | 2015-08-26T07:08:56.000Z | 2020-03-20T11:59:52.000Z | import uuid
from .client import QuickBooks
from .exceptions import QuickbooksException
from .objects.batchrequest import IntuitBatchRequest, BatchItemRequest, BatchOperation, BatchResponse, BatchItemResponse
| 34.175824 | 120 | 0.678135 | import uuid
from .client import QuickBooks
from .exceptions import QuickbooksException
from .objects.batchrequest import IntuitBatchRequest, BatchItemRequest, BatchOperation, BatchResponse, BatchItemResponse
class BatchManager(object):
    """Send create/update/delete operations to QuickBooks in batches.

    Splits a list of objects into chunks of at most ``max_request_items`` and
    issues one batch request per chunk, merging all results into a single
    :class:`BatchResponse`.
    """
    def __init__(self, operation, max_request_items=30):
        """
        :param operation: One of ``"create"``, ``"update"`` or ``"delete"``.
        :param max_request_items: Maximum number of objects per batch request.
        :raises QuickbooksException: If ``operation`` is not supported.
        """
        self._max_request_items = max_request_items

        if operation in ["create", "update", "delete"]:
            self._operation = operation
        else:
            raise QuickbooksException("Operation not supported.")

    def save(self, obj_list, qb=None):
        """Process ``obj_list`` chunk by chunk and return the merged response."""
        batch_response = BatchResponse()
        obj_list = list(obj_list)  # local copy; also accepts any iterable

        while len(obj_list) > 0:
            temp_list = obj_list[:self._max_request_items]
            # Drop the processed prefix by slicing.  The previous
            # "[item for item in obj_list if item not in temp_list]" filter was
            # O(n^2) and silently discarded duplicate (equal) objects that
            # should have been processed in later chunks.
            obj_list = obj_list[self._max_request_items:]

            result = self.process_batch(temp_list, qb=qb)
            batch_response.batch_responses += result.batch_responses
            batch_response.original_list += result.original_list
            batch_response.successes += result.successes
            batch_response.faults += result.faults

        return batch_response

    def process_batch(self, obj_list, qb=None):
        """Send one batch request for ``obj_list`` and parse its response."""
        if not qb:
            qb = QuickBooks()

        batch = self.list_to_batch_request(obj_list)
        json_data = qb.batch_operation(batch.to_json())
        batch_response = self.batch_results_to_list(json_data, batch, obj_list)
        return batch_response

    def list_to_batch_request(self, obj_list):
        """Wrap each object in a BatchItemRequest tagged with a unique bId."""
        batch = IntuitBatchRequest()

        for obj in obj_list:
            batch_item = BatchItemRequest()
            batch_item.bId = str(uuid.uuid4())  # correlates request and response items
            batch_item.operation = self._operation
            batch_item.set_object(obj)

            batch.BatchItemRequest.append(batch_item)

        return batch

    def batch_results_to_list(self, json_data, batch, original_list):
        """Match response items back to their requests (via bId) and sort them
        into ``successes`` and ``faults`` on a BatchResponse."""
        response = BatchResponse()
        response.original_list = original_list

        for data in json_data['BatchItemResponse']:
            response_item = BatchItemResponse.from_json(data)
            # Find the request item carrying the same correlation id.
            batch_item = [obj for obj in batch.BatchItemRequest if obj.bId == response_item.bId][0]
            response_item.set_object(batch_item.get_object())
            response.batch_responses.append(response_item)

            if response_item.Fault:
                response_item.Fault.original_object = response_item.get_object()
                response.faults.append(response_item.Fault)
            else:
                # Rehydrate the saved object from the QBO payload.
                class_obj = type(response_item.get_object())
                new_object = class_obj.from_json(data[class_obj.qbo_object_name])
                response.successes.append(new_object)

        return response
def batch_create(obj_list, qb=None):
    """Create every object in ``obj_list`` via batched QuickBooks requests."""
    manager = BatchManager(BatchOperation.CREATE)
    return manager.save(obj_list, qb=qb)
def batch_update(obj_list, qb=None):
    """Update every object in ``obj_list`` via batched QuickBooks requests."""
    manager = BatchManager(BatchOperation.UPDATE)
    return manager.save(obj_list, qb=qb)
def batch_delete(obj_list, qb=None):
    """Delete every object in ``obj_list`` via batched QuickBooks requests."""
    manager = BatchManager(BatchOperation.DELETE)
    return manager.save(obj_list, qb=qb)
bd246d6ac90c60a4c8d7fbe993a1475528e524bc | 463 | py | Python | test/test_inference.py | phanxuanphucnd/wav2asr | 6e4d6f6ce0165bd1f2baf3c219b7755dc2202c36 | [
"MIT"
] | 1 | 2021-06-23T01:41:46.000Z | 2021-06-23T01:41:46.000Z | test/test_inference.py | phanxuanphucnd/wav2asr | 6e4d6f6ce0165bd1f2baf3c219b7755dc2202c36 | [
"MIT"
] | null | null | null | test/test_inference.py | phanxuanphucnd/wav2asr | 6e4d6f6ce0165bd1f2baf3c219b7755dc2202c36 | [
"MIT"
] | 2 | 2021-07-28T14:51:47.000Z | 2021-10-30T19:53:34.000Z | from arizona.asr.learner import Wav2AsrLearner
learner = Wav2AsrLearner(
pretrain_model='path/to/pretrain.pt',
finetune_model='path/to/finetune.pt',
dictionary='path/to/dict.ltr.txt',
lm_type='kenlm',
lm_lexicon='path/to/lm/lexicon.txt',
lm_model='path/to/lm/lm.bin',
lm_weight=1.5,
word_score=-1,
beam_size=50
)
hypos = learner.transcribe([
'./data/test_1.wav',
'./data/test_1.wav'
])
print("===")
print(hypos) | 22.047619 | 46 | 0.658747 | from arizona.asr.learner import Wav2AsrLearner
# Example: build an ASR learner from a pretrained + fine-tuned wav2vec-style
# checkpoint, decoded with a KenLM language model.  All 'path/to/...' values
# are placeholders to be replaced with real model files.
learner = Wav2AsrLearner(
    pretrain_model='path/to/pretrain.pt',
    finetune_model='path/to/finetune.pt',
    dictionary='path/to/dict.ltr.txt',
    lm_type='kenlm',
    lm_lexicon='path/to/lm/lexicon.txt',
    lm_model='path/to/lm/lm.bin',
    lm_weight=1.5,      # LM score weight in beam search
    word_score=-1,      # per-word insertion score
    beam_size=50
)
# Transcribe a batch of wav files and print the hypotheses.
hypos = learner.transcribe([
    './data/test_1.wav',
    './data/test_1.wav'
])
print("===")
print(hypos)
83953f1d614b2e84fcb3dcb99d7d5d34855995b9 | 740 | py | Python | tests/data_sources/satellite/test_satellite_model.py | lenassero/nowcasting_dataset | deaf098c4d318f3ef532bac73f9cc4fa2858479b | [
"MIT"
] | null | null | null | tests/data_sources/satellite/test_satellite_model.py | lenassero/nowcasting_dataset | deaf098c4d318f3ef532bac73f9cc4fa2858479b | [
"MIT"
] | null | null | null | tests/data_sources/satellite/test_satellite_model.py | lenassero/nowcasting_dataset | deaf098c4d318f3ef532bac73f9cc4fa2858479b | [
"MIT"
] | null | null | null | """Test Satellite model."""
import os
import tempfile
import numpy as np
import pytest
from nowcasting_dataset.data_sources.fake import satellite_fake
from nowcasting_dataset.data_sources.satellite.satellite_model import Satellite
| 23.125 | 79 | 0.739189 | """Test Satellite model."""
import os
import tempfile
import numpy as np
import pytest
from nowcasting_dataset.data_sources.fake import satellite_fake
from nowcasting_dataset.data_sources.satellite.satellite_model import Satellite
def test_satellite_init():
    """Smoke test: constructing a fake Satellite batch does not raise."""
    _ = satellite_fake()
def test_satellite_validation():
    """Validation passes on clean fake data and rejects data containing NaN."""
    sat = satellite_fake()
    Satellite.model_validation(sat)

    # Corrupt a single value; validation is expected to raise.
    sat.data[0, 0] = np.nan
    with pytest.raises(Exception):
        Satellite.model_validation(sat)
def test_satellite_save():
    """Saving a fake batch writes the expected netCDF file layout."""
    with tempfile.TemporaryDirectory() as dirpath:
        satellite_fake().save_netcdf(path=dirpath, batch_i=0)

        assert os.path.exists(f"{dirpath}/satellite/000000.nc")
| 435 | 0 | 69 |
d8dcee92379cf3ecf4def5fe4e4a50edef0975b9 | 5,230 | py | Python | src/htrun/host_tests_plugins/host_test_registry.py | Patater/greentea | 4fcf55396ab7f5960a542edb88365686b8f33624 | [
"Apache-2.0"
] | 37 | 2015-09-10T13:52:40.000Z | 2021-11-17T11:53:30.000Z | src/htrun/host_tests_plugins/host_test_registry.py | Patater/greentea | 4fcf55396ab7f5960a542edb88365686b8f33624 | [
"Apache-2.0"
] | 270 | 2015-09-09T13:00:53.000Z | 2021-12-16T17:39:05.000Z | src/htrun/host_tests_plugins/host_test_registry.py | Patater/greentea | 4fcf55396ab7f5960a542edb88365686b8f33624 | [
"Apache-2.0"
] | 51 | 2015-09-04T13:16:47.000Z | 2021-11-18T15:40:03.000Z | #
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Registry of available host test plugins."""
class HostTestRegistry:
"""Register and store host test plugins for further usage."""
# Here we actually store all the plugins
PLUGINS = {} # 'Plugin Name' : Plugin Object
def print_error(self, text):
"""Print an error message to the console.
Args:
text: Error message reason.
"""
print("Plugin load failed. Reason: %s" % text)
def register_plugin(self, plugin):
"""Store a plugin in the registry.
This method also calls the plugin's setup() method to configure the plugin.
Args:
plugin: Plugin instance.
Returns:
True if plugin setup was successful and plugin can be registered, else
False.
"""
# TODO:
# - check for unique caps for specified type
if plugin.name not in self.PLUGINS:
if plugin.setup(): # Setup plugin can be completed without errors
self.PLUGINS[plugin.name] = plugin
return True
else:
self.print_error("%s setup failed" % plugin.name)
else:
self.print_error("%s already loaded" % plugin.name)
return False
def call_plugin(self, type, capability, *args, **kwargs):
"""Execute the first plugin found with a particular 'type' and 'capability'.
Args:
type: Plugin type.
capability: Plugin capability name.
args: Additional plugin parameters.
kwargs: Additional plugin parameters.
Returns:
True if a plugin was found and execution succeeded, otherwise False.
"""
for plugin_name in self.PLUGINS:
plugin = self.PLUGINS[plugin_name]
if plugin.type == type and capability in plugin.capabilities:
return plugin.execute(capability, *args, **kwargs)
return False
def get_plugin_caps(self, type):
"""List all capabilities for plugins with the specified type.
Args:
type: Plugin type.
Returns:
List of capabilities found. If there are no capabilities an empty
list is returned.
"""
result = []
for plugin_name in self.PLUGINS:
plugin = self.PLUGINS[plugin_name]
if plugin.type == type:
result.extend(plugin.capabilities)
return sorted(result)
def load_plugin(self, name):
"""Import a plugin module.
Args:
name: Name of the module to import.
Returns:
Imported module.
Raises:
ImportError: The module with the given name was not found.
"""
mod = __import__("module_%s" % name)
return mod
def get_string(self):
"""User friendly printing method to show hooked plugins.
Returns:
PrettyTable formatted string describing the contents of the plugin
registry.
"""
from prettytable import PrettyTable, HEADER
column_names = [
"name",
"type",
"capabilities",
"stable",
"os_support",
"required_parameters",
]
pt = PrettyTable(column_names, junction_char="|", hrules=HEADER)
for column in column_names:
pt.align[column] = "l"
for plugin_name in sorted(self.PLUGINS.keys()):
name = self.PLUGINS[plugin_name].name
type = self.PLUGINS[plugin_name].type
stable = self.PLUGINS[plugin_name].stable
capabilities = ", ".join(self.PLUGINS[plugin_name].capabilities)
is_os_supported = self.PLUGINS[plugin_name].is_os_supported()
required_parameters = ", ".join(
self.PLUGINS[plugin_name].required_parameters
)
row = [
name,
type,
capabilities,
stable,
is_os_supported,
required_parameters,
]
pt.add_row(row)
return pt.get_string()
def get_dict(self):
"""Return a dictionary of registered plugins."""
result = {}
for plugin_name in sorted(self.PLUGINS.keys()):
name = self.PLUGINS[plugin_name].name
type = self.PLUGINS[plugin_name].type
stable = self.PLUGINS[plugin_name].stable
capabilities = self.PLUGINS[plugin_name].capabilities
is_os_supported = self.PLUGINS[plugin_name].is_os_supported()
required_parameters = self.PLUGINS[plugin_name].required_parameters
result[plugin_name] = {
"name": name,
"type": type,
"stable": stable,
"capabilities": capabilities,
"os_support": is_os_supported,
"required_parameters": required_parameters,
}
return result
def __str__(self):
"""Return str representation of object."""
return self.get_string()
| 32.893082 | 84 | 0.570937 | #
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Registry of available host test plugins."""
class HostTestRegistry:
    """Register and store host test plugins for further usage."""

    # Class-level store shared by all registry instances:
    # 'Plugin Name' : Plugin Object
    PLUGINS = {}

    def print_error(self, text):
        """Print an error message to the console.

        Args:
            text: Error message reason.
        """
        print("Plugin load failed. Reason: %s" % text)

    def register_plugin(self, plugin):
        """Store a plugin in the registry.

        This method also calls the plugin's setup() method to configure the plugin.

        Args:
            plugin: Plugin instance.

        Returns:
            True if plugin setup was successful and plugin can be registered, else
            False.
        """
        # TODO:
        # - check for unique caps for specified type
        if plugin.name not in self.PLUGINS:
            if plugin.setup():  # Setup plugin can be completed without errors
                self.PLUGINS[plugin.name] = plugin
                return True
            else:
                self.print_error("%s setup failed" % plugin.name)
        else:
            self.print_error("%s already loaded" % plugin.name)
        return False

    def call_plugin(self, type, capability, *args, **kwargs):
        """Execute the first plugin found with a particular 'type' and 'capability'.

        Args:
            type: Plugin type.
            capability: Plugin capability name.
            args: Additional plugin parameters.
            kwargs: Additional plugin parameters.

        Returns:
            The plugin's execution result if one was found, otherwise False.
        """
        for plugin in self.PLUGINS.values():
            if plugin.type == type and capability in plugin.capabilities:
                return plugin.execute(capability, *args, **kwargs)
        return False

    def get_plugin_caps(self, type):
        """List all capabilities for plugins with the specified type.

        Args:
            type: Plugin type.

        Returns:
            Sorted list of capabilities found. If there are no capabilities an
            empty list is returned.
        """
        return sorted(
            capability
            for plugin in self.PLUGINS.values()
            if plugin.type == type
            for capability in plugin.capabilities
        )

    def load_plugin(self, name):
        """Import a plugin module.

        Args:
            name: Name of the module to import.

        Returns:
            Imported module.

        Raises:
            ImportError: The module with the given name was not found.
        """
        mod = __import__("module_%s" % name)
        return mod

    def get_string(self):
        """User friendly printing method to show hooked plugins.

        Returns:
            PrettyTable formatted string describing the contents of the plugin
            registry.
        """
        from prettytable import PrettyTable, HEADER

        column_names = [
            "name",
            "type",
            "capabilities",
            "stable",
            "os_support",
            "required_parameters",
        ]
        pt = PrettyTable(column_names, junction_char="|", hrules=HEADER)
        for column in column_names:
            pt.align[column] = "l"

        # Render the same data get_dict() exposes so both views stay in sync.
        for info in self.get_dict().values():
            pt.add_row([
                info["name"],
                info["type"],
                ", ".join(info["capabilities"]),
                info["stable"],
                info["os_support"],
                ", ".join(info["required_parameters"]),
            ])
        return pt.get_string()

    def get_dict(self):
        """Return a dictionary of registered plugins, keyed and sorted by name."""
        result = {}
        for plugin_name in sorted(self.PLUGINS.keys()):
            plugin = self.PLUGINS[plugin_name]
            result[plugin_name] = {
                "name": plugin.name,
                "type": plugin.type,
                "stable": plugin.stable,
                "capabilities": plugin.capabilities,
                "os_support": plugin.is_os_supported(),
                "required_parameters": plugin.required_parameters,
            }
        return result

    def __str__(self):
        """Return str representation of object."""
        return self.get_string()
| 0 | 0 | 0 |
e37a75611063a66617e3e1deb56404ce08506cd3 | 1,407 | py | Python | genofunk/subcommands/apply.py | rmcolq/genofunk | ffa031fb361fc736e839d0e36d36f8ed7ade30dc | [
"MIT"
] | 1 | 2021-01-09T23:25:02.000Z | 2021-01-09T23:25:02.000Z | genofunk/subcommands/apply.py | rmcolq/genofunk | ffa031fb361fc736e839d0e36d36f8ed7ade30dc | [
"MIT"
] | null | null | null | genofunk/subcommands/apply.py | rmcolq/genofunk | ffa031fb361fc736e839d0e36d36f8ed7ade30dc | [
"MIT"
] | null | null | null | import logging
import os
import sys
from genofunk import apply
| 37.026316 | 115 | 0.436389 | import logging
import os
import sys
from genofunk import apply
def run(options):
if options.verbose:
log_level = logging.DEBUG
msg = "Using debug logging"
else:
log_level = logging.INFO
msg = "Using info logging"
#log_file = f"apply.log"
#if os.path.exists(log_file):
# os.unlink(log_file)
logging.basicConfig(
#filename=log_file,
stream=sys.stdout,
level=log_level,
format="%(asctime)s\t%(levelname)s\t%(message)s",
datefmt="%d/%m/%Y %I:%M:%S",
)
logging.info(msg)
logging.info(
"Input parameters:\nDirectory: %s\nEdit file: %s\nFeatures: %s\nOutput prefix: %s\nConcatenate: %s"
%(options.directory,
options.edit_file,
options.features,
options.output_prefix,
options.concatinate)
)
g = apply.Apply()
g.run(options.directory, options.edit_file, options.output_prefix, options.features, options.concatinate)
| 1,319 | 0 | 23 |
65d5b20d577cda0ce9aba91179e9550e701e1148 | 3,132 | py | Python | script/alert.py | xbot/SmartHome | c473d455d7555e8806a2f5d09dff6db6beb8ae9b | [
"MIT"
] | 1 | 2021-02-09T09:40:17.000Z | 2021-02-09T09:40:17.000Z | script/alert.py | xbot/SmartHome | c473d455d7555e8806a2f5d09dff6db6beb8ae9b | [
"MIT"
] | null | null | null | script/alert.py | xbot/SmartHome | c473d455d7555e8806a2f5d09dff6db6beb8ae9b | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# encoding: utf-8
####################################################
#
# Description: Alert for invasion using pushbullet.
# Author: Donie Leigh<donie.leigh at gmail.com>
# License: MIT
#
####################################################
import sys, getopt, time, json
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from systemd import journal
CONFIG_FILE = '/etc/smarthome.conf'
cfg = None
host = None
username = None
password = None
topic_msg = None
topic_image = None
timePoint = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
alertMsg = timePoint + ',发现入侵者!!!'
apiKey = None
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], "f:")
imageFile = ''
for op, value in opts:
if op == "-f":
imageFile = value
if len(imageFile) == 0:
sys.stderr.write("Image file missing.\n")
sys.exit()
try:
cfg = ConfigParser()
cfg.read(CONFIG_FILE)
protocol = cfg.get('global', 'protocol')
if protocol == 'pushbullet':
from yapbl import PushBullet
apiKey = cfg.get('global', 'apiKey')
if apiKey is None or len(apiKey) == 0:
raise NoOptionError('apiKey', 'global')
send_alert_by_pushbullet(imageFile)
else:
import paho.mqtt.client as mqtt
username = cfg.get('mosquitto', 'user')
if username is None or len(username) == 0:
raise NoOptionError('user', 'mosquitto')
password = cfg.get('mosquitto', 'password')
if password is None or len(password) == 0:
raise NoOptionError('password', 'mosquitto')
host = cfg.get('mosquitto', 'host')
if host is None or len(host) == 0:
raise NoOptionError('host', 'mosquitto')
topic_msg = cfg.get('mosquitto', 'topic_msg')
if topic_msg is None or len(topic_msg) == 0:
raise NoOptionError('topic_msg', 'mosquitto')
topic_image = cfg.get('mosquitto', 'topic_image')
if topic_image is None or len(topic_image) == 0:
raise NoOptionError('topic_image', 'mosquitto')
send_alert_by_mosquitto(imageFile)
except (NoSectionError, NoOptionError), e:
err = 'Config file is missing or invalid: ' + str(e)
journal.send(err, PRIORITY=journal.LOG_ERR)
sys.stderr.write(err + "\n")
sys.exit(1)
| 33.319149 | 113 | 0.587165 | #!/usr/bin/env python2
# encoding: utf-8
####################################################
#
# Description: Alert for invasion using pushbullet.
# Author: Donie Leigh<donie.leigh at gmail.com>
# License: MIT
#
####################################################
import sys, getopt, time, json
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from systemd import journal
CONFIG_FILE = '/etc/smarthome.conf'
cfg = None
host = None
username = None
password = None
topic_msg = None
topic_image = None
timePoint = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
alertMsg = timePoint + ',发现入侵者!!!'
apiKey = None
def send_alert_by_pushbullet(image):
    """Send the intrusion alert via PushBullet: a text note plus the snapshot.

    `image` is the path of the captured picture.  Uses the module-level
    `apiKey` and `alertMsg` loaded from the config file.
    """
    pb = PushBullet(apiKey)
    pb.push_note('入侵警报', alertMsg)
    # Close the snapshot after the upload instead of leaking the handle
    # (the original passed an open file object that was never closed).
    with open(image, 'rb') as f:
        pb.push_file(f)
def send_alert_by_mosquitto(image):
    """Publish the snapshot (base64 inside a JSON payload) to the MQTT broker.

    `image` is the path of the captured picture.  Uses the module-level
    connection settings (`host`, `username`, `password`, `topic_image`)
    loaded from the config file.  Blocks in loop_forever() until the
    on_publish callback disconnects the client.
    """
    import binascii
    mq = mqtt.Client()
    mq.username_pw_set(username, password)
    # Disconnect as soon as the message is confirmed published, which makes
    # loop_forever() below return instead of blocking indefinitely.
    mq.on_publish = lambda mosq, userdata, mid: mosq.disconnect()
    mq.connect(host, 1883, 60)
    # Fix: read the `image` parameter rather than the global `imageFile`,
    # so the function works for any path it is given (the caller passes
    # imageFile, so behaviour is unchanged for the existing call site).
    with open(image, 'rb') as f:
        payload = {'image': binascii.b2a_base64(f.read()),
                   'time': time.time() * 1000}
        mq.publish(topic_image, json.dumps(payload), 2)
    mq.loop_forever()
if __name__ == '__main__':
    # Parse the only supported flag: -f <snapshot image path>.
    opts, args = getopt.getopt(sys.argv[1:], "f:")
    imageFile = ''
    for op, value in opts:
        if op == "-f":
            imageFile = value
    if len(imageFile) == 0:
        sys.stderr.write("Image file missing.\n")
        sys.exit()
    try:
        # Load /etc/smarthome.conf and pick the alert transport from the
        # [global] protocol option: 'pushbullet' or MQTT (mosquitto).
        cfg = ConfigParser()
        cfg.read(CONFIG_FILE)
        protocol = cfg.get('global', 'protocol')
        if protocol == 'pushbullet':
            # Import lazily so the dependency is only needed when selected.
            from yapbl import PushBullet
            apiKey = cfg.get('global', 'apiKey')
            if apiKey is None or len(apiKey) == 0:
                raise NoOptionError('apiKey', 'global')
            send_alert_by_pushbullet(imageFile)
        else:
            import paho.mqtt.client as mqtt
            # Every [mosquitto] option is mandatory; an empty or missing
            # value is reported the same way as a missing option.
            username = cfg.get('mosquitto', 'user')
            if username is None or len(username) == 0:
                raise NoOptionError('user', 'mosquitto')
            password = cfg.get('mosquitto', 'password')
            if password is None or len(password) == 0:
                raise NoOptionError('password', 'mosquitto')
            host = cfg.get('mosquitto', 'host')
            if host is None or len(host) == 0:
                raise NoOptionError('host', 'mosquitto')
            topic_msg = cfg.get('mosquitto', 'topic_msg')
            if topic_msg is None or len(topic_msg) == 0:
                raise NoOptionError('topic_msg', 'mosquitto')
            topic_image = cfg.get('mosquitto', 'topic_image')
            if topic_image is None or len(topic_image) == 0:
                raise NoOptionError('topic_image', 'mosquitto')
            send_alert_by_mosquitto(imageFile)
    # Python 2 except syntax -- this script targets python2 (see shebang).
    except (NoSectionError, NoOptionError), e:
        # Report config problems both to the systemd journal and stderr.
        err = 'Config file is missing or invalid: ' + str(e)
        journal.send(err, PRIORITY=journal.LOG_ERR)
        sys.stderr.write(err + "\n")
sys.exit(1)
| 588 | 0 | 46 |
feb9c5c67882480ba12e4d921db5600c99a3ce73 | 8,149 | py | Python | pdserver/game.py | Gustavo6046/polydung | e8626c67b0f59e00a2400b5a5c644e3f6b925e00 | [
"MIT"
] | null | null | null | pdserver/game.py | Gustavo6046/polydung | e8626c67b0f59e00a2400b5a5c644e3f6b925e00 | [
"MIT"
] | null | null | null | pdserver/game.py | Gustavo6046/polydung | e8626c67b0f59e00a2400b5a5c644e3f6b925e00 | [
"MIT"
] | null | null | null | import objects
import logging
import sqlite3
import threading
import json
import serverprotocol
import sqlite3
import base64
import socket
tileset = []
SCODE_NEEDAUTH = 0
SCODE_BANNED = 1
SCODE_BADAUTH = 2
SCODE_BADREG = 3
| 32.466135 | 155 | 0.51356 | import objects
import logging
import sqlite3
import threading
import json
import serverprotocol
import sqlite3
import base64
import socket
tileset = []
SCODE_NEEDAUTH = 0
SCODE_BANNED = 1
SCODE_BADAUTH = 2
SCODE_BADREG = 3
class TileType(object):
    """One kind of map tile.

    Instances self-register in the module-level `tileset` list; a tile's
    `index` is simply its position in that registry.
    """
    def __init__(self, sprite="nulltile", functions=()):
        # Position in the global registry becomes this type's id.
        self.index = len(tileset)
        self.sprite = sprite
        self.functions = dict(functions)
        tileset.append(self)
    def serializable(self):
        """Return a JSON-serializable summary of this tile type."""
        return dict(index=self.index, sprite=self.sprite)
class Game(object):
    """Server-side game state: the tile map, live objects, connected
    clients, and a SQLite persistence layer.  Constructing a Game also
    starts a background thread accepting client connections."""
    def __init__(self, save='polydung.db', listen_port=3048, map_width=120, map_height=67, logger=None): # kinda 16:9
        """Load state from `save` if it exists, otherwise create the schema
        and an empty map; then start listening on `listen_port`."""
        self.save = save
        self.clients = []
        self.logger = logger or logging.getLogger("PolydungServer")
        # While True, object-creation DB writes are suppressed (used while
        # loading from disk to avoid re-inserting the rows being read).
        self.block_db = False
        db = self.database()
        c = db.cursor()
        # An existing save has at least one table; otherwise bootstrap.
        c.execute('SELECT * FROM sqlite_master WHERE TYPE = "table";')
        if len(c.fetchall()) > 0:
            self.map = []
            c.execute("SELECT * FROM TileMap;")
            self.block_db = True
            for ind, js in c.fetchall():
                self.map.insert(ind, json.loads(js))
            self.objects = []
            c.execute('SELECT * FROM Objects;')
            # NOTE(review): deserialized objects are not appended here;
            # presumably PDObject.deserialize registers them back via
            # handle_object_creation -- confirm in the objects module.
            for o, in c.fetchall():
                objects.PDObject.deserialize(o)
            self.block_db = False
        else:
            self.objects = []
            self.map = [[0 for _ in range(map_width)] for _ in range(map_height)]
            c.execute("CREATE TABLE TileMap (row int, json text);")
            c.execute("CREATE TABLE Objects (json text);")
            c.execute("CREATE TABLE IpBans (ip text);")
            c.execute("CREATE TABLE AccountBans (username text);")
            c.execute("CREATE TABLE Accounts (fails int, logins int, username text, password text);")
            # Persist each map row as a JSON-encoded list keyed by row index.
            for i, row in enumerate(self.map):
                c.execute("INSERT INTO TileMap VALUES (?, ?);", (i, json.dumps(row)))
        db.commit()
        c.close()
        db.close()
        self.logger.info("Hosting at port: {}".format(listen_port))
        threading.Thread(target=self._listen_loop, args=(listen_port,)).start()
    def is_authentic(self, username, password=None):
        """Check credentials, creating the account when none is stored.

        Returns True on successful auth (or registration), False otherwise.
        NOTE(review): fetchone() returns None when the username has no row,
        so fetchone()[0] raises TypeError for brand-new usernames -- confirm
        how registration is expected to reach the insert branch.
        """
        db = self.database()
        c = db.cursor()
        c.execute("SELECT password FROM Accounts WHERE username = ?", (username,))
        p = c.fetchone()[0]
        if p is None:
            if password is None:
                return False
            else:
                # No stored password: register the account now.
                c.execute("INSERT INTO Accounts VALUES (?, ?, ?, ?);", (0, 1, username, password))
                db.commit()
                c.close()
                db.close()
                return True
        elif password is None:
            return False
        res = (password == p)
        # Track successful logins vs failed attempts per account.
        if res:
            c.execute("UPDATE Accounts SET logins = logins + 1 WHERE username = ?", (username,))
        else:
            c.execute("UPDATE Accounts SET fails = fails + 1 WHERE username = ?", (username,))
        db.commit()
        c.close()
        db.close()
        return res
    def _listen_loop(self, port):
        """Accept TCP connections forever, handing each socket to a
        serverprotocol.Client (runs on the thread started in __init__)."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', port))
        s.listen(5)
        while True:
            (clsock, addr) = s.accept()
            self.logger.info("Client attempting connection from {}:{}".format(*addr))
            serverprotocol.Client(self, addr, clsock)
    def is_host_banned(self, ip):
        """True if `ip` appears in the IpBans table."""
        db = self.database()
        c = db.cursor()
        c.execute("SELECT * FROM IpBans WHERE ip = ?", (ip,))
        return len(c.fetchall()) > 0
    def is_account_banned(self, username):
        """True if `username` appears in the AccountBans table."""
        db = self.database()
        c = db.cursor()
        c.execute("SELECT * FROM AccountBans WHERE username = ?", (username,))
        return len(c.fetchall()) > 0
    def authenticate(self, client):
        """Validate a connecting client.

        Returns one of the SCODE_* error codes on failure; on success the
        client is registered via add_client and None is returned (implicit).
        """
        if self.is_authentic(client.username, client.password):
            if self.is_host_banned(client.host[0]):
                self.logger.info("Client {}:{} ({}) is IP banned!".format(*client.host, client.username))
                return SCODE_BANNED
            elif self.is_account_banned(client.username):
                self.logger.info("Client {}:{} ({}) is user banned!".format(*client.host, client.username))
                return SCODE_BANNED
            else:
                self.logger.info("Client {}:{} authenticated as {}".format(*client.host, client.username))
                self.add_client(client)
        elif client.password is None:
            # Distinguish "account exists, needs a password" from a
            # malformed registration attempt (new account, no password).
            db = self.database()
            c = db.cursor()
            c.execute("SELECT * FROM Accounts WHERE username = ?;", (client.username,))
            if len(c.fetchall()) == 0:
                self.logger.info("Client {}:{} tried to make a new account {} but hasn't set a new password for it!".format(*client.host, client.username))
                return SCODE_BADREG
            else:
                self.logger.info("Client {}:{} doesn't have the password to account {}".format(*client.host, client.username))
                return SCODE_NEEDAUTH
        else:
            self.logger.info("Client {}:{} has the wrong password to account {}".format(*client.host, client.username))
            return SCODE_BADAUTH
    def disconnect(self, client):
        """Forget a client; a no-op if it was never registered."""
        if client in self.clients:
            self.clients.remove(client)
    def handle_object_creation(self, obj):
        """Register a newly created object, broadcast a SPAWN message, and
        persist it -- unless block_db is set (i.e. while loading)."""
        self.objects.append(obj)
        for cl in self.clients:
            cl.send("SPAWN", obj.serialize())
        if not self.block_db:
            db = self.database()
            c = db.cursor()
            c.execute('INSERT INTO Objects VALUES (?);', (obj.serialize(),))
            db.commit()
            c.close()
            db.close()
    def database(self):
        """Open a fresh connection to the save database (callers close it)."""
        return sqlite3.connect(self.save)
    def serialize_map(self):
        """The whole tile map as one JSON string."""
        return json.dumps(self.map)
    def add_client(self, cl):
        """Announce existing clients to... (see note) and register `cl`.
        NOTE(review): the loop variable shadows the `cl` parameter, so when
        other clients are already connected the *last existing* client is
        appended instead of the new one -- looks like a bug; confirm.
        """
        for cl in self.clients:
            cl.send("JOINED", cl.username)
        self.clients.append(cl)
    def update_object(self, obj):
        """Push an object's new state to every connected client."""
        for cl in self.clients:
            cl.update_object(obj)
    def global_snapshot(self):
        """Send each client a full world snapshot: tileset, per-client
        snapshot, the map, and every registered object class."""
        for cl in self.clients:
            cl.send("TILESET", json.dumps([tp.serializable() for tp in tileset]))
            cl.snapshot()
            cl.send_map()
            for kind in objects.kinds.values():
                cl.send("CLASS", json.dumps(kind.serializable()))
    def update_map(self, coords):
        """Broadcast the tile at (row, col) `coords` to every client."""
        for cl in self.clients:
            cl.send("TILECHANGE", coords[0], coords[1], str(self.map[coords[0]][coords[1]]))
    def tick(self, tdelta):
        """Advance every live object by `tdelta` (seconds, presumably)."""
        for o in self.objects:
            o.tick(tdelta)
    def __getitem__(self, key):
        # game[(row, col)] -> tile value.
        return self.map[key[0]][key[1]]
    def __setitem__(self, key, value):
        # game[(row, col)] = tile: update, broadcast, and persist the row.
        self.map[key[0]][key[1]] = value
        self.update_map(key)
        if not self.block_db:
            db = self.database()
            c = db.cursor()
            c.execute('UPDATE TileMap SET json=? WHERE row=?;', (json.dumps(self.map[key[0]]), key[0]))
            db.commit()
            c.close()
            db.close()
    def call_tile(self, coords, name, **kwargs):
        """Run the named scripted function of the tile at `coords`, exposing
        kwargs (plus `coords` and the tile state) as interpreter variables.
        NOTE(review): `netbyte` is never imported in this file -- presumably
        a missing `import netbyte`; also this assumes tiles are
        [type_index, state] pairs while the map is initialised with plain
        0s -- confirm both.
        """
        t = self.map[coords[0]][coords[1]]
        f = tileset[t[0]].functions[name]
        nbe = netbyte.Netbyte()
        nbe['coords'] = coords
        nbe['state'] = t[1]
        for k, v in kwargs.items():
            nbe[k] = v
nbe.execute_instructions(*f) | 7,191 | 0 | 732 |
40e3bed3f3d2b5c988c4e485fc439ec96b3aea05 | 271 | py | Python | mainapp/migrations/0073_merge_20180822_0015.py | raeeska/rescuekerala | 649070cd051e0bf2ef54549c96493d5c4c5d89c9 | [
"MIT"
] | 1 | 2018-08-21T15:06:21.000Z | 2018-08-21T15:06:21.000Z | mainapp/migrations/0073_merge_20180822_0015.py | pranavmodx/rescuekerala | dd75a06b191b39ff4bdcd8e42d61c98a6509f052 | [
"MIT"
] | 1 | 2018-08-28T13:26:26.000Z | 2018-08-28T13:26:26.000Z | mainapp/migrations/0073_merge_20180822_0015.py | shashwat1002/rescuekerala | be7b7a959a2f4fa949768115b9c76eb3cccc32cb | [
"MIT"
] | 5 | 2019-11-07T11:34:56.000Z | 2019-11-07T11:36:00.000Z | # Generated by Django 2.1 on 2018-08-21 18:45
from django.db import migrations
| 18.066667 | 47 | 0.649446 | # Generated by Django 2.1 on 2018-08-21 18:45
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: reconciles the divergent 0071 and 0072 branches
    of the mainapp migration graph; no schema operations are needed."""
    dependencies = [
        ('mainapp', '0072_auto_20180822_0000'),
        ('mainapp', '0071_auto_20180821_2358'),
    ]
    operations = [
    ]
| 0 | 167 | 23 |
ffffabc9876cf60dca77175cfe7e1da5cc1dacb6 | 193 | py | Python | F29.Phen2Gene/WebAPI/about.py | foundation29org/F29.Phen2Gene | a5a211c9d839382dee32ee7be93c845fe8affd1e | [
"MIT"
] | null | null | null | F29.Phen2Gene/WebAPI/about.py | foundation29org/F29.Phen2Gene | a5a211c9d839382dee32ee7be93c845fe8affd1e | [
"MIT"
] | null | null | null | F29.Phen2Gene/WebAPI/about.py | foundation29org/F29.Phen2Gene | a5a211c9d839382dee32ee7be93c845fe8affd1e | [
"MIT"
] | null | null | null | from flask import current_app
from flask_restplus import Resource
from ._api import API
@API.route('/version')
| 19.3 | 55 | 0.766839 | from flask import current_app
from flask_restplus import Resource
from ._api import API
@API.route('/version')
class About(Resource):
    """REST resource exposing the running application's version."""
    def get(self):
        """Return the VERSION value from the Flask app config."""
        return current_app.config['VERSION']
| 30 | 1 | 48 |
b6da1f10160c1db772b332558a590bb3896e3f11 | 3,313 | py | Python | 4_m5C_step-by-step_pileup/concat_bam.py | caoy3/RNA-m5C | 65b4e9d66f4c2a75e2854c7f9de5ed63b892d844 | [
"MIT"
] | 2 | 2021-03-07T12:02:56.000Z | 2022-03-12T08:24:36.000Z | 4_m5C_step-by-step_pileup/concat_bam.py | caoy3/RNA-m5C | 65b4e9d66f4c2a75e2854c7f9de5ed63b892d844 | [
"MIT"
] | null | null | null | 4_m5C_step-by-step_pileup/concat_bam.py | caoy3/RNA-m5C | 65b4e9d66f4c2a75e2854c7f9de5ed63b892d844 | [
"MIT"
] | 8 | 2019-05-17T08:22:29.000Z | 2022-03-04T04:18:41.000Z | #!bin/usr/env python
#Jianheng Liu @ Zhanglab, SYSU
#Feb, 2018
#Email: liujh26@mail2.sysu.edu.cn
#Usage: This program is used to merge multiple BAM filtes to one, then sort and index it
#Input: [.bam]
import sys,os
import argparse
import pysam
import time
from time import gmtime, strftime
if __name__ == "__main__":
description = """
"""
parser = argparse.ArgumentParser(prog="concat_bam",version="1.0",fromfile_prefix_chars='@',description=description,formatter_class=argparse.RawTextHelpFormatter)
# Require
group_required = parser.add_argument_group("Required")
group_required.add_argument("-i","--input",dest="input", nargs='*',required=True,help="Input bam files")
group_required.add_argument("-o","--output",dest="output",required=True,help="Output bam")
group_optional = parser.add_argument_group("Optional")
group_optional.add_argument("--sort",dest="sort",default=False,action="store_true",help="Sort bam (and delete unsort)")
group_optional.add_argument("--no-del-bam",dest="no_del_bam",default=False,action="store_true",help="Do not del bam file after sorting")
group_optional.add_argument("--index",dest="index",default=False,action="store_true",help="Index sorted bam")
group_optional.add_argument("-t","--threads",dest="threads",default=1,type=int,help="Threads for samtools sort, default=1")
group_optional.add_argument("-m","--memory",dest="memory",default="1G",help="Memory for samtools sort, default=4G")
options = parser.parse_args()
hid = 0
hid_dict = {}
lift_over = {}
new_header = {}
new_header['HD'] = {'SO': 'unsorted', 'VN': '1.0'}
new_header['SQ'] = []
for fn in options.input:
hid,new_header,hid_dict,lift_over = read_headers(fn,hid,new_header,hid_dict,lift_over)
with pysam.AlignmentFile(options.output, 'wb', header = new_header) as OUTPUT:
for fn in options.input:
merge_bam(fn,OUTPUT)
if options.sort == True:
sys.stderr.write("[%s]Sorting bam...\n" % strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
if options.threads > 1:
pysam.sort("-@",str(options.threads),"-m",options.memory,"-o", options.output.replace(".bam",".sorted.bam"),options.output)
else:
pysam.sort("-m",options.memory,"-o", options.output.replace(".bam",".sorted.bam"), options.output)
if options.index == True:
sys.stderr.write("[%s]Indexing bam...\n" % strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
pysam.index(options.output.replace(".bam",".sorted.bam"))
if options.no_del_bam == False:
os.remove(options.output) | 40.901235 | 162 | 0.711138 | #!bin/usr/env python
#Jianheng Liu @ Zhanglab, SYSU
#Feb, 2018
#Email: liujh26@mail2.sysu.edu.cn
#Usage: This program is used to merge multiple BAM filtes to one, then sort and index it
#Input: [.bam]
import sys,os
import argparse
import pysam
import time
from time import gmtime, strftime
def merge_bam(fin, fout):
    """Copy every read of the BAM file `fin` into the open output `fout`,
    remapping each read's reference id through the module-level `lift_over`
    table built by read_headers().

    Mate/pair information is deliberately dropped (next_reference_id -1,
    next_reference_start 0), since reads from several inputs are being
    concatenated into one file.
    """
    # Fix: open the `fin` argument rather than the global loop variable
    # `fn`, so the function no longer depends on the caller's namespace
    # (the caller passed fn as fin, so behaviour is unchanged there).
    with pysam.AlignmentFile(fin, 'rb') as bam_in:
        for read in bam_in:
            read.reference_id = lift_over[fin][read.reference_id]
            read.next_reference_id = -1
            read.next_reference_start = 0
            fout.write(read)
def read_headers(fn, hid, new_header, hid_dict, lift_over):
    """Fold one BAM file's @SQ headers into the merged header.

    Mutates new_header/hid_dict/lift_over in place, recording for `fn`
    how each of its original reference ids maps onto the merged ids, and
    returns the updated (hid, new_header, hid_dict, lift_over).
    """
    lift_over[fn] = {}
    with pysam.AlignmentFile(fn, 'rb') as bam:
        for old_id, sq in enumerate(bam.header["SQ"]):
            name = sq['SN']
            if name not in hid_dict:
                # First occurrence of this reference: give it the next
                # merged id and append its header to the merged @SQ list.
                hid_dict[name] = hid
                new_header['SQ'].append(sq)
                hid += 1
            lift_over[fn][old_id] = hid_dict[name]
    return hid, new_header, hid_dict, lift_over
if __name__ == "__main__":
description = """
"""
parser = argparse.ArgumentParser(prog="concat_bam",version="1.0",fromfile_prefix_chars='@',description=description,formatter_class=argparse.RawTextHelpFormatter)
# Require
group_required = parser.add_argument_group("Required")
group_required.add_argument("-i","--input",dest="input", nargs='*',required=True,help="Input bam files")
group_required.add_argument("-o","--output",dest="output",required=True,help="Output bam")
group_optional = parser.add_argument_group("Optional")
group_optional.add_argument("--sort",dest="sort",default=False,action="store_true",help="Sort bam (and delete unsort)")
group_optional.add_argument("--no-del-bam",dest="no_del_bam",default=False,action="store_true",help="Do not del bam file after sorting")
group_optional.add_argument("--index",dest="index",default=False,action="store_true",help="Index sorted bam")
group_optional.add_argument("-t","--threads",dest="threads",default=1,type=int,help="Threads for samtools sort, default=1")
group_optional.add_argument("-m","--memory",dest="memory",default="1G",help="Memory for samtools sort, default=4G")
options = parser.parse_args()
hid = 0
hid_dict = {}
lift_over = {}
new_header = {}
new_header['HD'] = {'SO': 'unsorted', 'VN': '1.0'}
new_header['SQ'] = []
for fn in options.input:
hid,new_header,hid_dict,lift_over = read_headers(fn,hid,new_header,hid_dict,lift_over)
with pysam.AlignmentFile(options.output, 'wb', header = new_header) as OUTPUT:
for fn in options.input:
merge_bam(fn,OUTPUT)
if options.sort == True:
sys.stderr.write("[%s]Sorting bam...\n" % strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
if options.threads > 1:
pysam.sort("-@",str(options.threads),"-m",options.memory,"-o", options.output.replace(".bam",".sorted.bam"),options.output)
else:
pysam.sort("-m",options.memory,"-o", options.output.replace(".bam",".sorted.bam"), options.output)
if options.index == True:
sys.stderr.write("[%s]Indexing bam...\n" % strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
pysam.index(options.output.replace(".bam",".sorted.bam"))
if options.no_del_bam == False:
os.remove(options.output) | 782 | 0 | 46 |