Dataset schema (one record per row; ⌀ marks nullable columns):
- hexsha: string, length 40
- size: int64, 7 to 1.04M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 to 247
- max_stars_repo_name: string, length 4 to 125
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 368k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 4 to 247
- max_issues_repo_name: string, length 4 to 125
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 116k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 4 to 247
- max_forks_repo_name: string, length 4 to 125
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 1 to 1.04M
- avg_line_length: float64, 1.77 to 618k
- max_line_length: int64, 1 to 1.02M
- alphanum_fraction: float64, 0 to 1
- original_content: string, length 7 to 1.04M
- filtered:remove_function_no_docstring: int64, -102 to 942k
- filtered:remove_class_no_docstring: int64, -354 to 977k
- filtered:remove_delete_markers: int64, 0 to 60.1k
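For orientation, a minimal sketch of how a dump with this schema could be loaded and inspected with the Hugging Face datasets library. The dataset id below is a hypothetical placeholder, not the actual source of this table.

```python
# Minimal sketch (assumed tooling): stream records of a code dataset with this schema.
# "org/source-code-dump" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("org/source-code-dump", split="train", streaming=True)
for record in ds.take(1):
    print(record["max_stars_repo_name"], record["max_stars_repo_path"])
    print(record["content"][:200])  # filtered content; the full file is in "original_content"
```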
hexsha: 2617cb2e73b14895dfae1fd3fe2018b9394d74fd | size: 825 | ext: py | lang: Python
max_stars: path=monapay/management/commands/initaccounts.py, repo=monapayjp/monapay, head=c896b104b5bea328d119f009710589ffd174386a, licenses=["BSD-3-Clause"], count=4, events 2015-02-12T18:54:44.000Z to 2021-04-15T05:21:06.000Z
max_issues: same path/repo/head/licenses, count=1, events 2018-02-03T17:35:36.000Z to 2018-02-03T17:35:36.000Z
max_forks: same path/repo/head/licenses, count=null, events null
content:
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from monapay.rpc import make_rpc_connection
avg_line_length: 35.869565 | max_line_length: 72 | alphanum_fraction: 0.646061
original_content:
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from monapay.rpc import make_rpc_connection
class Command(BaseCommand):
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **kwargs):
conn = make_rpc_connection()
accounts = conn.listaccounts(as_dict=True)
account_names = [
settings.PAYMENT_WALLET_ACCOUNT_NAME,
settings.PAYMENT_FEE_ACCOUNT_NAME
]
for account_name in account_names:
if not account_name in accounts:
address = conn.getnewaddress(account=account_name)
print("create a new account '{0}'. address: {1}".format(
account_name, address))
filtered:remove_function_no_docstring: 575 | filtered:remove_class_no_docstring: 6 | filtered:remove_delete_markers: 76
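The record above is a Django management command that creates wallet accounts over an RPC connection. A minimal sketch of invoking it programmatically, assuming a configured Django project with the monapay app installed and the two settings it reads defined:

```python
# Usage sketch (assumes DJANGO_SETTINGS_MODULE points at a project that installs
# monapay and defines PAYMENT_WALLET_ACCOUNT_NAME / PAYMENT_FEE_ACCOUNT_NAME).
import django
from django.core.management import call_command

django.setup()
call_command("initaccounts")  # equivalent to: python manage.py initaccounts
```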
hexsha: de7dbbbaf0bfc916c7c7f590fcfc3589933d6da2 | size: 163 | ext: py | lang: Python
max_stars: path=data/typing/pandas.api.types.py, repo=vfdev-5/python-record-api, head=006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b, licenses=["MIT"], count=67, events 2020-08-17T11:53:26.000Z to 2021-11-08T20:16:06.000Z
max_issues: same path/repo/head/licenses, count=36, events 2020-08-17T11:09:51.000Z to 2021-12-15T18:09:47.000Z
max_forks: path=data/typing/pandas.api.types.py, repo=pydata-apis/python-api-record, head=684cffbbb6dc6e81f9de4e02619c8b0ebc557b2b, licenses=["MIT"], count=7, events 2020-08-19T05:06:47.000Z to 2020-11-04T05:10:38.000Z
content:
from typing import *
# usage.dask: 8
# usage.sklearn: 9
CategoricalDtype: object
# usage.dask: 2
is_categorical_dtype: object
# usage.dask: 3
is_sparse: object
avg_line_length: 13.583333 | max_line_length: 28 | alphanum_fraction: 0.748466
original_content:
from typing import *
# usage.dask: 8
# usage.sklearn: 9
CategoricalDtype: object
# usage.dask: 2
is_categorical_dtype: object
# usage.dask: 3
is_sparse: object
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
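The stub above only records which pandas.api.types names were exercised (the `usage.*` comments are hit counts). A hedged example of the underlying pandas API being annotated, against a current pandas release:

```python
# Sketch of the pandas API the stub refers to. Note that is_categorical_dtype and
# is_sparse are deprecated in newer pandas in favour of isinstance checks on the dtype.
import pandas as pd
from pandas.api.types import CategoricalDtype, is_categorical_dtype, is_sparse

s = pd.Series(["a", "b", "a"], dtype=CategoricalDtype(categories=["a", "b"]))
print(is_categorical_dtype(s))                       # True
print(is_sparse(pd.arrays.SparseArray([0, 0, 1])))   # True
```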
hexsha: ac8e698132504eb73e69452ac813966d0ee22822 | size: 8,425 | ext: py | lang: Python
max_stars: path=architect/orms/decorators.py, repo=portcast/architect, head=0cc6374d718855837ab3073b708c9765feb046cb, licenses=["Apache-2.0"], count=null, events null
max_issues: same path/repo/head/licenses, count=null, events null
max_forks: same path/repo/head/licenses, count=null, events null
content:
"""
Defines decorators that Architect provides.
"""
import inspect
import functools
from .bases import BaseFeature
from .registry import registry, Registrar
from ..exceptions import (
ORMError,
FeatureInstallError,
FeatureUninstallError,
MethodAutoDecorateError
)
from copy import deepcopy
class install(object):
"""
Install decorator installs the requested feature for a model. All features are
installed inside the "architect" namespace to avoid all possible naming conflicts.
"""
def __init__(self, feature, **options):
"""
:param string feature: (required). A feature to install.
:param dictionary options: (optional). Feature options.
"""
# for backward compatibility
options = set_list_vals('subtype', **options)
options = set_list_vals('constraint', **options)
options = set_list_vals('column', **options)
self.features = {}
self.feature = feature
self.options = {'feature': options, 'global': dict((k, v) for k, v in options.items() if k in ('db',))}
def __call__(self, model):
"""
:param class model: (required). A model class where feature will be installed.
"""
orm = self.options['feature'].pop('orm', None) or model.__class__.__module__.split('.')[0]
if orm not in Registrar.orms:
try:
__import__('{0}.features'.format(orm), globals(), level=1, fromlist='*')
except ImportError:
import os
import pkgutil
raise ORMError(
current=orm,
model=model.__name__,
allowed=[name for _, name, is_pkg in pkgutil.iter_modules([os.path.dirname(__file__)]) if is_pkg])
self.init_feature(self.feature, model, registry[orm])
# If a model already has some architect features installed, we need to
# gather them and merge with the new feature that needs to be installed
if hasattr(model, 'architect'):
for name, obj in model.architect.__dict__.items():
if isinstance(obj, BaseFeature):
if name not in self.features:
self.features[name] = {'class': obj.__class__, 'options': obj.options}
# Some ORMs disallow setting new attributes on model classes, we
# have to fix this by providing the default __setattr__ behaviour
type(model).__setattr__ = lambda o, n, v: type.__setattr__(o, n, v)
# So what's going on here ? The idea is to create an "architect" namespace using the
# Architect class which is a descriptor itself, which when accessed returns the
# autogenerated class with all the requested features. The most important part here
# is that because we're using a descriptor we can get access to the model class from
# every feature class and all that absolutely correctly works even with the model
# inheritance. While the same can be achieved using metaclasses, the problem is that
# every ORM also uses metaclasses which produces the metaclass conflict because a
# class can't have two metaclasses. This situation can also be solved but it requires
# much more magical stuff to be written that is why this approach was chosen
model.architect = Architect(self.features)
return model
def init_feature(self, feature, model, features_registry):
"""
Initializes the requested feature.
:param string feature: (required). A feature to initialize.
:param class model: (required). A model where feature will be initialized.
:param dict features_registry: (required). A registry with available features for the current ORM.
"""
try:
feature_cls = features_registry[feature]
except KeyError:
raise FeatureInstallError(current=feature, model=model.__name__, allowed=features_registry.keys())
for name in feature_cls.decorate:
try:
original = getattr(model, name)
if getattr(original, 'is_decorated', False): # Handle the inheritance cases
original = original.original
except AttributeError:
raise MethodAutoDecorateError(current=name, model=model.__name__)
decorator = getattr(feature_cls, '_decorate_{0}'.format(name))
decorated = functools.wraps(original)(decorator(original))
decorated.original = original
decorated.is_decorated = True
setattr(model, name, decorated)
self.features[feature] = {
'class': feature_cls,
'options': self.options['feature'] if feature == self.feature else self.options['global']
}
if hasattr(feature_cls, 'register_hooks'):
feature_cls.register_hooks(model)
for dependency in feature_cls.dependencies:
self.init_feature(dependency, model, features_registry)
class uninstall(object):
"""
Uninstall decorator uninstalls the requested feature and all it's dependencies from a model.
"""
def __init__(self, feature):
"""
:param string feature: (required). A feature to uninstall.
"""
self.feature = feature
def __call__(self, model):
"""
:param class model: (required). A model class to work with.
"""
self.deinit_feature(self.feature, model)
return model
def deinit_feature(self, feature, model):
"""
Deinitializes requested feature and it's dependencies.
:param string feature: (required). A feature to deinitialize.
:param class model: (required). A model class to work with.
"""
try:
feature_obj = getattr(model.architect, feature)
except AttributeError:
raise FeatureUninstallError(
current=feature,
model=model.__name__,
allowed=[name for name, obj in model.architect.__dict__.items() if isinstance(obj, BaseFeature)])
# The concept of "unbound methods" has been removed from Python 3. When accessing a method
# from a class, we now get a plain function object. This is what the isfunction check for
methods = inspect.getmembers(model, predicate=lambda m: inspect.isfunction(m) or inspect.ismethod(m))
for name, method in methods:
if getattr(method, 'is_decorated', False):
setattr(model, name, method.original)
delattr(model.architect, feature) # TODO: prohibit uninstall if there are dependant features
for dependency in feature_obj.dependencies:
self.deinit_feature(dependency, model)
avg_line_length: 41.502463 | max_line_length: 118 | alphanum_fraction: 0.622077
original_content:
"""
Defines decorators that Architect provides.
"""
import inspect
import functools
from .bases import BaseFeature
from .registry import registry, Registrar
from ..exceptions import (
ORMError,
FeatureInstallError,
FeatureUninstallError,
MethodAutoDecorateError
)
from copy import deepcopy
def set_list_vals(key, **options):
options = deepcopy(options)
val = options.get(key)
key_plural = '{}s'.format(key)
list_vals = options.get(key_plural, [])
if not list_vals:
if val and type(val) == str:
list_vals = [val]
options[key_plural] = list_vals
return options
class install(object):
"""
Install decorator installs the requested feature for a model. All features are
installed inside the "architect" namespace to avoid all possible naming conflicts.
"""
def __init__(self, feature, **options):
"""
:param string feature: (required). A feature to install.
:param dictionary options: (optional). Feature options.
"""
# for backward compatibility
options = set_list_vals('subtype', **options)
options = set_list_vals('constraint', **options)
options = set_list_vals('column', **options)
self.features = {}
self.feature = feature
self.options = {'feature': options, 'global': dict((k, v) for k, v in options.items() if k in ('db',))}
def __call__(self, model):
"""
:param class model: (required). A model class where feature will be installed.
"""
orm = self.options['feature'].pop('orm', None) or model.__class__.__module__.split('.')[0]
if orm not in Registrar.orms:
try:
__import__('{0}.features'.format(orm), globals(), level=1, fromlist='*')
except ImportError:
import os
import pkgutil
raise ORMError(
current=orm,
model=model.__name__,
allowed=[name for _, name, is_pkg in pkgutil.iter_modules([os.path.dirname(__file__)]) if is_pkg])
self.init_feature(self.feature, model, registry[orm])
# If a model already has some architect features installed, we need to
# gather them and merge with the new feature that needs to be installed
if hasattr(model, 'architect'):
for name, obj in model.architect.__dict__.items():
if isinstance(obj, BaseFeature):
if name not in self.features:
self.features[name] = {'class': obj.__class__, 'options': obj.options}
# Some ORMs disallow setting new attributes on model classes, we
# have to fix this by providing the default __setattr__ behaviour
type(model).__setattr__ = lambda o, n, v: type.__setattr__(o, n, v)
# So what's going on here ? The idea is to create an "architect" namespace using the
# Architect class which is a descriptor itself, which when accessed returns the
# autogenerated class with all the requested features. The most important part here
# is that because we're using a descriptor we can get access to the model class from
# every feature class and all that absolutely correctly works even with the model
# inheritance. While the same can be achieved using metaclasses, the problem is that
# every ORM also uses metaclasses which produces the metaclass conflict because a
# class can't have two metaclasses. This situation can also be solved but it requires
# much more magical stuff to be written that is why this approach was chosen
class Architect(object):
def __init__(self, features):
self.map = {}
self.features = features
def __get__(self, model_obj, model_cls):
# If a model class accesses an architect namespace for the first
# time, we need to put it inside a map for the future reference
if model_cls not in self.map:
self.map[model_cls] = {'features': {}}
for feature, options in self.features.items():
self.map[model_cls]['features'][feature] = options['class'](
model_obj, model_cls, **options['options'])
self.map[model_cls]['architect'] = type('Architect', (object,), dict(
self.map[model_cls]['features'], **{'__module__': 'architect'}))
# We have to notify each feature object if a model object wants
# to get access to it, otherwise it won't have an idea about it
if model_obj is not None:
for feature in self.map[model_cls]['features']:
self.map[model_cls]['features'][feature].model_obj = model_obj
return self.map[model_cls]['architect']
model.architect = Architect(self.features)
return model
def init_feature(self, feature, model, features_registry):
"""
Initializes the requested feature.
:param string feature: (required). A feature to initialize.
:param class model: (required). A model where feature will be initialized.
:param dict features_registry: (required). A registry with available features for the current ORM.
"""
try:
feature_cls = features_registry[feature]
except KeyError:
raise FeatureInstallError(current=feature, model=model.__name__, allowed=features_registry.keys())
for name in feature_cls.decorate:
try:
original = getattr(model, name)
if getattr(original, 'is_decorated', False): # Handle the inheritance cases
original = original.original
except AttributeError:
raise MethodAutoDecorateError(current=name, model=model.__name__)
decorator = getattr(feature_cls, '_decorate_{0}'.format(name))
decorated = functools.wraps(original)(decorator(original))
decorated.original = original
decorated.is_decorated = True
setattr(model, name, decorated)
self.features[feature] = {
'class': feature_cls,
'options': self.options['feature'] if feature == self.feature else self.options['global']
}
if hasattr(feature_cls, 'register_hooks'):
feature_cls.register_hooks(model)
for dependency in feature_cls.dependencies:
self.init_feature(dependency, model, features_registry)
class uninstall(object):
"""
Uninstall decorator uninstalls the requested feature and all it's dependencies from a model.
"""
def __init__(self, feature):
"""
:param string feature: (required). A feature to uninstall.
"""
self.feature = feature
def __call__(self, model):
"""
:param class model: (required). A model class to work with.
"""
self.deinit_feature(self.feature, model)
return model
def deinit_feature(self, feature, model):
"""
Deinitializes requested feature and it's dependencies.
:param string feature: (required). A feature to deinitialize.
:param class model: (required). A model class to work with.
"""
try:
feature_obj = getattr(model.architect, feature)
except AttributeError:
raise FeatureUninstallError(
current=feature,
model=model.__name__,
allowed=[name for name, obj in model.architect.__dict__.items() if isinstance(obj, BaseFeature)])
# The concept of "unbound methods" has been removed from Python 3. When accessing a method
# from a class, we now get a plain function object. This is what the isfunction check for
methods = inspect.getmembers(model, predicate=lambda m: inspect.isfunction(m) or inspect.ismethod(m))
for name, method in methods:
if getattr(method, 'is_decorated', False):
setattr(model, name, method.original)
delattr(model.architect, feature) # TODO: prohibit uninstall if there are dependant features
for dependency in feature_obj.dependencies:
self.deinit_feature(dependency, model)
filtered:remove_function_no_docstring: 1,483 | filtered:remove_class_no_docstring: 3 | filtered:remove_delete_markers: 122
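The long comment in the file above explains why the "architect" namespace is exposed through a descriptor instead of a metaclass: a descriptor sees both the owning class and the instance on access, without colliding with the ORM's own metaclass. A stripped-down, hypothetical illustration of that descriptor idea (not the library's actual classes):

```python
# Toy illustration of the descriptor-based namespace described in the comment above.
# Names here are hypothetical; the real Architect descriptor builds feature objects.
class Namespace:
    def __init__(self, attrs):
        self.attrs = attrs

    def __get__(self, obj, cls):
        # Build a holder class on access; we see both the instance and the class.
        return type("Namespace", (object,), dict(self.attrs, owner=cls, instance=obj))

class Model:
    architect = Namespace({"greeting": "hello"})

print(Model.architect.owner)       # <class '__main__.Model'>
print(Model().architect.greeting)  # hello
```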
hexsha: 9c1d726650bd473804e5a039a8c9a56480255dc6 | size: 1,144 | ext: py | lang: Python
max_stars: path=Machine-Learning-A-Z-Udemy-master/Machine Learning A-Z Template Folder/Part 1 - Data Preprocessing/Mpython_working.py, repo=CT83/Independent-Coursework, head=5c7f93a7e05e64b13cb821f603efc54c92ad96c6, licenses=["Apache-2.0"], count=null, events null
max_issues: same path/repo/head/licenses, count=null, events null
max_forks: same path/repo/head/licenses, count=null, events null
content:
# -*- coding: utf-8 -*-
# Importing the Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the Data
dataset=pd.read_csv('Data.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,3].values
# Taking care of missing Data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
imputer = imputer.fit(X[:,1:3])
X[:,1:3] = imputer.transform(X[:,1:3])
# Encoding Categorical Data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:,0] = labelencoder_X.fit_transform(X[:,0])
onehotencoder = OneHotEncoder(categorical_features=[0])
X = onehotencoder.fit_transform(X).toarray()
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
#Splitting the Data into Training Set and Test Set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
avg_line_length: 30.918919 | max_line_length: 92 | alphanum_fraction: 0.768357
original_content:
# -*- coding: utf-8 -*-
# Importing the Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the Data
dataset=pd.read_csv('Data.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,3].values
# Taking care of missing Data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
imputer = imputer.fit(X[:,1:3])
X[:,1:3] = imputer.transform(X[:,1:3])
# Encoding Categorical Data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:,0] = labelencoder_X.fit_transform(X[:,0])
onehotencoder = OneHotEncoder(categorical_features=[0])
X = onehotencoder.fit_transform(X).toarray()
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
#Splitting the Data into Training Set and Test Set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
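The preprocessing script above targets an old scikit-learn release: `sklearn.preprocessing.Imputer`, `OneHotEncoder(categorical_features=...)` and `sklearn.cross_validation` were all removed in later versions. A hedged sketch of the same steps against the current API; column indices and the Data.csv layout are assumed to match the original tutorial:

```python
# Equivalent preprocessing with the modern scikit-learn API (a sketch, not the
# original course code): impute numeric columns 1-2, one-hot encode column 0.
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler

dataset = pd.read_csv("Data.csv")
X = dataset.iloc[:, :-1].values
y = LabelEncoder().fit_transform(dataset.iloc[:, 3].values)

ct = ColumnTransformer(
    [("onehot", OneHotEncoder(), [0]),
     ("impute", SimpleImputer(strategy="mean"), [1, 2])],
    sparse_threshold=0)  # force a dense array so StandardScaler can center it
X = ct.fit_transform(X)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
```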
hexsha: 18411d6cbef85b0fa08f04f819e453cb8daf0028 | size: 13,542 | ext: py | lang: Python
max_stars: path=tzdealer/tzdealer/report/customer_age/customer_age.py, repo=Lewinta/tzdealer, head=3e6a8f39e16029b217ae84bed806cb79bbc89dbf, licenses=["MIT"], count=null, events null
max_issues: same path/repo/head/licenses, count=null, events null
max_forks: same path/repo/head/licenses, count=null, events null
content:
# Copyright (c) 2013, TZCODE SRL and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr, flt, date_diff, nowdate
def get_data(filters):
"""
Return the data that needs to be rendered
"""
fields = get_fields(filters)
conditions = get_conditions(filters)
results = []
pinv_date = " "
sinv_date = " "
if filters.get('from_date'):
sinv_date = sinv_date + " AND `tabSales Invoice`.posting_date >= '{}' ".format(filters.get('from_date'))
pinv_date = pinv_date + " AND `tabPayment Entry`.posting_date >= '{}' ".format(filters.get('from_date'))
if filters.get('to_date'):
sinv_date = sinv_date + " AND `tabSales Invoice`.posting_date <= '{}' ".format(filters.get('to_date'))
pinv_date = pinv_date + " AND `tabPayment Entry`.posting_date <= '{}' ".format(filters.get('to_date'))
data = frappe.db.sql("""
Select
{fields}
From
`tabSales Invoice`
Inner Join
`tabSales Invoice Item`
On
`tabSales Invoice`.name = `tabSales Invoice Item`.parent
And
`tabSales Invoice`.docstatus = 1
{sinv_date}
Inner Join
`tabItem`
On
`tabSales Invoice Item`.item_code = `tabItem`.item_code
Left Join
`tabSales Taxes and Charges`
On
`tabSales Invoice`.name = `tabSales Taxes and Charges`.parent
Left Join
`tabTax Segment Item`
On
`tabSales Taxes and Charges`.account_head = `tabTax Segment Item`.tax
Left Join
`tabPayment Entry Reference`
On
`tabPayment Entry Reference`.reference_name = `tabSales Invoice`.name
And
`tabPayment Entry Reference`.docstatus = 1
Left Join
`tabPayment Entry`
On
`tabPayment Entry Reference`.parent = `tabPayment Entry`.name
And
`tabPayment Entry`.docstatus = 1
{pinv_date}
Left Join
`tabAddress`
On
`tabItem`.location = `tabAddress`.name
Left Join
`tabDelivery Checklist`
On
`tabDelivery Checklist`.name = `tabItem`.name
Where
{conditions}
Group By
`tabSales Invoice`.name, `tabPayment Entry Reference`.name
Order By
`tabSales Invoice`.name, `tabPayment Entry`.name
""".format(
fields=fields,
sinv_date=sinv_date,
pinv_date=pinv_date,
conditions=conditions or "1 = 1"),
filters, as_dict=True, debug=False)
last_inv = ''
vim = ''
entry = ''
pay_date = ''
mode = ''
for row in data:
total_costs = flt(row.pinv_price) + flt(row.fee) + flt(row.transport) + \
flt(row.delivery) + flt(row.parts) + flt(row.repair) + flt(row.others)
vim_number = row.cont_vim.split('-')[0] if row.cont_vim and '-' in row.cont_vim else row.vim_number
details = "-"
if row.item_type == "Vehicles":
details = "{} {} {} {}".format(row.make or "", row.model or "", row.exterior_color or "", row.year or "")
if row.item_type == "Containers":
details = "{} {}".format(row.booking_no or "", row.container_no or "")
if row.item_type == "Vehicle Parts":
details = row.part_type
if row.item_type == "Services":
details = row.item_type
if last_inv != row.sinv_name or vim_number != vim or entry != row.payment_entry:
paid_arr = [flt(x.allocated_amount) for x in filter(lambda x, n=row.sinv_name : x.get('sinv_name') == n and x.get("payment_type") == "Receive", data)]
returns_arr = [flt(x.allocated_amount) for x in filter(lambda x, n=row.sinv_name : x.get('sinv_name') == n and x.get("payment_type") == "Pay", data)]
total_paid = sum(paid_arr) if paid_arr else .00
total_return = sum(returns_arr) if returns_arr else .00
final_sale = flt(row.base_grand_total) + flt(row.credit_note)
if filters.get('print_customer'):
results.append(
(
row.customer ,
vim_number,
details,
row.base_grand_total if last_inv != row.sinv_name else .00,
row.credit_note if last_inv != row.sinv_name else '',
final_sale if last_inv != row.sinv_name and row.is_return == 0 else 0, # Final Sale
row.p_posting_date if entry != row.payment_entry or mode != row.mode_of_payment or pay_date != row.p_posting_date else '-',
row.mode_of_payment if entry != row.payment_entry or mode != row.mode_of_payment or pay_date != row.p_posting_date else ' ',
row.allocated_amount if last_inv != row.sinv_name or entry != row.payment_entry else .00,
total_paid if last_inv != row.sinv_name else .00,
total_return if last_inv != row.sinv_name else .00, #Refund
row.outstanding_amount if last_inv != row.sinv_name else .00,
)
)
else:
results.append(
(
row.company,
row.location,
row.item_code,
row.item_type, #Item Type.
row.customer ,
# row.customer if last_inv != row.sinv_name else '',
vim_number,
details,
# row.due_date if last_inv != row.sinv_name else '',
row.sinv_date if last_inv != row.sinv_name else '',
date_diff(nowdate(), row.sinv_date) if last_inv != row.sinv_name else '',
# row.net_total if last_inv != row.sinv_name else '',
# row.gst_total if last_inv != row.sinv_name else '',
# row.pst_total if last_inv != row.sinv_name else '',
row.base_grand_total if last_inv != row.sinv_name else .00,
row.credit_note if last_inv != row.sinv_name else '',
final_sale if last_inv != row.sinv_name and row.is_return == 0 else 0, # Final Sale
# row.grand_total if last_inv != row.sinv_name and row.currency == "USD" else .00,
row.p_posting_date if entry != row.payment_entry or mode != row.mode_of_payment or pay_date != row.p_posting_date else '-',
row.mode_of_payment if entry != row.payment_entry or mode != row.mode_of_payment or pay_date != row.p_posting_date else ' ',
row.allocated_amount if last_inv != row.sinv_name or entry != row.payment_entry else .00,
# flt(row.grand_total) - flt(row.outstanding_amount) if last_inv != row.sinv_name else .00, # Total Paid
total_paid if last_inv != row.sinv_name else .00,
total_return if last_inv != row.sinv_name else .00, #Refund
row.outstanding_amount if last_inv != row.sinv_name else .00,
row.item_code, # Notes
row.payment_entry,
row.sinv_name if last_inv != row.sinv_name else '',
row.checklist,
row.status,
row.title_status,
row.bl,
)
)
else:
if filters.get('customer_print'):
results.append(
(
"", # Customer
"", # Vim Number
"", # Details
"", # Total Sale
"", # Credit Note
"", # Final Sale
row.p_posting_date, # Pay Date
row.mode_of_payment, # Payment Type
row.allocated_amount, # Breakdown
row.payment_entry, # Payment Entry
row.bl
)
)
else:
results.append(
(
"", # Company
"", # Location
"", # Stock No.
"", #Item Type
"", # Customer
"", # Vim Number
"", # Details
# "", # Due Date
"", # Inv Date
"", # Age
"", # Total Sale
"", # Credit Note
"", # Final Sale
# "", # Sale N/ Total
# "", # GST
# "", # PST
row.p_posting_date, # Pay Date
row.mode_of_payment, # Payment Type
row.allocated_amount, # Breakdown
"", # Total Paid
"", # Refund
"", # Outstanding
" ", # Notes
row.payment_entry, # Payment Entry
"", # Checklist
"", # Status.
"", # Title Status.
# "", # GPrice
row.bl
)
)
last_inv = row.sinv_name
vim = vim_number
entry = row.payment_entry
pay_date = row.p_posting_date
mode = row.mode_of_payment
return results
def get_conditions(filters):
"""
Return sql conditions ready to use in query
NOTE: Value is mandatory if condition_type == value
"""
company = frappe.get_value("User Permission", {
"user":frappe.session.user,
"allow":"Company",
}, "for_value")
conditions = [("Item", "item_type", "!=", "Services")]
if filters.get('company'):
conditions.append(
("Sales Invoice", "company", "=", filters.get('company'))
)
if filters.get('customer'):
conditions.append(
("Sales Invoice", "customer", "=", filters.get('customer'))
)
if filters.get('payment_status') == "Unpaid Only":
conditions.append(
("Sales Invoice", "outstanding_amount", ">", 0)
)
if filters.get('payment_status') == "Paid Only":
conditions.append(
("Sales Invoice", "outstanding_amount", "=", 0)
)
if filters.get('item_code'):
conditions.append(
("Sales Invoice Item", "item_code", "=", filters.get('item_code'))
)
sql_conditions = []
if not conditions:
return sql_conditions
for doctype, fieldname, compare, value in conditions:
if fieldname == '-':
continue
if value == "NULL":
sql_condition = "`tab{doctype}`.`{fieldname}` {compare} {value}" \
.format(doctype=doctype, fieldname=fieldname, compare=compare,
value=value)
else:
sql_condition = "`tab{doctype}`.`{fieldname}` {compare} '{value}'" \
.format(doctype=doctype, fieldname=fieldname, compare=compare,
value=value)
sql_conditions.append(sql_condition)
# frappe.errprint(conditions)
return " And ".join(sql_conditions)
def get_fields(filters):
"""
Return sql fields ready to be used on query
"""
fields = (
("Sales Invoice", "company"),
("CONCAT(`tabItem`._default_supplier, ' - ', `tabAddress`.city, ', ', `tabAddress`.state) as location"),
("Sales Invoice Item", "item_code"),
("Item", "vim_number"),
("Item", "make"),
("Item", "model"),
("Item", "item_type"),
("Item", "part_type"),
("Item", "year"),
("Item", "exterior_color"),
("Delivery Checklist", "status", "checklist"),
("Sales Invoice Item", "vim_number", "cont_vim"),
("Sales Invoice Item", "item_name"),
("Sales Invoice", "due_date", "due_date"),
("Sales Invoice", "posting_date", "sinv_date"),
("Sales Invoice", "customer"),
("Sales Invoice", "invoice_type"),
("Sales Invoice", "net_total"),
("""
SUM(
IF(
`tabTax Segment Item`.parent = 'GST',
IFNULL(`tabSales Taxes and Charges`.tax_amount, 0), 0
)
) as gst_total
"""
),
("""
SUM(
IF(
`tabTax Segment Item`.parent = 'PST',
IFNULL(`tabSales Taxes and Charges`.tax_amount, 0), 0
)
) as pst_total
"""
),
("Sales Invoice", "currency"),
("Sales Invoice", "base_grand_total"),
("Sales Invoice", "grand_total"),
("Sales Invoice", "is_return"),
("Payment Entry", "posting_date", "p_posting_date"),
("""(SELECT SUM(b.grand_total) FROM `tabSales Invoice` as b WHERE b.is_return = 1 and b.docstatus = 1 and b.return_against = `tabSales Invoice`.name ) as credit_note"""),
("Payment Entry", "mode_of_payment"),
("Payment Entry", "payment_type"),
("Payment Entry Reference", "allocated_amount"),
("Sales Invoice", "outstanding_amount"),
("Payment Entry Reference", "parent", "payment_entry"),
("Sales Invoice", "name", "sinv_name"),
("Sales Invoice Item", "gprice"),
("Item", "status"),
("Item", "title_status"),
("Item", "bl"),
)
sql_fields = []
for args in fields:
sql_field = get_field(args)
sql_fields.append(sql_field)
# frappe.errprint(", ".join(sql_fields))
return ", ".join(sql_fields)
avg_line_length: 30.431461 | max_line_length: 172 | alphanum_fraction: 0.628563
original_content:
# Copyright (c) 2013, TZCODE SRL and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr, flt, date_diff, nowdate
def execute(filters=None):
return get_columns(filters), get_data(filters)
def get_columns(filters):
columns = ''
if filters.get("print_customer"):
columns = (
("Customer", "Link/Customer", 130),
("Vim Number", "Data", 150),
("Details", "Data", 250),
("Total Sale", "Currency", 90),
("Credit Note", "Currency", 90),
("Final Sale", "Currency", 90),
("Pay Date", "Date", 90),
("Payment Type", "Data", 100),
("Breakdown", "Currency", 100),
("Total Paid", "Currency", 90),
("Refund", "Currency", 90),
("Outstanding", "Currency", 100),
)
else:
columns = (
("Company", "Data", 120),
("S. Location", "Data", 230),
("Stock No.", "Link/Item", 110),
("Item Type", "Data", 100),
("Customer", "Link/Customer", 130),
("Vim Number", "Data", 150),
("Details", "Data", 250),
# ("Model", "Data", 100),
# ("Year", "Data", 60),
# ("Color", "Data", 70),
# ("Due Date", "Date", 90),
("Inv. Date", "Date", 90),
("Age", "Data", 90),
# ("Sale N/Total", "Currency", 100),
# ("GST", "Currency", 100),
# ("PST", "Currency", 100),
("Total Sale", "Currency", 90),
("Credit Note", "Currency", 90),
("Final Sale", "Currency", 90),
# ("Sale G/Total USD", "Currency", 150),
("Pay Date", "Date", 90),
("Payment Type", "Data", 100),
("Breakdown", "Currency", 100),
("Total Paid", "Currency", 90),
("Refund", "Currency", 90),
("Outstanding", "Currency", 100),
("Notes", "Data", 100),
("Payment Entry", "Link/Payment Entry", 100),
("Sales Inv.", "Link/Sales Invoice", 100),
# ("G Price", "Currency", 100),
("Checklist", "Data", 90),
("VEH Status", "Data", 100),
("Title Status", "Data", 100),
("BL", "Data", 90),
)
formatted_columns = []
for label, fieldtype, width in columns:
formatted_columns.append(
get_formatted_column(label, fieldtype, width)
)
return formatted_columns
def get_data(filters):
"""
Return the data that needs to be rendered
"""
fields = get_fields(filters)
conditions = get_conditions(filters)
results = []
pinv_date = " "
sinv_date = " "
if filters.get('from_date'):
sinv_date = sinv_date + " AND `tabSales Invoice`.posting_date >= '{}' ".format(filters.get('from_date'))
pinv_date = pinv_date + " AND `tabPayment Entry`.posting_date >= '{}' ".format(filters.get('from_date'))
if filters.get('to_date'):
sinv_date = sinv_date + " AND `tabSales Invoice`.posting_date <= '{}' ".format(filters.get('to_date'))
pinv_date = pinv_date + " AND `tabPayment Entry`.posting_date <= '{}' ".format(filters.get('to_date'))
data = frappe.db.sql("""
Select
{fields}
From
`tabSales Invoice`
Inner Join
`tabSales Invoice Item`
On
`tabSales Invoice`.name = `tabSales Invoice Item`.parent
And
`tabSales Invoice`.docstatus = 1
{sinv_date}
Inner Join
`tabItem`
On
`tabSales Invoice Item`.item_code = `tabItem`.item_code
Left Join
`tabSales Taxes and Charges`
On
`tabSales Invoice`.name = `tabSales Taxes and Charges`.parent
Left Join
`tabTax Segment Item`
On
`tabSales Taxes and Charges`.account_head = `tabTax Segment Item`.tax
Left Join
`tabPayment Entry Reference`
On
`tabPayment Entry Reference`.reference_name = `tabSales Invoice`.name
And
`tabPayment Entry Reference`.docstatus = 1
Left Join
`tabPayment Entry`
On
`tabPayment Entry Reference`.parent = `tabPayment Entry`.name
And
`tabPayment Entry`.docstatus = 1
{pinv_date}
Left Join
`tabAddress`
On
`tabItem`.location = `tabAddress`.name
Left Join
`tabDelivery Checklist`
On
`tabDelivery Checklist`.name = `tabItem`.name
Where
{conditions}
Group By
`tabSales Invoice`.name, `tabPayment Entry Reference`.name
Order By
`tabSales Invoice`.name, `tabPayment Entry`.name
""".format(
fields=fields,
sinv_date=sinv_date,
pinv_date=pinv_date,
conditions=conditions or "1 = 1"),
filters, as_dict=True, debug=False)
last_inv = ''
vim = ''
entry = ''
pay_date = ''
mode = ''
for row in data:
total_costs = flt(row.pinv_price) + flt(row.fee) + flt(row.transport) + \
flt(row.delivery) + flt(row.parts) + flt(row.repair) + flt(row.others)
vim_number = row.cont_vim.split('-')[0] if row.cont_vim and '-' in row.cont_vim else row.vim_number
details = "-"
if row.item_type == "Vehicles":
details = "{} {} {} {}".format(row.make or "", row.model or "", row.exterior_color or "", row.year or "")
if row.item_type == "Containers":
details = "{} {}".format(row.booking_no or "", row.container_no or "")
if row.item_type == "Vehicle Parts":
details = row.part_type
if row.item_type == "Services":
details = row.item_type
if last_inv != row.sinv_name or vim_number != vim or entry != row.payment_entry:
paid_arr = [flt(x.allocated_amount) for x in filter(lambda x, n=row.sinv_name : x.get('sinv_name') == n and x.get("payment_type") == "Receive", data)]
returns_arr = [flt(x.allocated_amount) for x in filter(lambda x, n=row.sinv_name : x.get('sinv_name') == n and x.get("payment_type") == "Pay", data)]
total_paid = sum(paid_arr) if paid_arr else .00
total_return = sum(returns_arr) if returns_arr else .00
final_sale = flt(row.base_grand_total) + flt(row.credit_note)
if filters.get('print_customer'):
results.append(
(
row.customer ,
vim_number,
details,
row.base_grand_total if last_inv != row.sinv_name else .00,
row.credit_note if last_inv != row.sinv_name else '',
final_sale if last_inv != row.sinv_name and row.is_return == 0 else 0, # Final Sale
row.p_posting_date if entry != row.payment_entry or mode != row.mode_of_payment or pay_date != row.p_posting_date else '-',
row.mode_of_payment if entry != row.payment_entry or mode != row.mode_of_payment or pay_date != row.p_posting_date else ' ',
row.allocated_amount if last_inv != row.sinv_name or entry != row.payment_entry else .00,
total_paid if last_inv != row.sinv_name else .00,
total_return if last_inv != row.sinv_name else .00, #Refund
row.outstanding_amount if last_inv != row.sinv_name else .00,
)
)
else:
results.append(
(
row.company,
row.location,
row.item_code,
row.item_type, #Item Type.
row.customer ,
# row.customer if last_inv != row.sinv_name else '',
vim_number,
details,
# row.due_date if last_inv != row.sinv_name else '',
row.sinv_date if last_inv != row.sinv_name else '',
date_diff(nowdate(), row.sinv_date) if last_inv != row.sinv_name else '',
# row.net_total if last_inv != row.sinv_name else '',
# row.gst_total if last_inv != row.sinv_name else '',
# row.pst_total if last_inv != row.sinv_name else '',
row.base_grand_total if last_inv != row.sinv_name else .00,
row.credit_note if last_inv != row.sinv_name else '',
final_sale if last_inv != row.sinv_name and row.is_return == 0 else 0, # Final Sale
# row.grand_total if last_inv != row.sinv_name and row.currency == "USD" else .00,
row.p_posting_date if entry != row.payment_entry or mode != row.mode_of_payment or pay_date != row.p_posting_date else '-',
row.mode_of_payment if entry != row.payment_entry or mode != row.mode_of_payment or pay_date != row.p_posting_date else ' ',
row.allocated_amount if last_inv != row.sinv_name or entry != row.payment_entry else .00,
# flt(row.grand_total) - flt(row.outstanding_amount) if last_inv != row.sinv_name else .00, # Total Paid
total_paid if last_inv != row.sinv_name else .00,
total_return if last_inv != row.sinv_name else .00, #Refund
row.outstanding_amount if last_inv != row.sinv_name else .00,
row.item_code, # Notes
row.payment_entry,
row.sinv_name if last_inv != row.sinv_name else '',
row.checklist,
row.status,
row.title_status,
row.bl,
)
)
else:
if filters.get('customer_print'):
results.append(
(
"", # Customer
"", # Vim Number
"", # Details
"", # Total Sale
"", # Credit Note
"", # Final Sale
row.p_posting_date, # Pay Date
row.mode_of_payment, # Payment Type
row.allocated_amount, # Breakdown
row.payment_entry, # Payment Entry
row.bl
)
)
else:
results.append(
(
"", # Company
"", # Location
"", # Stock No.
"", #Item Type
"", # Customer
"", # Vim Number
"", # Details
# "", # Due Date
"", # Inv Date
"", # Age
"", # Total Sale
"", # Credit Note
"", # Final Sale
# "", # Sale N/ Total
# "", # GST
# "", # PST
row.p_posting_date, # Pay Date
row.mode_of_payment, # Payment Type
row.allocated_amount, # Breakdown
"", # Total Paid
"", # Refund
"", # Outstanding
" ", # Notes
row.payment_entry, # Payment Entry
"", # Checklist
"", # Status.
"", # Title Status.
# "", # GPrice
row.bl
)
)
last_inv = row.sinv_name
vim = vim_number
entry = row.payment_entry
pay_date = row.p_posting_date
mode = row.mode_of_payment
return results
def get_formatted_column(label, fieldtype, width):
# [label]:[fieldtype/Options]:width
parts = (
_(label),
fieldtype,
cstr(width)
)
return ":".join(parts)
def get_conditions(filters):
"""
Return sql conditions ready to use in query
NOTE: Value is mandatory if condition_type == value
"""
company = frappe.get_value("User Permission", {
"user":frappe.session.user,
"allow":"Company",
}, "for_value")
conditions = [("Item", "item_type", "!=", "Services")]
if filters.get('company'):
conditions.append(
("Sales Invoice", "company", "=", filters.get('company'))
)
if filters.get('customer'):
conditions.append(
("Sales Invoice", "customer", "=", filters.get('customer'))
)
if filters.get('payment_status') == "Unpaid Only":
conditions.append(
("Sales Invoice", "outstanding_amount", ">", 0)
)
if filters.get('payment_status') == "Paid Only":
conditions.append(
("Sales Invoice", "outstanding_amount", "=", 0)
)
if filters.get('item_code'):
conditions.append(
("Sales Invoice Item", "item_code", "=", filters.get('item_code'))
)
sql_conditions = []
if not conditions:
return sql_conditions
for doctype, fieldname, compare, value in conditions:
if fieldname == '-':
continue
if value == "NULL":
sql_condition = "`tab{doctype}`.`{fieldname}` {compare} {value}" \
.format(doctype=doctype, fieldname=fieldname, compare=compare,
value=value)
else:
sql_condition = "`tab{doctype}`.`{fieldname}` {compare} '{value}'" \
.format(doctype=doctype, fieldname=fieldname, compare=compare,
value=value)
sql_conditions.append(sql_condition)
# frappe.errprint(conditions)
return " And ".join(sql_conditions)
def get_fields(filters):
"""
Return sql fields ready to be used on query
"""
fields = (
("Sales Invoice", "company"),
("CONCAT(`tabItem`._default_supplier, ' - ', `tabAddress`.city, ', ', `tabAddress`.state) as location"),
("Sales Invoice Item", "item_code"),
("Item", "vim_number"),
("Item", "make"),
("Item", "model"),
("Item", "item_type"),
("Item", "part_type"),
("Item", "year"),
("Item", "exterior_color"),
("Delivery Checklist", "status", "checklist"),
("Sales Invoice Item", "vim_number", "cont_vim"),
("Sales Invoice Item", "item_name"),
("Sales Invoice", "due_date", "due_date"),
("Sales Invoice", "posting_date", "sinv_date"),
("Sales Invoice", "customer"),
("Sales Invoice", "invoice_type"),
("Sales Invoice", "net_total"),
("""
SUM(
IF(
`tabTax Segment Item`.parent = 'GST',
IFNULL(`tabSales Taxes and Charges`.tax_amount, 0), 0
)
) as gst_total
"""
),
("""
SUM(
IF(
`tabTax Segment Item`.parent = 'PST',
IFNULL(`tabSales Taxes and Charges`.tax_amount, 0), 0
)
) as pst_total
"""
),
("Sales Invoice", "currency"),
("Sales Invoice", "base_grand_total"),
("Sales Invoice", "grand_total"),
("Sales Invoice", "is_return"),
("Payment Entry", "posting_date", "p_posting_date"),
("""(SELECT SUM(b.grand_total) FROM `tabSales Invoice` as b WHERE b.is_return = 1 and b.docstatus = 1 and b.return_against = `tabSales Invoice`.name ) as credit_note"""),
("Payment Entry", "mode_of_payment"),
("Payment Entry", "payment_type"),
("Payment Entry Reference", "allocated_amount"),
("Sales Invoice", "outstanding_amount"),
("Payment Entry Reference", "parent", "payment_entry"),
("Sales Invoice", "name", "sinv_name"),
("Sales Invoice Item", "gprice"),
("Item", "status"),
("Item", "title_status"),
("Item", "bl"),
)
sql_fields = []
for args in fields:
sql_field = get_field(args)
sql_fields.append(sql_field)
# frappe.errprint(", ".join(sql_fields))
return ", ".join(sql_fields)
def get_field(args):
if len(args) == 2:
doctype, fieldname = args
alias = fieldname
elif len(args) == 3:
doctype, fieldname, alias = args
else:
return args
sql_field = "`tab{doctype}`.`{fieldname}` as {alias}" \
.format(doctype=doctype, fieldname=fieldname, alias=alias)
return sql_field
filtered:remove_function_no_docstring: 2,287 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 92
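The Frappe report above builds its SQL select list from field tuples via `get_field` and its column headers via the `[label]:[fieldtype/Options]:width` convention. A small standalone sketch of what those helpers produce for one entry, reusing the same string formatting as the record (no Frappe site needed to run it):

```python
# Sketch of the tuple-to-SQL and column-string conventions used by the report above.
def get_field(args):
    if len(args) == 2:
        doctype, fieldname = args
        alias = fieldname
    elif len(args) == 3:
        doctype, fieldname, alias = args
    else:
        return args  # raw SQL snippets are passed through unchanged
    return "`tab{doctype}`.`{fieldname}` as {alias}".format(
        doctype=doctype, fieldname=fieldname, alias=alias)

print(get_field(("Sales Invoice", "posting_date", "sinv_date")))
# `tabSales Invoice`.`posting_date` as sinv_date
print(":".join(("Total Sale", "Currency", "90")))
# Total Sale:Currency:90  (the [label]:[fieldtype/Options]:width column format)
```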
hexsha: 6469e5d0d5c80ef3183e5f71c4afdb4b3f3f36a4 | size: 934 | ext: py | lang: Python
max_stars: path=setup.py, repo=bertsky/ocrd_vandalize, head=0cb84fb387d0145d386c2a425f20beee5d355b3d, licenses=["Apache-2.0"], count=8, events 2021-12-15T14:45:11.000Z to 2022-02-09T16:51:20.000Z
max_issues: same path/repo/head/licenses, count=1, events 2022-01-14T10:58:27.000Z to 2022-01-14T10:58:27.000Z
max_forks: same path/repo/head/licenses, count=2, events 2022-01-14T11:06:54.000Z to 2022-01-21T14:54:10.000Z
content:
# -*- coding: utf-8 -*-
import codecs
import json
from setuptools import setup, find_packages
#
with codecs.open('README.md', encoding='utf-8') as f:
README = f.read()
with open('./ocrd-tool.json', 'r') as f:
version = json.load(f)['version']
setup(
name='ocrd_vandalize',
version=version,
    description='Demo processor to illustrate the OCR-D Python API',
long_description=README,
long_description_content_type='text/markdown',
author='OCR-D',
author_email='info@ocr-d.de',
url='https://github.com/OCR-D/ocrd_vandalize',
license='Apache License 2.0',
packages=find_packages(exclude=('tests', 'docs')),
include_package_data=True,
install_requires=open('requirements.txt').read().split('\n'),
package_data={
'': ['*.json', '*.ttf'],
},
entry_points={
'console_scripts': [
'ocrd-vandalize=ocrd_vandalize.ocrd_cli:cli',
]
},
)
avg_line_length: 26.685714 | max_line_length: 68 | alphanum_fraction: 0.638116
original_content:
# -*- coding: utf-8 -*-
import codecs
import json
from setuptools import setup, find_packages
#
with codecs.open('README.md', encoding='utf-8') as f:
README = f.read()
with open('./ocrd-tool.json', 'r') as f:
version = json.load(f)['version']
setup(
name='ocrd_vandalize',
version=version,
    description='Demo processor to illustrate the OCR-D Python API',
long_description=README,
long_description_content_type='text/markdown',
author='OCR-D',
author_email='info@ocr-d.de',
url='https://github.com/OCR-D/ocrd_vandalize',
license='Apache License 2.0',
packages=find_packages(exclude=('tests', 'docs')),
include_package_data=True,
install_requires=open('requirements.txt').read().split('\n'),
package_data={
'': ['*.json', '*.ttf'],
},
entry_points={
'console_scripts': [
'ocrd-vandalize=ocrd_vandalize.ocrd_cli:cli',
]
},
)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
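The setup.py above takes its package version from the `version` key of ocrd-tool.json. A minimal, hypothetical example of such a file and of reading it the same way:

```python
# Hypothetical minimal ocrd-tool.json and the read pattern setup.py above uses.
import json

with open("ocrd-tool.json", "w") as f:
    json.dump({"version": "0.0.1", "tools": {}}, f)  # placeholder content, not the real tool file
with open("./ocrd-tool.json", "r") as f:
    version = json.load(f)["version"]
print(version)  # 0.0.1
```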
hexsha: 77f7cfaaa0a25a3602dcfef15eb55e962868b137 | size: 142 | ext: py | lang: Python
max_stars: path=exercise/test4.py, repo=LeeBeral/python, head=9f0d360d69ee5245e3ef13a9dc9fc666374587a4, licenses=["MIT"], count=null, events null
max_issues: same path/repo/head/licenses, count=null, events null
max_forks: same path/repo/head/licenses, count=null, events null
content:
dic = {'name':'lili','age':12}
print(dic)
del dic['name']
print(dic)
dic2 ={'name':'mam'}
print(dic2)
dic.update(dic2)
print(dic)
avg_line_length: 12.909091 | max_line_length: 31 | alphanum_fraction: 0.584507
original_content:
dic = {'name':'lili','age':12}
print(dic)
del dic['name']
print(dic)
dic2 ={'name':'mam'}
print(dic2)
dic.update(dic2)
print(dic)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
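For reference, the dictionary exercise above produces the following output on CPython 3.7+, where dictionaries preserve insertion order:

```python
# The exercise above, annotated with the value each print produces (Python 3.7+).
dic = {'name': 'lili', 'age': 12}
print(dic)             # {'name': 'lili', 'age': 12}
del dic['name']
print(dic)             # {'age': 12}
dic2 = {'name': 'mam'}
print(dic2)            # {'name': 'mam'}
dic.update(dic2)
print(dic)             # {'age': 12, 'name': 'mam'}
```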
hexsha: 084a7a4364ba109593d9c831baac3d5b551aa8da | size: 3,182 | ext: py | lang: Python
max_stars: path=hacktheback/rest/forms/serializers/form.py, repo=hackthevalley/hack-the-back, head=a418f2d2751656fed76d0b8c95c8e2a060525e78, licenses=["MIT"], count=null, events null
max_issues: same path/repo/head/licenses, count=null, events null
max_forks: same path/repo/head/licenses, count=null, events null
content:
from django.db import transaction
from rest_framework import serializers
from hacktheback.forms.models import Form, Question, QuestionOption
avg_line_length: 29.738318 | max_line_length: 79 | alphanum_fraction: 0.562539
original_content:
from django.db import transaction
from rest_framework import serializers
from hacktheback.forms.models import Form, Question, QuestionOption
class QuestionOptionSerializer(serializers.ModelSerializer):
class Meta:
model = QuestionOption
exclude = ("question",)
def create(self, validated_data):
"""
Create a new :model: `forms.QuestionOption` object.
"""
question = self.context["question"]
return QuestionOption.objects.create(
question=question, **validated_data
)
class QuestionSerializer(serializers.ModelSerializer):
options = QuestionOptionSerializer(
many=True, required=False, read_only=True
)
class Meta:
model = Question
fields = (
"id",
"order",
"label",
"type",
"description",
"placeholder",
"required",
"default_answer",
"options",
)
def to_representation(self, instance):
"""
Set `options` to return a null value if the type of the instance is not
an option type.
"""
ret = super().to_representation(instance)
if instance.type not in Question.OPTION_TYPES:
ret["options"] = None
return ret
def create(self, validated_data):
"""
Create a new :model: `forms.Question` object.
"""
form = self.context["form"]
return Question.objects.create(form=form, **validated_data)
def update(self, instance, validated_data):
"""
Update a :model: `forms.Question` object.
"""
with transaction.atomic():
# Save Question object
instance.label = validated_data.get("label", instance.label)
prev_type = instance.type
instance.type = validated_data.get("type", instance.type)
instance.description = validated_data.get(
"description", instance.description
)
instance.placeholder = validated_data.get(
"placeholder", instance.placeholder
)
instance.required = validated_data.get(
"required", instance.required
)
instance.default_answer = validated_data.get(
"default_answer", instance.default_answer
)
instance.save()
if (
instance.type in Question.NON_OPTION_TYPES
and prev_type in Question.OPTION_TYPES
):
# Delete previous options if the question was an option
# type but no longer is that type anymore
QuestionOption.objects.filter(question=instance).delete()
return
class FormSerializer(serializers.ModelSerializer):
questions = QuestionSerializer(many=True, read_only=True)
class Meta:
model = Form
fields = (
"id",
"title",
"description",
"type",
"is_draft",
"questions",
"start_at",
"end_at",
"created_at",
)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 2,968 | filtered:remove_delete_markers: 69
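The serializers above expect their parent objects via serializer context: `QuestionSerializer.create` reads `self.context["form"]` and `QuestionOptionSerializer.create` reads `self.context["question"]`. A hedged sketch of how a view or test might drive `QuestionSerializer`; the `form` instance is assumed to already exist and the payload values are illustrative placeholders, not the model's actual choice values:

```python
# Usage sketch (assumes a configured Django project and a saved Form instance `form`).
from hacktheback.rest.forms.serializers.form import QuestionSerializer

payload = {"order": 1, "label": "Full name", "type": "text", "required": True}  # placeholder values
serializer = QuestionSerializer(data=payload, context={"form": form})
serializer.is_valid(raise_exception=True)
question = serializer.save()  # create() attaches the form taken from the context
```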
hexsha: e49d30b97a80af7ec73993c80fa023f88465b849 | size: 298 | ext: py | lang: Python
max_stars: path=data/kids/nofz/delta_z_correlated_to_uncorrelated.py, repo=KiDS-WL/Cat_to_Obs_K1000_P1, head=0de7f79cab150416859ffe58ac2d0f5659aedb5d, licenses=["MIT"], count=7, events 2020-11-18T12:58:03.000Z to 2021-07-01T08:54:29.000Z
max_issues: same path/repo/head/licenses, count=null, events null
max_forks: same path/repo/head/licenses, count=3, events 2020-12-09T13:30:22.000Z to 2022-03-02T01:40:13.000Z
content:
import numpy as np
filename='deltaz.asc'
file=open(filename)
delta_z=np.loadtxt(file,comments='#')
filename='SOM_cov_multiplied.asc'
file=open(filename)
cov_z=np.loadtxt(file,comments='#')
L = np.linalg.cholesky(cov_z)
inv_L = np.linalg.inv(L)
delta_x = np.dot(inv_L,delta_z)
print(delta_x)
avg_line_length: 16.555556 | max_line_length: 37 | alphanum_fraction: 0.744966
original_content:
import numpy as np
filename='deltaz.asc'
file=open(filename)
delta_z=np.loadtxt(file,comments='#')
filename='SOM_cov_multiplied.asc'
file=open(filename)
cov_z=np.loadtxt(file,comments='#')
L = np.linalg.cholesky(cov_z)
inv_L = np.linalg.inv(L)
delta_x = np.dot(inv_L,delta_z)
print(delta_x)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
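The script above decorrelates the redshift shifts: with the covariance factored as C = L L^T (Cholesky), delta_x = L^-1 delta_z has unit, uncorrelated errors. A self-contained check of that identity on synthetic data (the input files themselves are not reproduced here):

```python
# Synthetic check of the decorrelation step used above: L^-1 C (L^-1)^T = I.
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(5, 5))
cov_z = A @ A.T + 5 * np.eye(5)   # a synthetic positive-definite covariance
L = np.linalg.cholesky(cov_z)
inv_L = np.linalg.inv(L)
print(np.allclose(inv_L @ cov_z @ inv_L.T, np.eye(5)))  # True
```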
hexsha: 69d6b18ed07e94f12372de95034df57e7fe40895 | size: 12,986 | ext: py | lang: Python
max_stars: path=Main/fase_1.py, repo=DouglasAndC/GAME-COLORANDO-PYGAME, head=b96cc8df2dcfeb5e8a2ed0abb72c7dfb5bde2a00, licenses=["MIT"], count=null, events null
max_issues: same path/repo/head/licenses, count=null, events null
max_forks: same path/repo/head/licenses, count=null, events null
content:
import pygame
from pygame.locals import *
from pygame import *
import globals
import pygame as pg
from pygame.mixer import Sound
import time
pg.init()
# definindo cores
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
ROXO = (127,0,127)
LARANJA = (255, 127, 0)
AMARELO = (255,255,0)
COR1 = (153, 0, 153)
PELE = (251, 230, 226)
CorSelecionada = (0, 255, 0)
global cor1
cor1 = (0, 0, 0)
cont = 1
fase = 0
pygame.init()
pygame.mixer.init()
fundo = pygame.mixer.music.load(globals.get_path() + '\\Sound\\gameplay.mpeg')
click = pygame.mixer.Sound(globals.get_path() + '\\Sound\\click.wav')
pygame.mixer.music.play()
screen = pygame.display.set_mode((800, 700))
# carregando fonte
font = pygame.font.SysFont(None, 55)
pygame.display.set_caption('COLORANDO')
# preenchendo o fundo com preto
screen.fill(PELE)
regras()
menu()
menu_botoes = menu()
while True:
if(cont == 1):
menu()
cor1 = primeiro()
cont= cont + 1
elif(cont == 2):
cor2 = segundo()
cont= cont + 1
pygame.display.flip()
elif(cont == 3):
resultado = misturar(cor1,cor2)
decisao = clicarConfirmarOuExcluir()
if(decisao==1):
if (fase == 0):
if(resultado == GREEN):
fase = fase + 1
cont = 1
else:
cont = 1
elif(fase == 1):
if(resultado == LARANJA):
fase = 2
cont = 1
else:
cont = 1
elif(fase == 2):
if(resultado == ROXO):
fase = 3
cont = 4
else:
cont = 1
else:
cont = 1
else:
cont = 1
elif(cont == 4):
break
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
import main
avg_line_length: 38.306785 | max_line_length: 110 | alphanum_fraction: 0.495303
original_content:
import pygame
from pygame.locals import *
from pygame import *
import globals
import pygame as pg
from pygame.mixer import Sound
import time
pg.init()
# definindo cores
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
ROXO = (127,0,127)
LARANJA = (255, 127, 0)
AMARELO = (255,255,0)
COR1 = (153, 0, 153)
PELE = (251, 230, 226)
CorSelecionada = (0, 255, 0)
global cor1
cor1 = (0, 0, 0)
cont = 1
fase = 0
pygame.init()
pygame.mixer.init()
fundo = pygame.mixer.music.load(globals.get_path() + '\\Sound\\gameplay.mpeg')
click = pygame.mixer.Sound(globals.get_path() + '\\Sound\\click.wav')
pygame.mixer.music.play()
screen = pygame.display.set_mode((800, 700))
# carregando fonte
font = pygame.font.SysFont(None, 55)
pygame.display.set_caption('COLORANDO')
# preenchendo o fundo com preto
screen.fill(PELE)
def regras():
flag = True
while True:
if(flag== False):
break
else:
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
pressed1,pressed2,pressed3 = pygame.mouse.get_pressed()
btn0 = botoesMenu(BLACK,300,500,372/2,149/2)
txtFase3 = pygame.image.load(globals.get_path()+'\\View\\fase1\\txtFase1.png').convert_alpha()
screen.fill(PELE)
novo_txtFase3 = pygame.transform.scale(txtFase3,(int(1363/2),int(541/2)))
screen.blit(novo_txtFase3,(60,150))
screen.blit(botao_continuar('btnContinuar'),(300,500))
pygame.display.update()
if event.type == QUIT:
pygame.quit()
exit()
elif btn0.collidepoint(pos) and pressed1:
click.play()
flag = False
break
def botao_continuar(botao):
btn = pygame.image.load(globals.get_path()+'\\View\\corrida\\'+botao+'.png').convert_alpha()
novo_botao = pygame.transform.scale(btn,(int(372/2),int(149/2)))
return novo_botao
def dimensao_botao(botao):
btn = pygame.image.load(globals.get_path()+'\\View\\fase1\\'+botao+'.png').convert_alpha()
novo_botao = pygame.transform.scale(btn,(int(400),int(1500/2)))
return novo_botao
def titulo():
titulo = pygame.image.load(globals.get_path()+'\\View\\fase1\\titulo.png').convert_alpha()
novo_titulo = pygame.transform.scale(titulo,(int(726/2), int(217/2)))
screen.blit(novo_titulo,(210,50))
def torneira():
titulo = pygame.image.load(globals.get_path()+'\\View\\fase1\\torneira.png').convert_alpha()
novo_titulo = pygame.transform.scale(titulo,(int(650), int(500)))
screen.blit(novo_titulo,(-200,0))
def balde():
titulo = pygame.image.load(globals.get_path()+'\\View\\fase1\\balde.png').convert_alpha()
novo_titulo = pygame.transform.scale(titulo,(int(500), int(700)))
screen.blit(novo_titulo,(2,0))
def botoesMenu(color,x,y,width,height):
return pygame.draw.rect(screen, color, [x, y, width, height])
def instrucao():
titulo = pygame.image.load(globals.get_path()+'\\View\\fase1\\txtCorFormada.png').convert_alpha()
screen.blit(titulo,(50,40))
def btnConfirmar():
titulo = pygame.image.load(globals.get_path()+'\\View\\fase1\\btnConfirmar.png').convert_alpha()
novo_titulo = pygame.transform.scale(titulo,(int(115), int(215)))
screen.blit(novo_titulo,(500,495))
def menu():
btn1 = botoesMenu(BLACK,630,240,40,70)#azul
btn2 = botoesMenu(BLACK,630,340,40,70)#amarelo
btn3 = botoesMenu(BLACK,630,440,40,70)#vermelho
btn4 = botoesMenu(BLACK,730,240,40,70)#verde
btn5 = botoesMenu(BLACK,730,340,40,70)#laranja
btn6 = botoesMenu(BLACK,730,440,40,70)#roxo
btn7 = botoesMenu(BLACK,620,580,60,100)#juntar
btn8 = botoesMenu(BLACK,720,560,60,130)#lixo
btn9 = botoesMenu(BLACK,515,560,60,120)#confirma
screen.fill(PELE)
#titulo()
instrucao()
torneira()
balde()
#Quadrados que indicarão a cor a ser desenhada
#screen.blit(dimensao_botao('quad_verde'),(460,-300))
#screen.blit(dimensao_botao('quad_laranja'),(460,-300))
#screen.blit(dimensao_botao('quad_roxo'),(460,-300))
if(fase ==0):
screen.blit(dimensao_botao('quad_verde'),(460,-300))
elif(fase == 1):
screen.blit(dimensao_botao('quad_laranja'),(460,-300))
else:
screen.blit(dimensao_botao('quad_roxo'),(460,-300))
screen.blit(dimensao_botao('btnAzul'),(450,-100))
screen.blit(dimensao_botao('btnAmarelo'),(450,0))
screen.blit(dimensao_botao('btnVermelho'),(450,100))
if(fase >=1):
screen.blit(dimensao_botao('btnVerde'),(550,-100))
if(fase >= 2):
screen.blit(dimensao_botao('btnLaranja'),(550,0))
if(fase >= 3):
screen.blit(dimensao_botao('btnRoxo'),(550,100))
pygame.display.update()
return btn1,btn2,btn3,btn4,btn5,btn6,btn7,btn8,btn9
def derramando1(cor,x, y, widht,height):
aux = 1
while(aux < 365):
botoesMenu(cor,188,313,60,aux)
pygame.display.flip()
aux= aux + 1
botoesMenu(cor,x, y, widht,height)
pygame.display.flip()
def derramando2(cor,x, y, widht,height):
aux = 1
while(aux!= 195):
botoesMenu(cor,188,313,60,aux)
pygame.display.flip()
aux= aux + 1
botoesMenu(cor,x, y, widht,height)
pygame.display.flip()
def primeiro():
while True:
for event in pygame.event.get():
pygame.display.update()
pos = pygame.mouse.get_pos()
pressed1, pressed2, pressed3 = pygame.mouse.get_pressed()
if menu_botoes[0].collidepoint(pos) and pressed1:
derramando1(BLUE,19, 508, 468, 170)
botoesMenu(PELE,188,313,60,195)
pygame.display.flip()
return BLUE
elif menu_botoes[1].collidepoint(pos) and pressed1:
derramando1(AMARELO,19, 508, 468, 170)
botoesMenu(PELE,188,313,60,195)
pygame.display.flip()
return AMARELO
elif menu_botoes[2].collidepoint(pos) and pressed1:
derramando1(RED,19, 508, 468, 170)
botoesMenu(PELE,188,313,60,195)
pygame.display.flip()
return RED
elif menu_botoes[3].collidepoint(pos) and pressed1:
if(fase>= 1):
derramando1(GREEN,19, 508, 468, 170)
botoesMenu(PELE,188,313,60,195)
pygame.display.flip()
return GREEN
elif menu_botoes[4].collidepoint(pos) and pressed1:
if(fase>= 2):
derramando1(LARANJA,19, 508, 468, 170)
botoesMenu(PELE,188,313,60,195)
pygame.display.flip()
return LARANJA
elif menu_botoes[5].collidepoint(pos) and pressed1:
if(fase>= 3):
derramando1(ROXO,19, 508, 468, 170)
botoesMenu(PELE,188,313,60,195)
pygame.display.flip()
return ROXO
if event.type == QUIT:
pygame.quit()
exit()
def segundo():
while True:
for event in pygame.event.get():
pygame.display.update()
pos = pygame.mouse.get_pos()
pressed1, pressed2, pressed3 = pygame.mouse.get_pressed()
if menu_botoes[0].collidepoint(pos) and pressed1:
derramando2(BLUE,19, 373, 468, 170)
botoesMenu(PELE,188,313,60,60)
pygame.display.flip()
return BLUE
elif menu_botoes[1].collidepoint(pos) and pressed1:
derramando2(AMARELO,19, 373, 468, 170)
botoesMenu(PELE,188,313,60,60)
pygame.display.flip()
return AMARELO
elif menu_botoes[2].collidepoint(pos) and pressed1:
derramando2(RED,19, 373, 468, 170)
botoesMenu(PELE,188,313,60,60)
pygame.display.flip()
return RED
elif menu_botoes[3].collidepoint(pos) and pressed1:
if(fase>= 1):
derramando2(GREEN,19, 373, 468, 170)
botoesMenu(PELE,188,313,60,60)
pygame.display.flip()
return GREEN
elif menu_botoes[4].collidepoint(pos) and pressed1:
if(fase>= 2):
derramando2(LARANJA,19, 373, 468, 170)
botoesMenu(PELE,188,313,60,60)
pygame.display.flip()
return LARANJA
elif menu_botoes[5].collidepoint(pos) and pressed1:
if(fase>= 3):
derramando2(ROXO,19, 373, 468, 170)
botoesMenu(PELE,188,313,60,60)
pygame.display.flip()
return ROXO
if event.type == QUIT:
pygame.quit()
exit()
def misturar(cor1,cor2):
screen.blit(dimensao_botao('btnMistura'),(450,250))
while True:
for event in pygame.event.get():
pygame.display.update()
pos = pygame.mouse.get_pos()
pressed1, pressed2, pressed3 = pygame.mouse.get_pressed()
if menu_botoes[6].collidepoint(pos) and pressed1:
resultado = globals.misturar_cores(cor1,cor2)
botoesMenu(resultado,19, 373, 468, 305)
botoesMenu(PELE,620,560,70,120)
pygame.display.update()
return resultado
if event.type == QUIT:
pygame.quit()
exit()
def clicarConfirmarOuExcluir():
botoesMenu(PELE,620,560,70,120)
pygame.display.update()
btnConfirmar()
screen.blit(dimensao_botao('btnExclui'),(550,250))
while True:
for event in pygame.event.get():
pygame.display.update()
pos = pygame.mouse.get_pos()
pressed1, pressed2, pressed3 = pygame.mouse.get_pressed()
if menu_botoes[7].collidepoint(pos) and pressed1:
screen.fill(PELE)
menu()
return 0
elif menu_botoes[8].collidepoint(pos) and pressed1:
screen.fill(PELE)
menu()
return 1
elif event.type == QUIT:
pygame.quit()
exit()
regras()
menu()
menu_botoes = menu()
while True:
if(cont == 1):
menu()
cor1 = primeiro()
cont= cont + 1
elif(cont == 2):
cor2 = segundo()
cont= cont + 1
pygame.display.flip()
elif(cont == 3):
resultado = misturar(cor1,cor2)
decisao = clicarConfirmarOuExcluir()
if(decisao==1):
if (fase == 0):
if(resultado == GREEN):
fase = fase + 1
cont = 1
else:
cont = 1
elif(fase == 1):
if(resultado == LARANJA):
fase = 2
cont = 1
else:
cont = 1
elif(fase == 2):
if(resultado == ROXO):
fase = 3
cont = 4
else:
cont = 1
else:
cont = 1
else:
cont = 1
elif(cont == 4):
break
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
import main
| 10,312
| 0
| 365
|
146fddc89ca4117d40b10362d56eec7e35ad4254
| 2,442
|
py
|
Python
|
rescale_baseline_density.py
|
sjforeman/RadioFisher
|
fe25f969de9a700c5697168ba9e0d2645c55ed81
|
[
"AFL-3.0"
] | 3
|
2020-12-05T11:28:47.000Z
|
2021-07-09T02:42:21.000Z
|
rescale_baseline_density.py
|
sjforeman/RadioFisher
|
fe25f969de9a700c5697168ba9e0d2645c55ed81
|
[
"AFL-3.0"
] | null | null | null |
rescale_baseline_density.py
|
sjforeman/RadioFisher
|
fe25f969de9a700c5697168ba9e0d2645c55ed81
|
[
"AFL-3.0"
] | 2
|
2021-07-09T02:42:23.000Z
|
2021-11-30T06:37:47.000Z
|
#!/usr/bin/python
"""
Rescale baseline density files, Nring(u), into n(x) = n(u=d/lambda) / lambda^2,
which is approx. const. with frequency (and x = u / nu).
Phil Bull (2014)
"""
import numpy as np
import pylab as P
import scipy.integrate
import os, sys
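# Worked example with made-up numbers (not taken from any data file): at freq = 1000 MHz,
# a bin at u = 2000 wavelengths maps to x = u / freq = 2.0, and a measured density
# n(u) = 5e-4 rescales to n(x) = n(u) * freq**2 = 500, which should come out roughly
# the same when recovered from baseline files at other frequencies.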
try:
Ndish = int(sys.argv[1])
infile = sys.argv[2]
outfile = sys.argv[3]
except:
print "Expects 3 arguments: Ndish, infile, outfile"
sys.exit(1)
def process_baseline_file(fname):
"""
Process SKA N(u) files and output n(d), which can then be converted into a
freq.-dep. n(u).
"""
# Extract info. about baseline file
fname_end = fname.split("/")[-1]
tmp = fname_end.split("_")
freq = float(tmp[1]) / 1e6 # Freq., MHz
dec = float(tmp[2][3:]) # Declination, degrees
ts = float(tmp[3].split("sec")[0]) # Single baseline integ. time, in sec
if len(tmp) == 6:
du = float(tmp[5][2:-4]) # u bin width, ~1/sqrt(fov)
else:
du = float(tmp[4][2:-4]) # u bin width, ~1/sqrt(fov)
# Output information about this datafile
print "-"*50
print "Filename:", fname
print "-"*50
print "Ndish: ", Ndish
print "Freq.: ", freq, "MHz"
print "Dec.: ", dec, "deg"
print "t_s: ", ts, "sec"
print "du: ", du
print "-"*50
# Load datafile and convert to density (don't need bin centres; just use edges)
u, Nring = np.genfromtxt(fname).T
n = Nring / (2.*np.pi * u * du) / (24.*3600. / ts) # Eq. 18 of Mario's notes
# Remove leading zeros (or other special values), if any, to ensure
# interpolation has a sharp cut at u_min
minidx = None; jj = -1
while minidx is None:
jj += 1
if (n[jj] != 0.) and (not np.isnan(n[jj])) and (not np.isinf(n[jj])):
minidx = jj
u = u[minidx:]
n = n[minidx:]
# Integrate n(u) to find normalisation (should be N_dish^2)
norm = scipy.integrate.simps(2.*np.pi*n*u, u)
print "n(u) renorm. factor:", 0.5 * Ndish * (Ndish - 1) / norm, "(applied)"
# (Renorm factor should be close to 1 if Ndish is correct)
n *= 0.5 * Ndish * (Ndish - 1) / norm
# Convert to freq.-independent expression, n(x) = n(u) * nu^2,
# where nu is in MHz.
n_x = n * freq**2.
x = u / freq
return x, n_x
# Process input file
x, n_x = process_baseline_file(infile)
# Output to disk
np.savetxt(outfile, np.column_stack((x, n_x)))
print "Done."
| 30.148148
| 83
| 0.581081
|
#!/usr/bin/python
"""
Rescale baseline density files, Nring(u), into n(x) = n(u=d/lambda) / lambda^2,
which is approx. const. with frequency (and x = u / nu).
Phil Bull (2014)
"""
import numpy as np
import pylab as P
import scipy.integrate
import os, sys
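# Worked example with made-up numbers (not taken from any data file): at freq = 1000 MHz,
# a bin at u = 2000 wavelengths maps to x = u / freq = 2.0, and a measured density
# n(u) = 5e-4 rescales to n(x) = n(u) * freq**2 = 500, which should come out roughly
# the same when recovered from baseline files at other frequencies.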
try:
Ndish = int(sys.argv[1])
infile = sys.argv[2]
outfile = sys.argv[3]
except:
print "Expects 3 arguments: Ndish, infile, outfile"
sys.exit(1)
def process_baseline_file(fname):
"""
Process SKA N(u) files and output n(d), which can then be converted into a
freq.-dep. n(u).
"""
# Extract info. about baseline file
fname_end = fname.split("/")[-1]
tmp = fname_end.split("_")
freq = float(tmp[1]) / 1e6 # Freq., MHz
dec = float(tmp[2][3:]) # Declination, degrees
ts = float(tmp[3].split("sec")[0]) # Single baseline integ. time, in sec
if len(tmp) == 6:
du = float(tmp[5][2:-4]) # u bin width, ~1/sqrt(fov)
else:
du = float(tmp[4][2:-4]) # u bin width, ~1/sqrt(fov)
# Output information about this datafile
print "-"*50
print "Filename:", fname
print "-"*50
print "Ndish: ", Ndish
print "Freq.: ", freq, "MHz"
print "Dec.: ", dec, "deg"
print "t_s: ", ts, "sec"
print "du: ", du
print "-"*50
# Load datafile and convert to density (don't need bin centres; just use edges)
u, Nring = np.genfromtxt(fname).T
n = Nring / (2.*np.pi * u * du) / (24.*3600. / ts) # Eq. 18 of Mario's notes
# Remove leading zeros (or other special values), if any, to ensure
# interpolation has a sharp cut at u_min
minidx = None; jj = -1
while minidx is None:
jj += 1
if (n[jj] != 0.) and (not np.isnan(n[jj])) and (not np.isinf(n[jj])):
minidx = jj
u = u[minidx:]
n = n[minidx:]
# Integrate n(u) to find normalisation (should be N_dish^2)
norm = scipy.integrate.simps(2.*np.pi*n*u, u)
print "n(u) renorm. factor:", 0.5 * Ndish * (Ndish - 1) / norm, "(applied)"
# (Renorm factor should be close to 1 if Ndish is correct)
n *= 0.5 * Ndish * (Ndish - 1) / norm
# Convert to freq.-independent expression, n(x) = n(u) * nu^2,
# where nu is in MHz.
n_x = n * freq**2.
x = u / freq
return x, n_x
# Process input file
x, n_x = process_baseline_file(infile)
# Output to disk
np.savetxt(outfile, np.column_stack((x, n_x)))
print "Done."
| 0
| 0
| 0
|
8adf03f135b35749475e53a47a588a4b9f8f0a62
| 1,275
|
py
|
Python
|
lesson26.py
|
pingstech/Opencv_Examples2
|
05327af3dbf40e2b8e9f651c8154f45e018518e4
|
[
"MIT"
] | 1
|
2021-09-05T13:14:19.000Z
|
2021-09-05T13:14:19.000Z
|
lesson26.py
|
pingstech/Opencv_Examples2
|
05327af3dbf40e2b8e9f651c8154f45e018518e4
|
[
"MIT"
] | null | null | null |
lesson26.py
|
pingstech/Opencv_Examples2
|
05327af3dbf40e2b8e9f651c8154f45e018518e4
|
[
"MIT"
] | null | null | null |
import cv2
cap=cv2.VideoCapture(0)
ret,frame1=cap.read()
ret,frame2=cap.read()
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
while (cap.isOpened()):
diff=cv2.absdiff(frame1,frame2)
diff=doContour(setFrame(diff),frame1)
cv2.imshow('Detection',frame1)
frame1=frame2
ret,frame2=cap.read()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| 30.357143
| 80
| 0.682353
|
import cv2
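# setFrame() below builds a binary motion mask (grayscale -> Gaussian blur -> fixed
# threshold at 20 -> dilation); doContour() then draws a box and x/y labels on the
# original frame for every contour whose area exceeds areaSize.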
def setFrame(frame):
grayFrame=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
blurFrame=cv2.GaussianBlur(grayFrame,(5,5),0)
_,threshFrame=cv2.threshold(blurFrame,20,255,cv2.THRESH_BINARY)
dilatedFrame=cv2.dilate(threshFrame,None,iterations=3)
return dilatedFrame
def doContour(frame,orgFrame,areaSize=1500):
contours,_=cv2.findContours(frame,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
font=cv2.FONT_HERSHEY_SIMPLEX
for contour in contours:
(x,y,w,h)=cv2.boundingRect(contour)
if cv2.contourArea(contour)<areaSize:
continue
cv2.rectangle(orgFrame,(x,y),(x+w,y+h),(0,0,255),2)
cv2.putText(orgFrame,"x value :{}".format(x),(20,50),font,1,(0,255,0),1)
cv2.putText(orgFrame,"y value :{}".format(y),(20,75),font,1,(0,255,0),1)
return frame
cap=cv2.VideoCapture(0)
ret,frame1=cap.read()
ret,frame2=cap.read()
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
while (cap.isOpened()):
diff=cv2.absdiff(frame1,frame2)
diff=doContour(setFrame(diff),frame1)
cv2.imshow('Detection',frame1)
frame1=frame2
ret,frame2=cap.read()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| 785
| 0
| 46
|
6a9b4be7df6137a1ebf46f6a4b6479a4ae5b8614
| 3,444
|
py
|
Python
|
api/g6trayreturn.py
|
smu-iot20-g7/beocrescent-dashboard
|
93776c7cba76e5c57fc2eed8c35584e7541557fe
|
[
"MIT"
] | null | null | null |
api/g6trayreturn.py
|
smu-iot20-g7/beocrescent-dashboard
|
93776c7cba76e5c57fc2eed8c35584e7541557fe
|
[
"MIT"
] | null | null | null |
api/g6trayreturn.py
|
smu-iot20-g7/beocrescent-dashboard
|
93776c7cba76e5c57fc2eed8c35584e7541557fe
|
[
"MIT"
] | 1
|
2020-12-05T06:48:25.000Z
|
2020-12-05T06:48:25.000Z
|
from flask import Flask, jsonify, request
import pymongo
from flask_cors import CORS
from os import environ
from bson.json_util import dumps
import json
app = Flask(__name__)
client = pymongo.MongoClient(
"mongodb+srv://iotadmin:iotadminpassword@cluster0.cowqf.mongodb.net/iotTest?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE")
CORS(app)
db = client['iotTest']
positivetrayreturn = db['positivetrayreturn']
stall_distribution = db['stall_distribution']
empty_trayreturn = db['empty_trayreturn']
@app.route("/g6trayreturndistr/<stall_id>/<date_wanted>", methods=['GET'])
@app.route("/g6trayclear/<date_wanted>", methods=['GET'])
@app.route("/g6total/<stall_id>/<date_wanted>", methods=['GET'])
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5002, debug=True)
| 33.764706
| 142
| 0.574042
|
from flask import Flask, jsonify, request
import pymongo
from flask_cors import CORS
from os import environ
from bson.json_util import dumps
import json
app = Flask(__name__)
client = pymongo.MongoClient(
"mongodb+srv://iotadmin:iotadminpassword@cluster0.cowqf.mongodb.net/iotTest?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE")
CORS(app)
db = client['iotTest']
positivetrayreturn = db['positivetrayreturn']
stall_distribution = db['stall_distribution']
empty_trayreturn = db['empty_trayreturn']
@app.route("/g6trayreturndistr/<stall_id>/<date_wanted>", methods=['GET'])
def g6trayreturndistr(stall_id, date_wanted):
list_data = {"returns": {'06:00': 0, '07:00': 0, '08:00': 0, '09:00': 0, '10:00': 0, '11:00': 0, '12:00': 0, '13:00': 0, '14:00': 0,
'15:00': 0, '16:00': 0, '17:00': 0, '18:00': 0, '19:00': 0, '20:00': 0, "21:00": 0, "22:00": 0, "23:00": 0},
"distr": {'06:00': 0, '07:00': 0, '08:00': 0, '09:00': 0, '10:00': 0, '11:00': 0, '12:00': 0, '13:00': 0, '14:00': 0,
'15:00': 0, '16:00': 0, '17:00': 0, '18:00': 0, '19:00': 0, '20:00': 0, "21:00": 0, "22:00": 0, "23:00": 0}}
data = positivetrayreturn.find({"stall_id": int(stall_id)})
for x in data:
d = str(x["datetime"])
date = d[:10]
if date == date_wanted:
time = d[11:13] + ":00"
counttime = list_data["returns"][time]
counttime += 1
list_data["returns"][time] = counttime
print(list_data)
data2 = stall_distribution.find({"rasp_id": int(stall_id)})
for x in data2:
d = str(x["datetime"])
date = d[:10]
if date == date_wanted:
time = d[11:13] + ":00"
counttime = list_data["distr"][time]
counttime += 1
list_data["distr"][time] = counttime
print(list_data)
return json.dumps(list_data), 200
@app.route("/g6trayclear/<date_wanted>", methods=['GET'])
def g6trayclear(date_wanted):
count = 0
data = empty_trayreturn.find()
for x in data:
datetime = str(x["datetime"])
d = datetime.replace(",", "-")
date = d[:10]
if date == date_wanted:
count += 1
data = {"Cleared": count}
print(data)
return json.dumps(data), 200
@app.route("/g6total/<stall_id>/<date_wanted>", methods=['GET'])
def g6total(stall_id, date_wanted):
total_distr = 0
data = stall_distribution.find({"rasp_id": int(stall_id)})
for x in data:
datetime = str(x["datetime"])
d = datetime.replace(",", "-")
date = d[:10]
if date == date_wanted:
total_distr += 1
total_return = 0
data2 = positivetrayreturn.find({"stall_id": int(stall_id)})
for y in data2:
datetime = str(y["datetime"])
d = datetime.replace(",", "-")
date = d[:10]
if date == date_wanted:
total_return += 1
clear_count = 0
data3 = empty_trayreturn.find()
for y in data3:
datetime = str(y["datetime"])
d = datetime.replace(",", "-")
date = d[:10]
if date == date_wanted:
clear_count += 1
not_returned = total_distr - total_return
data = {"NotReturned": not_returned , "Returned": total_return, "Cleared": clear_count}
print(data)
return json.dumps(data), 200
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5002, debug=True)
| 2,578
| 0
| 66
|
374f44188164e9d4772b1935cff59220da10b3cd
| 4,782
|
py
|
Python
|
calc.py
|
bryli/AKParser
|
ebecc471458b54a272134dc0f9b35f5da35a6257
|
[
"MIT"
] | 1
|
2020-03-01T15:14:18.000Z
|
2020-03-01T15:14:18.000Z
|
calc.py
|
bryli/AKParser
|
ebecc471458b54a272134dc0f9b35f5da35a6257
|
[
"MIT"
] | null | null | null |
calc.py
|
bryli/AKParser
|
ebecc471458b54a272134dc0f9b35f5da35a6257
|
[
"MIT"
] | null | null | null |
import json
from collections import Counter
ALLSKL = 'allSkillLvlup' # Key for character dictionary to get 2-7 level up mats
LVCOST = 'lvlUpCost'
MCOSTC = 'levelUpCostCond' # Key for mastery level cost upgrades.
MCOST = 'levelUpCost'
SKILLS = 'skills'
ELITE = 'phases'
PROMOTE = 'evolveCost'
RARE = 'rarity'
ID = 'id'
CT = 'count'
CHAR_LOC = "chardata.json"
FORMULAS = "formulas.json"
ITEMNAMES = "itemnames.json"
ITEMIDS = "itemids.json"
MASTERY = "masterylist.json"
NM = 'name'
COST = 'costs'
if __name__ == '__main__':
main()
| 37.069767
| 114
| 0.604559
|
import json
from collections import Counter
ALLSKL = 'allSkillLvlup' # Key for character dictionary to get 2-7 level up mats
LVCOST = 'lvlUpCost'
MCOSTC = 'levelUpCostCond' # Key for mastery level cost upgrades.
MCOST = 'levelUpCost'
SKILLS = 'skills'
ELITE = 'phases'
PROMOTE = 'evolveCost'
RARE = 'rarity'
ID = 'id'
CT = 'count'
CHAR_LOC = "chardata.json"
FORMULAS = "formulas.json"
ITEMNAMES = "itemnames.json"
ITEMIDS = "itemids.json"
MASTERY = "masterylist.json"
NM = 'name'
COST = 'costs'
def main():
try:
with open(CHAR_LOC, "r") as charfile:
chardata = (json.loads(charfile.read()))
with open(FORMULAS, "r") as fmfile:
formulas = (json.loads(fmfile.read()))
with open(ITEMNAMES, "r") as inmfile:
itemnames = (json.loads(inmfile.read()))
with open(ITEMIDS, "r") as iidfile:
itemids = (json.loads(iidfile.read()))
with open(MASTERY, "r") as mstrfile:
masterylist = (json.loads(mstrfile.read()))
except Exception:
return IOError("Failed to read files.")
compiled_mats = {}
[chardata.pop(char) for char in list(chardata.keys()) if chardata[char][RARE] < 2]
for char in chardata.keys():
compiled_mats.update(charCost(char, chardata, formulas, itemnames, itemids,
masterylist.get(chardata[char][NM])))
return(compiled_mats)
def charCost(char, chardata, formulas, itemnames, itemids, mastery = None, reduce = ['Loxic Kohl', 'Grindstone']):
rarity = chardata[char][RARE]
if mastery is None:
if rarity < 5:
mastery = [1, 2]
else:
mastery = [1, 2, 3]
elitemats = eliteCost(char, chardata)
skillmats = skillCost(char, chardata, mastery)
if reduce is not None:
if rarity > 2:
elitemats = reduceMaterials(elitemats, formulas, itemnames, reduce)
skillmats = reduceMaterials(skillmats, formulas, itemnames, reduce)
elitemats = dict((itemids[itemid], count) for (itemid, count) in elitemats.items())
skillmats = dict((itemids[itemid], count) for (itemid, count) in skillmats.items())
# Combines elitemats and skillmats by adding the values corresponding to keys in both sets.
totmats = Counter(elitemats)
totmats.update(Counter(skillmats))
return {chardata[char][NM]:[mastery, elitemats, skillmats, totmats]}
def skillCost(char, chardata, toMaster):
# chardata[char] -> char info
# chardata[char]['allSkillLvlup'] -> list of list of dictionaries of items for skill level up costs 2-7
# for sklv in chardata[char]['allSkillLvlup']: Unlock info and level up cost for each skill level
# for mats in sklv['lvlUpCost']: mats for unlock
# mats['id'] id of item
# mats['count'] # of item
rarity = chardata[char][RARE]
skillmats = {}
for sklv in chardata[char][ALLSKL]:
for mats in sklv[LVCOST]:
skillmats[mats[ID]] = mats[CT]
if rarity > 2:
for skill in toMaster:
for sklv in chardata[char][SKILLS][skill - 1][MCOSTC]:
for mats in sklv[MCOST]:
if mats[ID] in skillmats:
skillmats[mats[ID]] += mats[CT]
else:
skillmats[mats[ID]] = mats[CT]
return skillmats
def eliteCost(char, chardata):
elitemats = {}
rarity = chardata[char][RARE]
if rarity > 2:
for eliteStgNum in range(len(chardata[char][ELITE])):
if eliteStgNum > 0:
for mats in chardata[char][ELITE][eliteStgNum][PROMOTE]:
if mats[ID] in elitemats:
elitemats[mats[ID]] += mats[CT]
else:
elitemats[mats[ID]] = mats[CT]
return elitemats
else:
return None
def reduceMaterials(mats, formulas, itemnames, reducemats):
toReduce = []
for item in reducemats:
toReduce.append(itemnames[item])
result = {item:0 for item in toReduce}
for mat in mats.keys():
reduced = reduce({mat:mats[mat]}, formulas, toReduce)
for itemid in reduced.keys():
result[itemid] += reduced[itemid]
return(result)
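# reduce() below recursively expands a single {item_id: count} entry through the crafting
# formulas until only the requested target materials remain, returning how many of each
# target the item ultimately consumes.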
def reduce(mat, formulas, reducemats):
result = {item: 0 for item in reducemats}
key = list(mat.keys())[0]
if key in result:
result[key] += mat[key]
return result
elif len(formulas[key][COST]) == 0:
return result
else:
for item in formulas[key][COST]:
reduced = reduce({item[ID]:item[CT]}, formulas, reducemats)
for itemid in reduced.keys():
result[itemid] += reduced[itemid] * mat[key]
return result
if __name__ == '__main__':
main()
| 4,108
| 0
| 138
|
dcb02e7452a985e2c94a463cfd5bbd8aea8b03f9
| 583
|
py
|
Python
|
backend/__init__.py
|
allenai/twentyquestions
|
48f0de26d2dca963cdae1263245d3b267ae7a771
|
[
"Apache-2.0"
] | 9
|
2020-07-23T03:36:26.000Z
|
2022-03-03T15:20:36.000Z
|
backend/__init__.py
|
allenai/twentyquestions
|
48f0de26d2dca963cdae1263245d3b267ae7a771
|
[
"Apache-2.0"
] | null | null | null |
backend/__init__.py
|
allenai/twentyquestions
|
48f0de26d2dca963cdae1263245d3b267ae7a771
|
[
"Apache-2.0"
] | 2
|
2021-12-21T18:36:43.000Z
|
2022-03-30T07:28:52.000Z
|
"""A backend for playing twenty questions."""
import logging
import flask
from backend.views import (
twentyquestions,
socketio)
logger = logging.getLogger(__name__)
app = flask.Flask(__name__)
@app.route('/')
def root():
"""A root page for twentyquestions."""
return (
'This server is used by the Allen Institute for Artificial'
' Intelligence to crowdsource common sense by playing 20'
' Questions.',
200
)
# register blueprints
app.register_blueprint(twentyquestions)
# set up the web socket
socketio.init_app(app)
| 16.657143
| 67
| 0.682676
|
"""A backend for playing twenty questions."""
import logging
import flask
from backend.views import (
twentyquestions,
socketio)
logger = logging.getLogger(__name__)
app = flask.Flask(__name__)
@app.route('/')
def root():
"""A root page for twentyquestions."""
return (
'This server is used by the Allen Institute for Artificial'
' Intelligence to crowdsource common sense by playing 20'
' Questions.',
200
)
# register blueprints
app.register_blueprint(twentyquestions)
# set up the web socket
socketio.init_app(app)
| 0
| 0
| 0
|
6f2ae1ff62c113ac8412a10c9d0e89f76b2b9b99
| 1,665
|
py
|
Python
|
telegram_handler.py
|
anant-j/API-Toolkit
|
ced92c711fcb93f96b81a87e57bb4e5dd647224c
|
[
"CC0-1.0"
] | 1
|
2019-12-27T17:05:52.000Z
|
2019-12-27T17:05:52.000Z
|
telegram_handler.py
|
anant-j/API-Toolkit
|
ced92c711fcb93f96b81a87e57bb4e5dd647224c
|
[
"CC0-1.0"
] | 2
|
2021-02-09T03:25:59.000Z
|
2021-06-02T00:51:13.000Z
|
telegram_handler.py
|
anant-j/API-Toolkit
|
ced92c711fcb93f96b81a87e57bb4e5dd647224c
|
[
"CC0-1.0"
] | null | null | null |
import json
import requests
import os
my_directory = os.path.dirname(os.path.abspath(__file__))
with open(f'{my_directory}/secrets/keys.json') as f:
api_keys = json.load(f)
TGKEY = api_keys["Telegram"]["Key"]
DEVID = api_keys["Telegram"]["DeviceID"]
def send_analytics(req, fingerprint):
"""Sends analytics data to Telegram API
Args:
req (dict): Hashmap containing request information
"""
content = f'Someone from {req["city"]} , {req["country_name"]} visited your Website @ {req["Page"]} \nCarrier: {req["org"]} \nOS: {req["Operating System"]} \nBrowser: {req["Browser"]} \nDate-Time: {req["Date & Time"]} \nIP: {req["ip"]}\nFingerprint: {fingerprint}'
push(content)
def send_form(formData):
"""Sends Form data to Telegram API
Args:
formData (dict): Hashmap containing form data
"""
content = f'Someone sent you a message via contact form.\nName: {formData["name"]}\nEmail: {formData["email"]}\nAbout: {formData["about"]}\nMessage: {formData["message"]}'
push(content)
def send_performance(name, response_time, allowed):
"""Sends Performance data to Telegram API
Args:
name (string): The name of the endpoint that the alert is raised for
response_time (float): The average response time for the alert
allowed (float): The allowed response time for the service
"""
    content = f'Performance Alert\nThe {name} endpoint took an average of : {response_time} to compute \n while the allowed time is : {allowed}'
push(content)
| 36.195652
| 268
| 0.681081
|
import json
import requests
import os
my_directory = os.path.dirname(os.path.abspath(__file__))
with open(f'{my_directory}/secrets/keys.json') as f:
api_keys = json.load(f)
TGKEY = api_keys["Telegram"]["Key"]
DEVID = api_keys["Telegram"]["DeviceID"]
def send_analytics(req, fingerprint):
"""Sends analytics data to Telegram API
Args:
req (dict): Hashmap containing request information
"""
content = f'Someone from {req["city"]} , {req["country_name"]} visited your Website @ {req["Page"]} \nCarrier: {req["org"]} \nOS: {req["Operating System"]} \nBrowser: {req["Browser"]} \nDate-Time: {req["Date & Time"]} \nIP: {req["ip"]}\nFingerprint: {fingerprint}'
push(content)
def send_form(formData):
"""Sends Form data to Telegram API
Args:
formData (dict): Hashmap containing form data
"""
content = f'Someone sent you a message via contact form.\nName: {formData["name"]}\nEmail: {formData["email"]}\nAbout: {formData["about"]}\nMessage: {formData["message"]}'
push(content)
def send_performance(name, response_time, allowed):
"""Sends Performance data to Telegram API
Args:
name (string): The name of the endpoint that the alert is raised for
response_time (float): The average response time for the alert
allowed (float): The allowed response time for the service
"""
    content = f'Performance Alert\nThe {name} endpoint took an average of : {response_time} to compute \n while the allowed time is : {allowed}'
push(content)
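# push() below relays the text through the Telegram Bot API's sendMessage endpoint; TGKEY
# is presumably the full 'bot<token>' path segment, since it is interpolated directly into the URL.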
def push(content):
url = f'https://api.telegram.org/{TGKEY}/sendMessage?text={content}&chat_id={DEVID}'
requests.post(url)
| 109
| 0
| 23
|
c46beeec7b11c4abe6e2db62dfaeed8601291fe1
| 73,322
|
py
|
Python
|
topobank/manager/views.py
|
ComputationalMechanics/TopoBank
|
3e598d4b98cbffa43764e335f026efcbe7580c8a
|
[
"MIT"
] | 1
|
2020-06-04T23:18:53.000Z
|
2020-06-04T23:18:53.000Z
|
topobank/manager/views.py
|
ComputationalMechanics/TopoBank
|
3e598d4b98cbffa43764e335f026efcbe7580c8a
|
[
"MIT"
] | 168
|
2020-06-02T14:46:45.000Z
|
2021-03-19T12:11:07.000Z
|
topobank/manager/views.py
|
ComputationalMechanics/TopoBank
|
3e598d4b98cbffa43764e335f026efcbe7580c8a
|
[
"MIT"
] | null | null | null |
import datetime
import logging
import os.path
import traceback
from io import BytesIO
import django_tables2 as tables
import numpy as np
from bokeh.embed import components
from bokeh.models import DataRange1d, LinearColorMapper, ColorBar, LabelSet, FuncTickFormatter, TapTool, OpenURL
from bokeh.plotting import figure, ColumnDataSource
from django.conf import settings
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import PermissionDenied
from django.core.files import File
from django.core.files.storage import FileSystemStorage
from django.core.files.storage import default_storage
from django.db.models import Q
from django.db import transaction
from django.http import HttpResponse, Http404
from django.shortcuts import redirect, render
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.safestring import mark_safe
from django.views.generic import DetailView, UpdateView, CreateView, DeleteView, TemplateView, ListView, FormView
from django.views.generic.edit import FormMixin
from django_tables2 import RequestConfig
from django.contrib.staticfiles.storage import staticfiles_storage
from formtools.wizard.views import SessionWizardView
from guardian.decorators import permission_required_or_403
from guardian.shortcuts import get_users_with_perms, get_objects_for_user, get_anonymous_user
from notifications.signals import notify
from rest_framework.decorators import api_view
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.utils.urls import remove_query_param, replace_query_param
from trackstats.models import Metric, Period
from .forms import TopographyFileUploadForm, TopographyMetaDataForm, TopographyWizardUnitsForm, DEFAULT_LICENSE
from .forms import TopographyForm, SurfaceForm, SurfaceShareForm, SurfacePublishForm
from .models import Topography, Surface, TagModel, \
NewPublicationTooFastException, LoadTopographyException, PlotTopographyException
from .serializers import SurfaceSerializer, TagSerializer
from .utils import selected_instances, bandwidths_data, get_topography_reader, tags_for_user, get_reader_infos, \
mailto_link_for_reporting_an_error, current_selection_as_basket_items, filtered_surfaces, \
filtered_topographies, get_search_term, get_category, get_sharing_status, get_tree_mode, \
get_permission_table_data
from ..usage_stats.utils import increase_statistics_by_date, increase_statistics_by_date_and_object
from ..users.models import User
from ..users.utils import get_default_group
from ..publication.models import Publication, MAX_LEN_AUTHORS_FIELD
from .containers import write_surface_container
from ..taskapp.tasks import renew_squeezed_datafile, renew_topography_thumbnail, renew_analyses_related_to_topography
# create dicts with labels and option values for Select tab
CATEGORY_FILTER_CHOICES = {'all': 'All categories',
**{cc[0]: cc[1] + " only" for cc in Surface.CATEGORY_CHOICES}}
SHARING_STATUS_FILTER_CHOICES = {
'all': 'All accessible surfaces',
'own': 'Only own surfaces',
'shared': 'Only surfaces shared with you',
'published': 'Only surfaces published by anyone',
}
TREE_MODE_CHOICES = ['surface list', 'tag tree']
MAX_PAGE_SIZE = 100
DEFAULT_PAGE_SIZE = 10
DEFAULT_SELECT_TAB_STATE = {
'search_term': '', # empty string means: no search
'category': 'all',
'sharing_status': 'all',
'tree_mode': 'surface list',
'page_size': 10,
'current_page': 1,
# all these values are the default if no filter has been applied
# and the page is loaded the first time
}
MEASUREMENT_TIME_INFO_FIELD = 'acquisition_time'
_log = logging.getLogger(__name__)
surface_view_permission_required = method_decorator(
permission_required_or_403('manager.view_surface', ('manager.Surface', 'pk', 'pk'))
# translates to:
#
# In order to access, a specific permission is required. This permission
# is 'view_surface' for a specific surface. Which surface? This is calculated
# from view argument 'pk' (the last element in tuple), which is used to get a
# 'manager.Surface' instance (first element in tuple) with field 'pk' with same value as
# last element in tuple (the view argument 'pk').
#
# Or in pseudocode:
#
# s = Surface.objects.get(pk=view.kwargs['pk'])
# assert request.user.has_perm('view_surface', s)
)
surface_update_permission_required = method_decorator(
permission_required_or_403('manager.change_surface', ('manager.Surface', 'pk', 'pk'))
)
surface_delete_permission_required = method_decorator(
permission_required_or_403('manager.delete_surface', ('manager.Surface', 'pk', 'pk'))
)
surface_share_permission_required = method_decorator(
permission_required_or_403('manager.share_surface', ('manager.Surface', 'pk', 'pk'))
)
surface_publish_permission_required = method_decorator(
permission_required_or_403('manager.publish_surface', ('manager.Surface', 'pk', 'pk'))
)
#
# Using a wizard because we need intermediate calculations
#
# There are 3 forms, used in 3 steps (0,1, then 2):
#
# 0: loading of the topography file
# 1: choosing the data source, add measurement date and a description
# 2: adding physical size and units (for data which is not available in the file, for 1D or 2D)
#
# Maybe an alternative would be to use AJAX calls as described here (under "GET"):
#
# https://sixfeetup.com/blog/making-your-django-templates-ajax-y
#
def topography_plot(request, pk):
"""Render an HTML snippet with topography plot"""
try:
pk = int(pk)
topo = Topography.objects.get(pk=pk)
assert request.user.has_perm('view_surface', topo.surface)
except (ValueError, Topography.DoesNotExist, AssertionError):
raise PermissionDenied() # This should be shown independent of whether the surface exists
errors = [] # list of dicts with keys 'message' and 'link'
context = {}
plotted = False
try:
plot = topo.get_plot()
plotted = True
except LoadTopographyException as exc:
        err_message = "Topography '{}' (id: {}) unexpectedly cannot be loaded.".format(
topo.name, topo.id)
_log.error(err_message)
link = mailto_link_for_reporting_an_error(f"Failure loading topography (id: {topo.id})",
"Plotting measurement",
err_message,
traceback.format_exc())
errors.append(dict(message=err_message, link=link))
except PlotTopographyException as exc:
err_message = "Topography '{}' (id: {}) cannot be plotted.".format(topo.name, topo.id)
_log.error(err_message)
link = mailto_link_for_reporting_an_error(f"Failure plotting measurement (id: {topo.id})",
"Plotting measurement",
err_message,
traceback.format_exc())
errors.append(dict(message=err_message, link=link))
if plotted:
script, div = components(plot)
context['image_plot_script'] = script
context['image_plot_div'] = div
context['errors'] = errors
return render(request, 'manager/topography_plot.html', context=context)
def download_surface(request, surface_id):
"""Returns a file comprised from topographies contained in a surface.
:param request:
:param surface_id: surface id
:return:
"""
#
# Check existence and permissions for given surface
#
try:
surface = Surface.objects.get(id=surface_id)
except Surface.DoesNotExist:
raise PermissionDenied()
if not request.user.has_perm('view_surface', surface):
raise PermissionDenied()
content_data = None
#
# If the surface has been published, there might be a container file already.
# If yes:
# Is there already a container?
    #   Then use it instead of creating a new container.
# If no, save the container in the publication later.
# If no: create a container for this surface on the fly
#
renew_publication_container = False
if surface.is_published:
pub = surface.publication
# noinspection PyBroadException
try:
with pub.container.open() as cf:
content_data = cf.read()
_log.debug(f"Read container for published surface {pub.short_url} from storage.")
except Exception: # not interested here, why it fails
renew_publication_container = True
if content_data is None:
container_bytes = BytesIO()
_log.info(f"Preparing container of surface id={surface_id} for download..")
write_surface_container(container_bytes, [surface], request=request)
content_data = container_bytes.getvalue()
if renew_publication_container:
try:
container_bytes.seek(0)
_log.info(f"Saving container for publication with URL {pub.short_url} to storage for later..")
pub.container.save(pub.container_storage_path, container_bytes)
except (OSError, BlockingIOError) as exc:
_log.error(f"Cannot save container for publication {pub.short_url} to storage. "
f"Reason: {exc}")
# Prepare response object.
response = HttpResponse(content_data,
content_type='application/x-zip-compressed')
response['Content-Disposition'] = 'attachment; filename="{}"'.format('surface.zip')
increase_statistics_by_date_and_object(Metric.objects.SURFACE_DOWNLOAD_COUNT,
period=Period.DAY, obj=surface)
return response
def download_selection_as_surfaces(request):
"""Returns a file comprised from surfaces related to the selection.
:param request: current request
:return:
"""
from .utils import current_selection_as_surface_list
surfaces = current_selection_as_surface_list(request)
container_bytes = BytesIO()
write_surface_container(container_bytes, surfaces, request=request)
# Prepare response object.
response = HttpResponse(container_bytes.getvalue(),
content_type='application/x-zip-compressed')
response['Content-Disposition'] = 'attachment; filename="{}"'.format('surface.zip')
# increase download count for each surface
for surf in surfaces:
increase_statistics_by_date_and_object(Metric.objects.SURFACE_DOWNLOAD_COUNT,
period=Period.DAY, obj=surf)
return response
#######################################################################################
# Views for REST interface
#######################################################################################
class TagTreeView(ListAPIView):
"""
Generate tree of tags with surfaces and topographies underneath.
"""
serializer_class = TagSerializer
pagination_class = SurfaceSearchPaginator
class SurfaceListView(ListAPIView):
"""
List all surfaces with topographies underneath.
"""
serializer_class = SurfaceSerializer
pagination_class = SurfaceSearchPaginator
def set_surface_select_status(request, pk, select_status):
"""Marks the given surface as 'selected' in session or checks this.
:param request: request
:param pk: primary key of the surface
:param select_status: True if surface should be selected, False if it should be unselected
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
try:
pk = int(pk)
surface = Surface.objects.get(pk=pk)
assert request.user.has_perm('view_surface', surface)
except (ValueError, Surface.DoesNotExist, AssertionError):
raise PermissionDenied() # This should be shown independent of whether the surface exists
surface_key = _surface_key(pk)
selection = _selection_set(request)
is_selected = surface_key in selection
if request.method == 'POST':
if select_status:
# surface should be selected
selection.add(surface_key)
elif is_selected:
selection.remove(surface_key)
request.session['selection'] = list(selection)
data = current_selection_as_basket_items(request)
return Response(data)
@api_view(['POST'])
def select_surface(request, pk):
"""Marks the given surface as 'selected' in session.
:param request: request
:param pk: primary key of the surface
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_surface_select_status(request, pk, True)
@api_view(['POST'])
def unselect_surface(request, pk):
"""Marks the given surface as 'unselected' in session.
:param request: request
:param pk: primary key of the surface
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_surface_select_status(request, pk, False)
def set_topography_select_status(request, pk, select_status):
"""Marks the given topography as 'selected' or 'unselected' in session.
:param request: request
:param pk: primary key of the surface
:param select_status: True or False, True means "mark as selected", False means "mark as unselected"
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
try:
pk = int(pk)
topo = Topography.objects.get(pk=pk)
assert request.user.has_perm('view_surface', topo.surface)
except (ValueError, Topography.DoesNotExist, AssertionError):
raise PermissionDenied() # This should be shown independent of whether the surface exists
topography_key = _topography_key(pk)
selection = _selection_set(request)
is_selected = topography_key in selection
if request.method == 'POST':
if select_status:
# topography should be selected
selection.add(topography_key)
elif is_selected:
selection.remove(topography_key)
request.session['selection'] = list(selection)
data = current_selection_as_basket_items(request)
return Response(data)
@api_view(['POST'])
def select_topography(request, pk):
"""Marks the given topography as 'selected' in session.
:param request: request
:param pk: primary key of the surface
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_topography_select_status(request, pk, True)
@api_view(['POST'])
def unselect_topography(request, pk):
"""Marks the given topography as 'selected' in session.
:param request: request
:param pk: primary key of the surface
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_topography_select_status(request, pk, False)
def set_tag_select_status(request, pk, select_status):
"""Marks the given tag as 'selected' in session or checks this.
:param request: request
:param pk: primary key of the tag
:param select_status: True if tag should be selected, False if it should be unselected
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
try:
pk = int(pk)
tag = TagModel.objects.get(pk=pk)
except ValueError:
raise PermissionDenied()
if not tag in tags_for_user(request.user):
raise PermissionDenied()
tag_key = _tag_key(pk)
selection = _selection_set(request)
is_selected = tag_key in selection
if request.method == 'POST':
if select_status:
# tag should be selected
selection.add(tag_key)
elif is_selected:
selection.remove(tag_key)
request.session['selection'] = list(selection)
data = current_selection_as_basket_items(request)
return Response(data)
@api_view(['POST'])
def select_tag(request, pk):
"""Marks the given tag as 'selected' in session.
:param request: request
:param pk: primary key of the tag
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_tag_select_status(request, pk, True)
@api_view(['POST'])
def unselect_tag(request, pk):
"""Marks the given tag as 'unselected' in session.
:param request: request
:param pk: primary key of the tag
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_tag_select_status(request, pk, False)
@api_view(['POST'])
def unselect_all(request):
"""Removes all selections from session.
:param request: request
:return: empty list as JSON Response
"""
request.session['selection'] = []
return Response([])
def thumbnail(request, pk):
"""Returns image data for a topography thumbail
Parameters
----------
request
Returns
-------
HTML Response with image data
"""
try:
pk = int(pk)
except ValueError:
raise Http404()
try:
topo = Topography.objects.get(pk=pk)
except Topography.DoesNotExist:
raise Http404()
if not request.user.has_perm('view_surface', topo.surface):
raise PermissionDenied()
# okay, we have a valid topography and the user is allowed to see it
image = topo.thumbnail
response = HttpResponse(content_type="image/png")
try:
response.write(image.file.read())
except Exception as exc:
_log.warning("Cannot load thumbnail for topography %d. Reason: %s", topo.id, exc)
# return some default image so the client gets sth in any case
with staticfiles_storage.open('images/thumbnail_unavailable.png', mode='rb') as img_file:
response.write(img_file.read())
return response
| 37.485685
| 122
| 0.610526
|
import datetime
import logging
import os.path
import traceback
from io import BytesIO
import django_tables2 as tables
import numpy as np
from bokeh.embed import components
from bokeh.models import DataRange1d, LinearColorMapper, ColorBar, LabelSet, FuncTickFormatter, TapTool, OpenURL
from bokeh.plotting import figure, ColumnDataSource
from django.conf import settings
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import PermissionDenied
from django.core.files import File
from django.core.files.storage import FileSystemStorage
from django.core.files.storage import default_storage
from django.db.models import Q
from django.db import transaction
from django.http import HttpResponse, Http404
from django.shortcuts import redirect, render
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.safestring import mark_safe
from django.views.generic import DetailView, UpdateView, CreateView, DeleteView, TemplateView, ListView, FormView
from django.views.generic.edit import FormMixin
from django_tables2 import RequestConfig
from django.contrib.staticfiles.storage import staticfiles_storage
from formtools.wizard.views import SessionWizardView
from guardian.decorators import permission_required_or_403
from guardian.shortcuts import get_users_with_perms, get_objects_for_user, get_anonymous_user
from notifications.signals import notify
from rest_framework.decorators import api_view
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.utils.urls import remove_query_param, replace_query_param
from trackstats.models import Metric, Period
from .forms import TopographyFileUploadForm, TopographyMetaDataForm, TopographyWizardUnitsForm, DEFAULT_LICENSE
from .forms import TopographyForm, SurfaceForm, SurfaceShareForm, SurfacePublishForm
from .models import Topography, Surface, TagModel, \
NewPublicationTooFastException, LoadTopographyException, PlotTopographyException
from .serializers import SurfaceSerializer, TagSerializer
from .utils import selected_instances, bandwidths_data, get_topography_reader, tags_for_user, get_reader_infos, \
mailto_link_for_reporting_an_error, current_selection_as_basket_items, filtered_surfaces, \
filtered_topographies, get_search_term, get_category, get_sharing_status, get_tree_mode, \
get_permission_table_data
from ..usage_stats.utils import increase_statistics_by_date, increase_statistics_by_date_and_object
from ..users.models import User
from ..users.utils import get_default_group
from ..publication.models import Publication, MAX_LEN_AUTHORS_FIELD
from .containers import write_surface_container
from ..taskapp.tasks import renew_squeezed_datafile, renew_topography_thumbnail, renew_analyses_related_to_topography
# create dicts with labels and option values for Select tab
CATEGORY_FILTER_CHOICES = {'all': 'All categories',
**{cc[0]: cc[1] + " only" for cc in Surface.CATEGORY_CHOICES}}
SHARING_STATUS_FILTER_CHOICES = {
'all': 'All accessible surfaces',
'own': 'Only own surfaces',
'shared': 'Only surfaces shared with you',
'published': 'Only surfaces published by anyone',
}
TREE_MODE_CHOICES = ['surface list', 'tag tree']
MAX_PAGE_SIZE = 100
DEFAULT_PAGE_SIZE = 10
DEFAULT_SELECT_TAB_STATE = {
'search_term': '', # empty string means: no search
'category': 'all',
'sharing_status': 'all',
'tree_mode': 'surface list',
'page_size': 10,
'current_page': 1,
# all these values are the default if no filter has been applied
# and the page is loaded the first time
}
MEASUREMENT_TIME_INFO_FIELD = 'acquisition_time'
_log = logging.getLogger(__name__)
surface_view_permission_required = method_decorator(
permission_required_or_403('manager.view_surface', ('manager.Surface', 'pk', 'pk'))
# translates to:
#
# In order to access, a specific permission is required. This permission
# is 'view_surface' for a specific surface. Which surface? This is calculated
# from view argument 'pk' (the last element in tuple), which is used to get a
# 'manager.Surface' instance (first element in tuple) with field 'pk' with same value as
# last element in tuple (the view argument 'pk').
#
# Or in pseudocode:
#
# s = Surface.objects.get(pk=view.kwargs['pk'])
# assert request.user.has_perm('view_surface', s)
)
surface_update_permission_required = method_decorator(
permission_required_or_403('manager.change_surface', ('manager.Surface', 'pk', 'pk'))
)
surface_delete_permission_required = method_decorator(
permission_required_or_403('manager.delete_surface', ('manager.Surface', 'pk', 'pk'))
)
surface_share_permission_required = method_decorator(
permission_required_or_403('manager.share_surface', ('manager.Surface', 'pk', 'pk'))
)
surface_publish_permission_required = method_decorator(
permission_required_or_403('manager.publish_surface', ('manager.Surface', 'pk', 'pk'))
)
class TopographyPermissionMixin(UserPassesTestMixin):
redirect_field_name = None
def has_surface_permissions(self, perms):
if 'pk' not in self.kwargs:
return True
try:
topo = Topography.objects.get(pk=self.kwargs['pk'])
except Topography.DoesNotExist:
raise Http404()
return all(self.request.user.has_perm(perm, topo.surface)
for perm in perms)
def test_func(self):
return NotImplementedError()
class TopographyViewPermissionMixin(TopographyPermissionMixin):
def test_func(self):
return self.has_surface_permissions(['view_surface'])
class TopographyUpdatePermissionMixin(TopographyPermissionMixin):
def test_func(self):
return self.has_surface_permissions(['view_surface', 'change_surface'])
class ORCIDUserRequiredMixin(UserPassesTestMixin):
def test_func(self):
return not self.request.user.is_anonymous
#
# Using a wizard because we need intermediate calculations
#
# There are 3 forms, used in 3 steps (0,1, then 2):
#
# 0: loading of the topography file
# 1: choosing the data source, add measurement date and a description
# 2: adding physical size and units (for data which is not available in the file, for 1D or 2D)
#
# Maybe an alternative would be to use AJAX calls as described here (under "GET"):
#
# https://sixfeetup.com/blog/making-your-django-templates-ajax-y
#
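# In the wizard class below these three steps are keyed 'upload', 'metadata' and 'units'
# (see get_form_initial / get_form_kwargs), in the same order as described above.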
class TopographyCreateWizard(ORCIDUserRequiredMixin, SessionWizardView):
form_list = [TopographyFileUploadForm, TopographyMetaDataForm, TopographyWizardUnitsForm]
template_name = 'manager/topography_wizard.html'
file_storage = FileSystemStorage(location=os.path.join(settings.MEDIA_ROOT, 'topographies/wizard'))
def get_form_initial(self, step):
initial = {}
if step in ['upload']:
#
# Pass surface in order to
# - have it later in done() method (for upload)
#
# make sure that the surface exists and belongs to the current user
try:
surface = Surface.objects.get(id=int(self.kwargs['surface_id']))
except Surface.DoesNotExist:
raise PermissionDenied()
if not self.request.user.has_perm('change_surface', surface):
raise PermissionDenied()
initial['surface'] = surface
if step in ['metadata', 'units']:
# provide datafile attribute from first step
step0_data = self.get_cleaned_data_for_step('upload')
datafile = step0_data['datafile']
channel_infos = step0_data['channel_infos']
if step == 'metadata':
initial['name'] = os.path.basename(datafile.name) # the original file name
# Use the latest data available on all channels as initial measurement date, if any - see GH #433
measurement_dates = []
for ch in channel_infos:
try:
measurement_time_str = ch.info[MEASUREMENT_TIME_INFO_FIELD]
measurement_time = datetime.datetime.strptime(measurement_time_str, '%Y-%m-%d %H:%M:%S')
                        measurement_dates.append(measurement_time.date())  # timezone is not known and not taken into account
except KeyError:
# measurement time not available in channel
pass
except ValueError as exc:
_log.info(f'Found measurement timestamp in file {datafile.name}, but could not parse: {exc}')
initial['measurement_date'] = max(measurement_dates, default=None)
if step in ['units']:
step1_data = self.get_cleaned_data_for_step('metadata') or {'data_source': 0}
# in case the form doesn't validate, the first data source is chosen, workaround for GH 691
channel = int(step1_data['data_source'])
channel_info = channel_infos[channel]
#
# Set initial size
#
has_2_dim = channel_info.dim == 2
physical_sizes = channel_info.physical_sizes
physical_sizes_is_None = (physical_sizes is None) or (physical_sizes == (None,)) \
or (physical_sizes == (None, None))
# workaround for GH 299 in PyCo and GH 446 in TopoBank
if physical_sizes_is_None:
initial_size_x, initial_size_y = None, None
# both database fields are always set, also for 1D topographies
elif has_2_dim:
initial_size_x, initial_size_y = physical_sizes
else:
initial_size_x, = physical_sizes # size is always a tuple
initial_size_y = None # needed for database field
initial['size_x'] = initial_size_x
initial['size_y'] = initial_size_y
initial['size_editable'] = physical_sizes_is_None
initial['is_periodic'] = False # so far, this is not returned by the readers
#
# Set unit
#
initial['unit'] = channel_info.unit
initial['unit_editable'] = initial['unit'] is None
#
# Set initial height scale factor
#
height_scale_factor_missing = channel_info.height_scale_factor is None # missing in file
initial['height_scale_editable'] = height_scale_factor_missing
initial['height_scale'] = 1 if height_scale_factor_missing else channel_info.height_scale_factor
#
# Set initial detrend mode
#
initial['detrend_mode'] = 'center'
#
# Set resolution (only for having the data later in the done method)
#
# TODO Can this be passed to done() differently? Creating the reader again later e.g.?
#
if has_2_dim:
initial['resolution_x'], initial['resolution_y'] = channel_info.nb_grid_pts
else:
initial['resolution_x'], = channel_info.nb_grid_pts
initial['resolution_y'] = None
return initial
def get_form_kwargs(self, step=None):
kwargs = super().get_form_kwargs(step)
if step in ['metadata', 'units']:
# provide datafile attribute and reader from first step
step0_data = self.get_cleaned_data_for_step('upload')
channel_infos = step0_data['channel_infos']
if step == 'metadata':
def clean_channel_name(s):
"""Restrict data shown in the dropdown for the channel name.
:param s: channel name as found in the file
:return: string without NULL characters, 100 chars maximum
"""
if s is None:
return "(unknown)"
return s.strip('\0')[:100]
#
# Set data source choices based on file contents
#
kwargs['data_source_choices'] = [(k, clean_channel_name(channel_info.name)) for k, channel_info in
enumerate(channel_infos)
if not (('unit' in channel_info.info)
and isinstance(channel_info.info['unit'], tuple))]
#
# Set surface in order to check for duplicate topography names
#
kwargs['surface'] = step0_data['surface']
kwargs['autocomplete_tags'] = tags_for_user(self.request.user)
if step in ['units']:
step1_data = self.get_cleaned_data_for_step('metadata') or {'data_source': 0}
# in case the form doesn't validate, the first data source is chosen, workaround for GH 691
# TODO: why can this happen? handle differently?
channel = int(step1_data['data_source'])
channel_info = channel_infos[channel]
has_2_dim = channel_info.dim == 2
no_sizes_given = channel_info.physical_sizes is None
# only allow periodic topographies in case of 2 dimension
kwargs['allow_periodic'] = has_2_dim and no_sizes_given # TODO simplify in 'no_sizes_given'?
kwargs['has_size_y'] = has_2_dim # TODO find common term, now we have 'has_size_y' and 'has_2_dim'
return kwargs
def get_context_data(self, form, **kwargs):
context = super().get_context_data(form, **kwargs)
surface = Surface.objects.get(id=int(self.kwargs['surface_id']))
context['surface'] = surface
redirect_in_get = self.request.GET.get("redirect")
redirect_in_post = self.request.POST.get("redirect")
if redirect_in_get:
context.update({'cancel_action': redirect_in_get})
elif redirect_in_post:
context.update({'cancel_action': redirect_in_post})
#
# We want to display information about readers directly on upload page
#
if self.steps.current == "upload":
context['reader_infos'] = get_reader_infos()
#
# Add context needed for tabs
#
context['extra_tabs'] = [
{
'title': f"{surface}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=surface.pk)),
'active': False,
'tooltip': f"Properties of surface '{surface.label}'"
},
{
'title': f"Add topography",
'icon': "plus-square",
'icon_style_prefix': 'far',
'href': self.request.path,
'active': True,
'tooltip': f"Adding a topography to surface '{surface.label}'"
}
]
return context
def done(self, form_list, **kwargs):
"""Finally use the form data when after finishing the wizard.
:param form_list: list of forms
:param kwargs:
:return: HTTPResponse
"""
#
# collect all data from forms
#
d = dict((k, v) for form in form_list for k, v in form.cleaned_data.items())
#
# Check whether given surface can be altered by this user
#
surface = d['surface']
if not self.request.user.has_perm('change_surface', surface):
raise PermissionDenied()
#
# move file to the permanent storage (wizard's files will be deleted)
#
new_path = os.path.join(self.request.user.get_media_path(),
os.path.basename(d['datafile'].name))
with d['datafile'].open(mode='rb') as datafile:
d['datafile'] = default_storage.save(new_path, File(datafile))
#
# Set the topography's creator to the current user uploading the file
#
d['creator'] = self.request.user
#
# Remove helper data
#
del d['channel_infos']
del d['resolution_value']
del d['resolution_unit']
del d['tip_radius_value']
del d['tip_radius_unit']
#
# create topography in database
#
instance = Topography(**d)
instance.save()
# we save once so the member variables like "data_source"
# have the correct type for the next step
# try to load topography once in order to
# check whether it can be loaded - we don't want a corrupt
# topography file in the system:
topo = Topography.objects.get(id=instance.id)
try:
# While loading we're also saving a squeezed form, so it
# can be loaded faster the next time
topo.renew_squeezed_datafile()
except Exception as exc:
_log.warning("Cannot read topography from file '{}', exception: {}".format(
d['datafile'], str(exc)
))
_log.warning("Topography {} was created, but will be deleted now.".format(topo.id))
topo.delete()
#
# Redirect to an error page
#
return redirect('manager:topography-corrupted', surface_id=surface.id)
#
# Ok, we can work with this data.
# Trigger some calculations in background.
#
transaction.on_commit(lambda: renew_topography_thumbnail.delay(topo.id))
transaction.on_commit(lambda: renew_analyses_related_to_topography.delay(topo.id))
#
# Notify other others with access to the topography
#
other_users = get_users_with_perms(topo.surface).filter(~Q(id=self.request.user.id))
for u in other_users:
notify.send(sender=self.request.user, verb='create', target=topo, recipient=u,
description=f"User '{self.request.user.name}' has created the topography '{topo.name}' " + \
f"in surface '{topo.surface.name}'.",
href=reverse('manager:topography-detail', kwargs=dict(pk=topo.pk)))
#
# The topography could be correctly loaded and we show a page with details
#
return redirect('manager:topography-detail', pk=topo.pk)
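    # Illustrative summary (added comment, not part of the original code): done() above
    # merges the cleaned_data of all wizard steps, checks the 'change_surface' permission,
    # moves the uploaded file into permanent storage, creates the Topography, verifies
    # that it can actually be read (otherwise it is deleted again and the user is sent
    # to the "corrupted" page), queues thumbnail/analysis renewal after the transaction
    # commits, and finally notifies all other users with access to the surface.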
class CorruptedTopographyView(TemplateView):
template_name = "manager/topography_corrupted.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
surface = Surface.objects.get(id=kwargs['surface_id'])
context['surface'] = surface
#
# Add context needed for tabs
#
context['extra_tabs'] = [
{
'title': f"{surface}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=surface.pk)),
'active': False,
'tooltip': f"Properties of surface '{surface.label}'"
},
{
'title': f"Corrupted File",
'icon': "flash",
'href': self.request.path,
'active': True,
'tooltip': f"Failure while uploading a new file"
}
]
return context
class TopographyUpdateView(TopographyUpdatePermissionMixin, UpdateView):
model = Topography
form_class = TopographyForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
topo = self.object
kwargs['has_size_y'] = topo.size_y is not None
kwargs['autocomplete_tags'] = tags_for_user(self.request.user)
toporeader = get_topography_reader(topo.datafile, format=topo.datafile_format)
channel_info = toporeader.channels[topo.data_source]
has_2_dim = channel_info.dim == 2
no_sizes_given = channel_info.physical_sizes is None
kwargs['allow_periodic'] = has_2_dim and no_sizes_given
return kwargs
def form_valid(self, form):
topo = self.object
user = self.request.user
notification_msg = f"User {user} changed topography '{topo.name}'. Changed fields: {','.join(form.changed_data)}."
#
# If a significant field changes, renew squeezed datafile, all analyses, and also thumbnail
#
# changed_dict = topo.tracker.changed() # key: field name, value: previous field value
changed_fields = form.changed_data
_log.debug("These fields have been changed according to form: %s", changed_fields)
significant_fields = {'size_x', 'size_y', 'unit', 'is_periodic', 'height_scale',
'detrend_mode', 'datafile', 'data_source',
'instrument_type', # , 'instrument_parameters'
# 'tip_radius_value', 'tip_radius_unit',
}
significant_fields_with_changes = set(changed_fields).intersection(significant_fields)
# check instrument_parameters manually, since this is not detected properly
if form.cleaned_data['instrument_parameters'] != form.initial['instrument_parameters']:
significant_fields_with_changes.add('instrument_parameters')
_log.info("Instrument parameters changed:")
_log.info(" before: %s", form.initial['instrument_parameters'])
_log.info(" after: %s", form.cleaned_data['instrument_parameters'])
if len(significant_fields_with_changes) > 0:
_log.info(f"During edit of topography id={topo.id} some significant fields changed: " +
f"{significant_fields_with_changes}.")
_log.info("Renewing squeezed datafile...")
topo.renew_squeezed_datafile() # cannot be done in background, other steps depend on this, see GH #590
_log.info("Triggering renewal of thumbnail in background...")
transaction.on_commit(lambda: renew_topography_thumbnail.delay(topo.id))
_log.info("Triggering renewal of analyses in background...")
transaction.on_commit(lambda: renew_analyses_related_to_topography.delay(topo.id))
notification_msg += f"\nBecause significant fields have changed, all related analyses are recalculated now."
else:
_log.info("Changes not significant for renewal of thumbnails or analysis results.")
#
# notify other users
#
other_users = get_users_with_perms(topo.surface).filter(~Q(id=user.id))
for u in other_users:
notify.send(sender=user, verb='change', target=topo,
recipient=u,
description=notification_msg,
href=reverse('manager:topography-detail', kwargs=dict(pk=topo.pk)))
return super().form_valid(form)
def get_success_url(self):
if "save-stay" in self.request.POST:
return reverse('manager:topography-update', kwargs=dict(pk=self.object.pk))
else:
return reverse('manager:topography-detail', kwargs=dict(pk=self.object.pk))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
topo = self.object
try:
context['topography_next'] = topo.get_next_by_measurement_date(surface=topo.surface).id
except Topography.DoesNotExist:
context['topography_next'] = topo.id
try:
context['topography_prev'] = topo.get_previous_by_measurement_date(surface=topo.surface).id
except Topography.DoesNotExist:
context['topography_prev'] = topo.id
#
# Add context needed for tabs
#
context['extra_tabs'] = [
{
'title': f"{topo.surface.label}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=topo.surface.pk)),
'active': False,
'tooltip': f"Properties of surface '{topo.surface.label}'"
},
{
'title': f"{topo.name}",
'icon': "file",
'icon_style_prefix': 'far',
'href': reverse('manager:topography-detail', kwargs=dict(pk=topo.pk)),
'active': False,
'tooltip': f"Properties of topography '{topo.name}'"
},
{
'title': f"Edit Topography",
'icon': "pencil",
'href': self.request.path,
'active': True,
'tooltip': f"Editing topography '{topo.name}'"
}
]
return context
def topography_plot(request, pk):
"""Render an HTML snippet with topography plot"""
try:
pk = int(pk)
topo = Topography.objects.get(pk=pk)
assert request.user.has_perm('view_surface', topo.surface)
except (ValueError, Topography.DoesNotExist, AssertionError):
raise PermissionDenied() # This should be shown independent of whether the surface exists
errors = [] # list of dicts with keys 'message' and 'link'
context = {}
plotted = False
try:
plot = topo.get_plot()
plotted = True
except LoadTopographyException as exc:
err_message = "Topography '{}' (id: {}) cannot be loaded unexpectedly.".format(
topo.name, topo.id)
_log.error(err_message)
link = mailto_link_for_reporting_an_error(f"Failure loading topography (id: {topo.id})",
"Plotting measurement",
err_message,
traceback.format_exc())
errors.append(dict(message=err_message, link=link))
except PlotTopographyException as exc:
err_message = "Topography '{}' (id: {}) cannot be plotted.".format(topo.name, topo.id)
_log.error(err_message)
link = mailto_link_for_reporting_an_error(f"Failure plotting measurement (id: {topo.id})",
"Plotting measurement",
err_message,
traceback.format_exc())
errors.append(dict(message=err_message, link=link))
if plotted:
script, div = components(plot)
context['image_plot_script'] = script
context['image_plot_div'] = div
context['errors'] = errors
return render(request, 'manager/topography_plot.html', context=context)
class TopographyDetailView(TopographyViewPermissionMixin, DetailView):
model = Topography
context_object_name = 'topography'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
topo = self.object
try:
context['topography_next'] = topo.get_next_by_measurement_date(surface=topo.surface).id
except Topography.DoesNotExist:
context['topography_next'] = topo.id
try:
context['topography_prev'] = topo.get_previous_by_measurement_date(surface=topo.surface).id
except Topography.DoesNotExist:
context['topography_prev'] = topo.id
#
# Add context needed for tabs
#
context['extra_tabs'] = [
{
'title': f"{topo.surface.label}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=topo.surface.pk)),
'active': False,
'login_required': False,
'tooltip': f"Properties of surface '{topo.surface.label}'"
},
{
'title': f"{topo.name}",
'icon': "file",
'icon_style_prefix': 'far',
'href': self.request.path,
'active': True,
'login_required': False,
'tooltip': f"Properties of topography '{topo.name}'"
}
]
return context
class TopographyDeleteView(TopographyUpdatePermissionMixin, DeleteView):
model = Topography
context_object_name = 'topography'
success_url = reverse_lazy('manager:select')
def get_success_url(self):
user = self.request.user
topo = self.object
surface = topo.surface
link = reverse('manager:surface-detail', kwargs=dict(pk=surface.pk))
#
# notify other users
#
other_users = get_users_with_perms(surface).filter(~Q(id=user.id))
for u in other_users:
notify.send(sender=user, verb="delete",
recipient=u,
description=f"User '{user.name}' deleted topography '{topo.name}' " + \
f"from surface '{surface.name}'.",
href=link)
return link
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
topo = self.object
surface = topo.surface
context['extra_tabs'] = [
{
'title': f"{topo.surface.label}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=topo.surface.pk)),
'active': False,
'tooltip': f"Properties of surface '{topo.surface.label}'"
},
{
'title': f"{topo.name}",
'icon': "file",
'icon_style_prefix': 'far',
'href': reverse('manager:topography-detail', kwargs=dict(pk=topo.pk)),
'active': False,
'tooltip': f"Properties of topography '{topo.name}'"
},
{
'title': f"Delete Topography?",
'icon': "trash",
'href': self.request.path,
'active': True,
'tooltip': f"Conforming deletion of topography '{topo.name}'"
}
]
return context
class SelectView(TemplateView):
template_name = "manager/select.html"
def dispatch(self, request, *args, **kwargs):
# count this view event for statistics
metric = Metric.objects.SEARCH_VIEW_COUNT
increase_statistics_by_date(metric, period=Period.DAY)
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
session = self.request.session
search_term = get_search_term(self.request)
if search_term:
# When searching, we want the default select tab state except for
            # the search term, which is taken from the request parameters.
# If not using the default select tab state, this can result
# in "Load Error!" on the page (#543) because e.g. page 2
# is not available in the result.
select_tab_state = DEFAULT_SELECT_TAB_STATE.copy()
select_tab_state['search_term'] = search_term
else:
# .. otherwise keep search term from session variable 'select_tab_state'
# and all other state settings
select_tab_state = session.get('select_tab_state',
default=DEFAULT_SELECT_TAB_STATE.copy())
# key: tree mode
context['base_urls'] = {
'surface list': self.request.build_absolute_uri(reverse('manager:search')),
'tag tree': self.request.build_absolute_uri(reverse('manager:tag-list')),
}
context['category_filter_choices'] = CATEGORY_FILTER_CHOICES.copy()
if self.request.user.is_anonymous:
            # Anonymous users have only one choice
context['sharing_status_filter_choices'] = {
'published': SHARING_STATUS_FILTER_CHOICES['published']
}
            select_tab_state['sharing_status'] = 'published'  # the only available choice should be selected
else:
context['sharing_status_filter_choices'] = SHARING_STATUS_FILTER_CHOICES.copy()
context['select_tab_state'] = select_tab_state.copy()
# The session needs a default for the state of the select tab
session['select_tab_state'] = select_tab_state
return context
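    # Illustrative note (added comment, not part of the original code): the select tab
    # state stored in the session holds at least the keys used above and in
    # SurfaceSearchPaginator below: 'search_term', 'category', 'sharing_status',
    # 'tree_mode', 'page_size' and 'current_page'.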
class SurfaceCreateView(ORCIDUserRequiredMixin, CreateView):
model = Surface
form_class = SurfaceForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['autocomplete_tags'] = tags_for_user(self.request.user)
return kwargs
def get_initial(self, *args, **kwargs):
initial = super(SurfaceCreateView, self).get_initial()
initial = initial.copy()
initial['creator'] = self.request.user
return initial
def get_success_url(self):
return reverse('manager:surface-detail', kwargs=dict(pk=self.object.pk))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['extra_tabs'] = [
{
'title': f"Create surface",
'icon': "plus-square",
'icon_style_prefix': 'far',
'href': self.request.path,
'active': True,
'tooltip': "Creating a new surface"
}
]
return context
class SurfaceDetailView(DetailView):
model = Surface
context_object_name = 'surface'
@surface_view_permission_required
def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
surface = self.object
#
# Count this event for statistics
#
increase_statistics_by_date_and_object(Metric.objects.SURFACE_VIEW_COUNT,
period=Period.DAY, obj=surface)
#
# bandwidth data
#
bw_data = bandwidths_data(surface.topography_set.all())
# filter out all entries with errors and display error messages
bw_data_with_errors = [x for x in bw_data if x['error_message'] is not None]
bw_data_without_errors = [x for x in bw_data if x['error_message'] is None]
context['bandwidths_data_with_errors'] = bw_data_with_errors
#
# Plot bandwidths with bokeh
#
if len(bw_data_without_errors) > 0:
bar_height = 0.95
bw_left = [bw['lower_bound'] for bw in bw_data_without_errors]
bw_right = [bw['upper_bound'] for bw in bw_data_without_errors]
bw_center = np.exp((np.log(bw_left)+np.log(bw_right))/2) # we want to center on log scale
bw_names = [bw['topography'].name for bw in bw_data_without_errors]
bw_topography_links = [bw['link'] for bw in bw_data_without_errors]
bw_thumbnail_links = [reverse('manager:topography-thumbnail',
kwargs=dict(pk=bw['topography'].pk))
for bw in bw_data_without_errors]
bw_y = range(0, len(bw_data_without_errors))
bw_source = ColumnDataSource(dict(y=bw_y, left=bw_left, right=bw_right, center=bw_center,
name=bw_names,
topography_link=bw_topography_links,
thumbnail_link=bw_thumbnail_links))
x_range = (min(bw_left), max(bw_right))
TOOL_TIPS = """
<div class="bandwidth-hover-box">
<img src="@thumbnail_link" height="80" width="80" alt="Thumbnail is missing, sorry">
</img>
<span>@name</span>
</div>
"""
plot = figure(x_range=x_range,
x_axis_label="Bandwidth",
x_axis_type="log",
sizing_mode='stretch_width',
tools=["tap", "hover"],
toolbar_location=None,
tooltips=TOOL_TIPS)
hbar_renderer = plot.hbar(y="y", left="left", right="right", height=bar_height,
name='bandwidths', source=bw_source)
hbar_renderer.nonselection_glyph = None # makes glyph invariant on selection
plot.yaxis.visible = False
plot.grid.visible = False
plot.outline_line_color = None
plot.xaxis.formatter = FuncTickFormatter(code="return siSuffixMeters(2)(tick)")
            # make sure a single topography does not look like a block
if len(bw_data_without_errors) == 1:
plot.y_range.end = bar_height * 1.5
            # make clicking a bar open a new page
taptool = plot.select(type=TapTool)
taptool.callback = OpenURL(url="@topography_link", same_tab=True)
# include plot into response
bw_plot_script, bw_plot_div = components(plot)
context['plot_script'] = bw_plot_script
context['plot_div'] = bw_plot_div
#
# permission data
#
ACTIONS = ['view', 'change', 'delete', 'share'] # defines the order of permissions in table
surface_perms_table = get_permission_table_data(surface, self.request.user, ACTIONS)
context['permission_table'] = {
'head': [''] + ACTIONS,
'body': surface_perms_table
}
#
# Build tab information
#
context['extra_tabs'] = [
{
'title': surface.label,
'icon': "gem",
'icon_style_prefix': 'far',
'href': self.request.path,
'active': True,
'login_required': False,
'tooltip': f"Properties of surface '{surface.label}'"
}
]
#
# Build urls for version selection in dropdown
#
def version_label_from_publication(pub):
return f'Version {pub.version} ({pub.datetime.date()})' if pub else 'Work in progress'
if surface.is_published:
original_surface = surface.publication.original_surface
context['this_version_label'] = version_label_from_publication(surface.publication)
context['publication_url'] = self.request.build_absolute_uri(surface.publication.get_absolute_url())
context['license_info'] = settings.CC_LICENSE_INFOS[surface.publication.license]
else:
original_surface = surface
context['this_version_label'] = version_label_from_publication(None)
publications = Publication.objects.filter(original_surface=original_surface).order_by('version')
version_dropdown_items = []
if self.request.user.has_perm('view_surface', original_surface):
# Only add link to original surface if user is allowed to view
version_dropdown_items.append({
'label': version_label_from_publication(None),
'surface': original_surface,
})
for pub in publications:
version_dropdown_items.append({
'label': version_label_from_publication(pub),
'surface': pub.surface,
})
context['version_dropdown_items'] = version_dropdown_items
version_badge_text = ''
if surface.is_published:
if context['this_version_label'] != version_dropdown_items[-1]['label']:
version_badge_text += 'Newer version available'
elif len(publications) > 0:
version_badge_text += 'Published versions available'
context['version_badge_text'] = version_badge_text
# add formats to show citations for
context['citation_flavors'] = [
('Text format with link', 'html', False), # title, flavor, use <pre><code>...</code></pre>
('RIS format', 'ris', True),
('BibTeX format', 'bibtex', True),
('BibLaTeX format', 'biblatex', True),
]
return context
class SurfaceUpdateView(UpdateView):
model = Surface
form_class = SurfaceForm
@surface_update_permission_required
def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['autocomplete_tags'] = tags_for_user(self.request.user)
return kwargs
def form_valid(self, form):
surface = self.object
user = self.request.user
notification_msg = f"User {user} changed surface '{surface.name}'. Changed fields: {','.join(form.changed_data)}."
#
# notify other users
#
other_users = get_users_with_perms(surface).filter(~Q(id=user.id))
for u in other_users:
notify.send(sender=user, verb='change', target=surface,
recipient=u,
description=notification_msg,
href=reverse('manager:surface-detail', kwargs=dict(pk=surface.pk)))
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
surface = self.object
context['extra_tabs'] = [
{
'title': f"{surface.label}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=surface.pk)),
'active': False,
'tooltip': f"Properties of surface '{surface.label}'"
},
{
'title': f"Edit surface",
'icon': "pencil",
'href': self.request.path,
'active': True,
'tooltip': f"Editing surface '{surface.label}'"
}
]
return context
def get_success_url(self):
return reverse('manager:surface-detail', kwargs=dict(pk=self.object.pk))
class SurfaceDeleteView(DeleteView):
model = Surface
context_object_name = 'surface'
success_url = reverse_lazy('manager:select')
@surface_delete_permission_required
def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)
def get_success_url(self):
user = self.request.user
surface = self.object
link = reverse('manager:select')
#
# notify other users
#
other_users = get_users_with_perms(surface).filter(~Q(id=user.id))
for u in other_users:
notify.send(sender=user, verb="delete",
recipient=u,
description=f"User '{user.name}' deleted surface '{surface.name}'.",
href=link)
return link
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
surface = self.object
#
# Add context needed for tabs
#
context['extra_tabs'] = [
{
'title': f"{surface.label}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=surface.pk)),
'active': False,
'tooltip': f"Properties of surface '{surface.label}'"
},
{
'title': f"Delete Surface?",
'icon': "trash",
'href': self.request.path,
'active': True,
'tooltip': f"Conforming deletion of surface '{surface.label}'"
}
]
return context
class SurfaceShareView(FormMixin, DetailView):
model = Surface
context_object_name = 'surface'
template_name = "manager/share.html"
form_class = SurfaceShareForm
@surface_share_permission_required
def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)
def get_success_url(self):
return reverse('manager:surface-detail', kwargs=dict(pk=self.object.pk))
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
if 'save' in self.request.POST:
users = form.cleaned_data.get('users', [])
allow_change = form.cleaned_data.get('allow_change', False)
surface = self.object
for user in users:
_log.info("Sharing surface {} with user {} (allow change? {}).".format(
surface.pk, user.username, allow_change))
surface.share(user, allow_change=allow_change)
#
# Notify user about the shared surface
#
notification_message = f"{self.request.user} has shared surface '{surface.name}' with you"
notify.send(self.request.user, recipient=user,
verb="share", # TODO Does verb follow activity stream defintions?
target=surface,
public=False,
description=notification_message,
href=surface.get_absolute_url())
if allow_change:
notify.send(self.request.user, recipient=user, verb="allow change",
target=surface, public=False,
description=f"""
You are allowed to change the surface '{surface.name}' shared by {self.request.user}
""",
href=surface.get_absolute_url())
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
surface = self.object
context['extra_tabs'] = [
{
'title': f"{surface.label}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=surface.pk)),
'active': False,
'tooltip': f"Properties of surface '{surface.label}'"
},
{
'title': f"Share surface?",
'icon': "share-alt",
'href': self.request.path,
'active': True,
'tooltip': f"Sharing surface '{surface.label}'"
}
]
context['surface'] = surface
context['instance_label'] = surface.label
context['instance_type_label'] = "surface"
context['cancel_url'] = reverse('manager:surface-detail', kwargs=dict(pk=surface.pk))
return context
class PublicationsTable(tables.Table):
publication = tables.Column(linkify=True, verbose_name='Surface', order_by='surface__name')
num_topographies = tables.Column(verbose_name='# Measurements')
authors = tables.Column(verbose_name="Authors")
license = tables.Column(verbose_name="License")
datetime = tables.Column(verbose_name="Publication Date")
version = tables.Column(verbose_name="Version")
def render_publication(self, value):
return value.surface.name
def render_datetime(self, value):
return value.date()
def render_license(self, value, record):
return mark_safe(f"""
<a href="{settings.CC_LICENSE_INFOS[value]['description_url']}" target="_blank">
{record['publication'].get_license_display()}</a>
""")
class Meta:
orderable = True
class PublicationListView(ListView):
template_name = "manager/publication_list.html"
def get_queryset(self):
return Publication.objects.filter(publisher=self.request.user) # TODO move to publication app?
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
#
# Create table cells
#
data = [
{
'publication': pub,
'surface': pub.surface,
'num_topographies': pub.surface.num_topographies(),
'authors': pub.authors,
'license': pub.license,
'datetime': pub.datetime,
'version': pub.version
} for pub in self.get_queryset()
]
context['publication_table'] = PublicationsTable(
data=data,
empty_text="You haven't published any surfaces yet.",
request=self.request)
return context
class SurfacePublishView(FormView):
template_name = "manager/surface_publish.html"
form_class = SurfacePublishForm
@surface_publish_permission_required
def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)
def _get_surface(self):
surface_pk = self.kwargs['pk']
return Surface.objects.get(pk=surface_pk)
def get_initial(self):
initial = super().get_initial()
initial['author_0'] = ''
initial['num_author_fields'] = 1
return initial
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
if self.request.method == 'POST':
# The field 'num_author_fields' may have been increased by
# Javascript (Vuejs) on the client in order to add new authors.
# This should be sent to the form in order to know
# how many fields the form should have and how many author names
# should be combined. So this is passed here:
kwargs['num_author_fields'] = int(self.request.POST.get('num_author_fields'))
return kwargs
def get_success_url(self):
return reverse('manager:publications')
def form_valid(self, form):
license = form.cleaned_data.get('license')
authors = form.cleaned_data.get('authors')
surface = self._get_surface()
try:
surface.publish(license, authors)
except NewPublicationTooFastException as exc:
return redirect("manager:surface-publication-rate-too-high",
pk=surface.pk)
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
surface = self._get_surface()
context['extra_tabs'] = [
{
'title': f"{surface.label}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=surface.pk)),
'active': False,
'tooltip': f"Properties of surface '{surface.label}'"
},
{
'title': f"Publish surface?",
'icon': "bullhorn",
'href': self.request.path,
'active': True,
'tooltip': f"Publishing surface '{surface.label}'"
}
]
context['surface'] = surface
context['max_len_authors_field'] = MAX_LEN_AUTHORS_FIELD
return context
class PublicationRateTooHighView(TemplateView):
template_name = "manager/publication_rate_too_high.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['min_seconds'] = settings.MIN_SECONDS_BETWEEN_SAME_SURFACE_PUBLICATIONS
surface_pk = self.kwargs['pk']
surface = Surface.objects.get(pk=surface_pk)
context['extra_tabs'] = [
{
'title': f"{surface.label}",
'icon': "gem",
'icon_style_prefix': 'far',
'href': reverse('manager:surface-detail', kwargs=dict(pk=surface.pk)),
'active': False,
'tooltip': f"Properties of surface '{surface.label}'"
},
{
'title': f"Publication rate too high",
'icon': "flash",
'href': self.request.path,
'active': True,
}
]
return context
class SharingInfoTable(tables.Table):
surface = tables.Column(linkify=lambda **kwargs: kwargs['record']['surface'].get_absolute_url(),
accessor='surface__name')
num_topographies = tables.Column(verbose_name='# Measurements')
created_by = tables.Column(linkify=lambda **kwargs: kwargs['record']['created_by'].get_absolute_url(),
accessor='created_by__name')
shared_with = tables.Column(linkify=lambda **kwargs: kwargs['record']['shared_with'].get_absolute_url(),
accessor='shared_with__name')
allow_change = tables.BooleanColumn()
selected = tables.CheckBoxColumn(attrs={
'th__input': {'class': 'select-all-checkbox'},
'td__input': {'class': 'select-checkbox'},
})
def __init__(self, *args, **kwargs):
self._request = kwargs['request']
super().__init__(*args, **kwargs)
# def render_surface(self, value):
# return value.label
# def render_created_by(self, value):
# return self._render_user(value)
#def render_shared_with(self, value):
# return self._render_user(value)
#def _render_user(self, user):
# if self._request.user == user:
# return "You"
# return user.name
class Meta:
orderable = True
order_by = ('surface', 'shared_with')
def sharing_info(request):
if request.user.is_anonymous:
raise PermissionDenied()
#
# Handle POST request if any
#
if (request.method == "POST") and ('selected' in request.POST):
        # only do something if there is a selection
unshare = 'unshare' in request.POST
allow_change = 'allow_change' in request.POST
for s in request.POST.getlist('selected'):
# decode selection string
surface_id, share_with_user_id = s.split(',')
surface_id = int(surface_id)
share_with_user_id = int(share_with_user_id)
surface = Surface.objects.get(id=surface_id)
share_with = User.objects.get(id=share_with_user_id)
if request.user not in [share_with, surface.creator]:
            # we don't allow changing shares if the request user is not involved
_log.warning(f"Changing share on surface {surface.id} not allowed for user {request.user}.")
continue
if unshare:
surface.unshare(share_with)
notify.send(sender=request.user, recipient=share_with, verb='unshare', public=False,
description=f"Surface '{surface.name}' from {request.user} is no longer shared with you",
href=reverse('manager:sharing-info'))
elif allow_change and (request.user == surface.creator): # only allow change for surface creator
surface.share(share_with, allow_change=True)
notify.send(sender=request.user, recipient=share_with, verb='allow change', target=surface,
public=False,
description=f"{request.user} has given you permissions to change surface '{surface.name}'",
href=surface.get_absolute_url())
#
# Collect information to display
#
# Get all surfaces, which are visible, but exclude the published surfaces
surfaces = get_objects_for_user(request.user, 'view_surface', klass=Surface).filter(publication=None)
tmp = []
for s in surfaces:
surface_perms = get_users_with_perms(s, attach_perms=True)
# is now a dict of the form
        # {<User: joe>: ['view_surface'], <User: dan>: ['view_surface', 'change_surface']}
surface_users = sorted(surface_perms.keys(), key=lambda u: u.name if u else '')
for u in surface_users:
# Leave out these shares:
#
# - share of a user with himself as creator (trivial)
# - ignore user if anonymous
# - shares where the request user is not involved
#
if (u != s.creator) and (not u.is_anonymous) and \
((u == request.user) or (s.creator == request.user)):
allow_change = ('change_surface' in surface_perms[u])
tmp.append((s, u, allow_change))
#
# Create table cells
#
data = [
{
'surface': surface,
'num_topographies': surface.num_topographies(),
'created_by': surface.creator,
'shared_with': shared_with,
'allow_change': allow_change,
'selected': "{},{}".format(surface.id, shared_with.id),
} for surface, shared_with, allow_change in tmp
]
#
# Build table and render result
#
sharing_info_table = SharingInfoTable(data=data,
empty_text="No surfaces shared by or with you.",
request=request)
RequestConfig(request).configure(sharing_info_table)
# sharing_info_table.order_by('num_topographies')
return render(request,
template_name='manager/sharing_info.html',
context={'sharing_info_table': sharing_info_table})
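# Illustrative note (added comment, not part of the original code): each entry of the
# 'selected' POST list (and of the table's checkbox column) encodes one share as the
# string "<surface_id>,<user_id>", e.g. "13,7", which is what split(',') above parses.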
def download_surface(request, surface_id):
"""Returns a file comprised from topographies contained in a surface.
:param request:
:param surface_id: surface id
:return:
"""
#
# Check existence and permissions for given surface
#
try:
surface = Surface.objects.get(id=surface_id)
except Surface.DoesNotExist:
raise PermissionDenied()
if not request.user.has_perm('view_surface', surface):
raise PermissionDenied()
content_data = None
#
# If the surface has been published, there might be a container file already.
    # If yes:
    #   Is there already a container file?
    #   Then use it instead of creating a new one.
    #   If not, create the container now and save it in the publication for later reuse.
    # If no: create a container for this surface on the fly
#
renew_publication_container = False
if surface.is_published:
pub = surface.publication
# noinspection PyBroadException
try:
with pub.container.open() as cf:
content_data = cf.read()
_log.debug(f"Read container for published surface {pub.short_url} from storage.")
except Exception: # not interested here, why it fails
renew_publication_container = True
if content_data is None:
container_bytes = BytesIO()
_log.info(f"Preparing container of surface id={surface_id} for download..")
write_surface_container(container_bytes, [surface], request=request)
content_data = container_bytes.getvalue()
if renew_publication_container:
try:
container_bytes.seek(0)
_log.info(f"Saving container for publication with URL {pub.short_url} to storage for later..")
pub.container.save(pub.container_storage_path, container_bytes)
except (OSError, BlockingIOError) as exc:
_log.error(f"Cannot save container for publication {pub.short_url} to storage. "
f"Reason: {exc}")
# Prepare response object.
response = HttpResponse(content_data,
content_type='application/x-zip-compressed')
response['Content-Disposition'] = 'attachment; filename="{}"'.format('surface.zip')
increase_statistics_by_date_and_object(Metric.objects.SURFACE_DOWNLOAD_COUNT,
period=Period.DAY, obj=surface)
return response
def download_selection_as_surfaces(request):
"""Returns a file comprised from surfaces related to the selection.
:param request: current request
:return:
"""
from .utils import current_selection_as_surface_list
surfaces = current_selection_as_surface_list(request)
container_bytes = BytesIO()
write_surface_container(container_bytes, surfaces, request=request)
# Prepare response object.
response = HttpResponse(container_bytes.getvalue(),
content_type='application/x-zip-compressed')
response['Content-Disposition'] = 'attachment; filename="{}"'.format('surface.zip')
# increase download count for each surface
for surf in surfaces:
increase_statistics_by_date_and_object(Metric.objects.SURFACE_DOWNLOAD_COUNT,
period=Period.DAY, obj=surf)
return response
#######################################################################################
# Views for REST interface
#######################################################################################
class SurfaceSearchPaginator(PageNumberPagination):
page_size = DEFAULT_PAGE_SIZE
page_query_param = 'page'
page_size_query_param = 'page_size'
max_page_size = MAX_PAGE_SIZE
def get_paginated_response(self, data):
#
# Save information about requested data in session
#
session = self.request.session
select_tab_state = session.get('select_tab_state', DEFAULT_SELECT_TAB_STATE.copy())
# not using the keyword argument "default" here, because in some tests,
        # the session is a simple dict and not a real session object. A simple
# dict's .get() has no keyword argument 'default', although it can be given
# as second parameter.
select_tab_state['search_term'] = get_search_term(self.request)
select_tab_state['category'] = get_category(self.request)
select_tab_state['sharing_status'] = get_sharing_status(self.request)
select_tab_state['tree_mode'] = get_tree_mode(self.request)
page_size = self.get_page_size(self.request)
select_tab_state[self.page_size_query_param] = page_size
select_tab_state['current_page'] = self.page.number
_log.debug("Setting select tab state set in paginator: %s", select_tab_state)
session['select_tab_state'] = select_tab_state
return Response({
'num_items': self.page.paginator.count,
'num_pages': self.page.paginator.num_pages,
'page_range': list(self.page.paginator.page_range),
'page_urls': list(self.get_page_urls()),
'current_page': self.page.number,
'num_items_on_current_page': len(self.page.object_list),
'page_size': page_size,
'search_term': select_tab_state['search_term'],
'category': select_tab_state['category'],
'sharing_status': select_tab_state['sharing_status'],
'tree_mode': select_tab_state['tree_mode'],
'page_results': data
})
def get_page_urls(self):
base_url = self.request.build_absolute_uri()
urls = []
for page_no in self.page.paginator.page_range:
if page_no == 1:
url = remove_query_param(base_url, self.page_query_param)
else:
url = replace_query_param(base_url, self.page_query_param, page_no)
# always add page size, so requests for other pages have it
url = replace_query_param(url, self.page_size_query_param, self.get_page_size(self.request))
urls.append(url)
return urls
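# Illustrative sketch (added comment, not part of the original code): the paginator
# above returns a JSON body roughly of this shape (all values here are hypothetical):
#
#   {
#       "num_items": 42, "num_pages": 5, "page_range": [1, 2, 3, 4, 5],
#       "page_urls": ["...", "...?page=2&page_size=10", "..."],
#       "current_page": 2, "num_items_on_current_page": 10, "page_size": 10,
#       "search_term": "...", "category": "...", "sharing_status": "...",
#       "tree_mode": "...", "page_results": [...]
#   }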
class TagTreeView(ListAPIView):
"""
Generate tree of tags with surfaces and topographies underneath.
"""
serializer_class = TagSerializer
pagination_class = SurfaceSearchPaginator
def get_queryset(self):
surfaces = filtered_surfaces(self.request)
topographies = filtered_topographies(self.request, surfaces)
return tags_for_user(self.request.user, surfaces, topographies).filter(parent=None)
# Only top level are collected, the children are added in the serializer.
#
# TODO The filtered surfaces and topographies are calculated twice here, not sure how to circumvent this.
# Maybe by caching with request argument?
def get_serializer_context(self):
context = super().get_serializer_context()
context['selected_instances'] = selected_instances(self.request)
context['request'] = self.request
surfaces = filtered_surfaces(self.request)
topographies = filtered_topographies(self.request, surfaces)
tags = tags_for_user(self.request.user, surfaces, topographies)
context['tags_for_user'] = tags
#
# also pass filtered surfaces and topographies the user has access to
#
context['surfaces'] = surfaces
context['topographies'] = topographies
return context
class SurfaceListView(ListAPIView):
"""
List all surfaces with topographies underneath.
"""
serializer_class = SurfaceSerializer
pagination_class = SurfaceSearchPaginator
def get_queryset(self):
return filtered_surfaces(self.request)
def get_serializer_context(self):
context = super().get_serializer_context()
context['selected_instances'] = selected_instances(self.request)
context['request'] = self.request
return context
def _selection_set(request):
return set(request.session.get('selection', []))
def _surface_key(pk): # TODO use such a function everywhere: instance_key_for_selection()
return 'surface-{}'.format(pk)
def _topography_key(pk):
return 'topography-{}'.format(pk)
def _tag_key(pk):
return 'tag-{}'.format(pk)
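# Illustrative note (added comment, not part of the original code): after a few select
# calls, request.session['selection'] is simply a list of string keys built by the
# helpers above, e.g. ['surface-13', 'topography-42', 'tag-7'].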
def set_surface_select_status(request, pk, select_status):
"""Marks the given surface as 'selected' in session or checks this.
:param request: request
:param pk: primary key of the surface
:param select_status: True if surface should be selected, False if it should be unselected
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
try:
pk = int(pk)
surface = Surface.objects.get(pk=pk)
assert request.user.has_perm('view_surface', surface)
except (ValueError, Surface.DoesNotExist, AssertionError):
raise PermissionDenied() # This should be shown independent of whether the surface exists
surface_key = _surface_key(pk)
selection = _selection_set(request)
is_selected = surface_key in selection
if request.method == 'POST':
if select_status:
# surface should be selected
selection.add(surface_key)
elif is_selected:
selection.remove(surface_key)
request.session['selection'] = list(selection)
data = current_selection_as_basket_items(request)
return Response(data)
@api_view(['POST'])
def select_surface(request, pk):
"""Marks the given surface as 'selected' in session.
:param request: request
:param pk: primary key of the surface
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_surface_select_status(request, pk, True)
@api_view(['POST'])
def unselect_surface(request, pk):
"""Marks the given surface as 'unselected' in session.
:param request: request
:param pk: primary key of the surface
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_surface_select_status(request, pk, False)
def set_topography_select_status(request, pk, select_status):
"""Marks the given topography as 'selected' or 'unselected' in session.
:param request: request
    :param pk: primary key of the topography
:param select_status: True or False, True means "mark as selected", False means "mark as unselected"
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
try:
pk = int(pk)
topo = Topography.objects.get(pk=pk)
assert request.user.has_perm('view_surface', topo.surface)
except (ValueError, Topography.DoesNotExist, AssertionError):
raise PermissionDenied() # This should be shown independent of whether the surface exists
topography_key = _topography_key(pk)
selection = _selection_set(request)
is_selected = topography_key in selection
if request.method == 'POST':
if select_status:
# topography should be selected
selection.add(topography_key)
elif is_selected:
selection.remove(topography_key)
request.session['selection'] = list(selection)
data = current_selection_as_basket_items(request)
return Response(data)
@api_view(['POST'])
def select_topography(request, pk):
"""Marks the given topography as 'selected' in session.
:param request: request
    :param pk: primary key of the topography
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_topography_select_status(request, pk, True)
@api_view(['POST'])
def unselect_topography(request, pk):
"""Marks the given topography as 'selected' in session.
:param request: request
    :param pk: primary key of the topography
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_topography_select_status(request, pk, False)
def set_tag_select_status(request, pk, select_status):
"""Marks the given tag as 'selected' in session or checks this.
:param request: request
:param pk: primary key of the tag
:param select_status: True if tag should be selected, False if it should be unselected
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
try:
pk = int(pk)
tag = TagModel.objects.get(pk=pk)
except ValueError:
raise PermissionDenied()
if not tag in tags_for_user(request.user):
raise PermissionDenied()
tag_key = _tag_key(pk)
selection = _selection_set(request)
is_selected = tag_key in selection
if request.method == 'POST':
if select_status:
# tag should be selected
selection.add(tag_key)
elif is_selected:
selection.remove(tag_key)
request.session['selection'] = list(selection)
data = current_selection_as_basket_items(request)
return Response(data)
@api_view(['POST'])
def select_tag(request, pk):
"""Marks the given tag as 'selected' in session.
:param request: request
:param pk: primary key of the tag
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_tag_select_status(request, pk, True)
@api_view(['POST'])
def unselect_tag(request, pk):
"""Marks the given tag as 'unselected' in session.
:param request: request
:param pk: primary key of the tag
:return: JSON Response
The response returns the current selection as suitable for the basket.
"""
return set_tag_select_status(request, pk, False)
@api_view(['POST'])
def unselect_all(request):
"""Removes all selections from session.
:param request: request
:return: empty list as JSON Response
"""
request.session['selection'] = []
return Response([])
def thumbnail(request, pk):
"""Returns image data for a topography thumbail
Parameters
----------
request
Returns
-------
    HTTP response with image data
"""
try:
pk = int(pk)
except ValueError:
raise Http404()
try:
topo = Topography.objects.get(pk=pk)
except Topography.DoesNotExist:
raise Http404()
if not request.user.has_perm('view_surface', topo.surface):
raise PermissionDenied()
# okay, we have a valid topography and the user is allowed to see it
image = topo.thumbnail
response = HttpResponse(content_type="image/png")
try:
response.write(image.file.read())
except Exception as exc:
_log.warning("Cannot load thumbnail for topography %d. Reason: %s", topo.id, exc)
        # return some default image so the client gets something in any case
with staticfiles_storage.open('images/thumbnail_unavailable.png', mode='rb') as img_file:
response.write(img_file.read())
return response
| 45,573
| 8,545
| 782
|
bbd02b18aafbd116aa8ea088f0e6678bdd955802
| 851
|
py
|
Python
|
server/main.py
|
yuweiliandrew/openrtist
|
4b6b17e77587751593d5e529b154e60513de3236
|
[
"Apache-2.0"
] | null | null | null |
server/main.py
|
yuweiliandrew/openrtist
|
4b6b17e77587751593d5e529b154e60513de3236
|
[
"Apache-2.0"
] | null | null | null |
server/main.py
|
yuweiliandrew/openrtist
|
4b6b17e77587751593d5e529b154e60513de3236
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from gabriel_server.network_engine import server_runner
import logging
import argparse
import importlib
DEFAULT_PORT = 9099
DEFAULT_NUM_TOKENS = 2
INPUT_QUEUE_MAXSIZE = 60
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
main()
| 25.029412
| 99
| 0.720329
|
#!/usr/bin/env python3
from gabriel_server.network_engine import server_runner
import logging
import argparse
import importlib
DEFAULT_PORT = 9099
DEFAULT_NUM_TOKENS = 2
INPUT_QUEUE_MAXSIZE = 60
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"-t", "--tokens", type=int, default=DEFAULT_NUM_TOKENS,
help="number of tokens")
parser.add_argument(
"-p", "--port", type=int, default=DEFAULT_PORT, help="Set port number")
args = parser.parse_args()
server_runner.run(websocket_port=args.port, zmq_address='tcp://*:5555', num_tokens=args.tokens,
input_queue_maxsize=INPUT_QUEUE_MAXSIZE)
if __name__ == "__main__":
main()
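# Illustrative usage (added comment, not part of the original file): with the argparse
# flags defined above, the engine server can be started as, for example,
#   python3 main.py --port 9099 --tokens 2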
| 512
| 0
| 23
|
4845208dc2008cff6748df7a14a9be766a0ca341
| 891
|
py
|
Python
|
tests/models/product/product_relation_types/test_model.py
|
simonsobs/acondbs
|
6ca11c2889d827ecdb2b54d0cf3b94b8cdd281e6
|
[
"MIT"
] | null | null | null |
tests/models/product/product_relation_types/test_model.py
|
simonsobs/acondbs
|
6ca11c2889d827ecdb2b54d0cf3b94b8cdd281e6
|
[
"MIT"
] | 24
|
2020-04-02T19:29:07.000Z
|
2022-03-08T03:05:43.000Z
|
tests/models/product/product_relation_types/test_model.py
|
simonsobs/acondbs
|
6ca11c2889d827ecdb2b54d0cf3b94b8cdd281e6
|
[
"MIT"
] | 1
|
2020-04-08T15:48:28.000Z
|
2020-04-08T15:48:28.000Z
|
from acondbs.db.sa import sa
from acondbs.models import ProductRelationType
##__________________________________________________________________||
##__________________________________________________________________||
| 28.741935
| 74
| 0.65881
|
from acondbs.db.sa import sa
from acondbs.models import ProductRelationType
##__________________________________________________________________||
def test_column(app_empty):
app = app_empty
with app.app_context():
model = ProductRelationType(
name="parent",
indef_article="a",
singular="parent",
plural="parents",
)
sa.session.add(model)
sa.session.commit()
assert model.type_id
type_id = model.type_id
with app.app_context():
model = ProductRelationType.query.filter_by(type_id=type_id).one()
assert model.type_id == type_id
assert model.name == "parent"
assert model.indef_article == "a"
assert model.singular == "parent"
assert model.plural == "parents"
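# Added comment (not part of the original test): the test above demonstrates the round
# trip of the model API: create a ProductRelationType inside one app context, commit,
# then re-query it by its autogenerated type_id in a fresh context and compare fields.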
##__________________________________________________________________||
| 647
| 0
| 22
|
1c9ae6c11296e6a1e6f7c38c20040bce04faf76f
| 2,330
|
py
|
Python
|
examples/my_line_rules.py
|
l0nax/gitlint
|
df84afed929af588191aa93534842fab80e117dd
|
[
"MIT"
] | null | null | null |
examples/my_line_rules.py
|
l0nax/gitlint
|
df84afed929af588191aa93534842fab80e117dd
|
[
"MIT"
] | null | null | null |
examples/my_line_rules.py
|
l0nax/gitlint
|
df84afed929af588191aa93534842fab80e117dd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from gitlint.rules import LineRule, RuleViolation, CommitMessageTitle
from gitlint.options import ListOption
"""
Full details on user-defined rules: https://jorisroovers.com/gitlint/user_defined_rules
The SpecialChars class below is an example of a user-defined LineRule. Line rules are gitlint rules that only act on a
single line at once. Once the rule is discovered, gitlint will automatically take care of applying this rule
against each line of the commit message title or body (whether it is applied to the title or body is determined by the
`target` attribute of the class).
A LineRule contrasts with a CommitRule (see examples/my_commit_rules.py) in that a commit rule is only applied once on
an entire commit. This allows commit rules to implement more complex checks that span multiple lines and/or checks
that should only be done once per gitlint run.
While every LineRule can be implemented as a CommitRule, it's usually easier and more concise to go with a LineRule if
that fits your needs.
"""
class SpecialChars(LineRule):
""" This rule will enforce that the commit message title does not contain any of the following characters:
$^%@!*() """
# A rule MUST have a human friendly name
name = "title-no-special-chars"
# A rule MUST have a *unique* id, we recommend starting with UL (for User-defined Line-rule), but this can
# really be anything.
id = "UL1"
# A line-rule MUST have a target (not required for CommitRules).
target = CommitMessageTitle
# A rule MAY have an option_spec if its behavior should be configurable.
options_spec = [ListOption('special-chars', ['$', '^', '%', '@', '!', '*', '(', ')'],
"Comma separated list of characters that should not occur in the title")]
| 44.807692
| 118
| 0.704292
|
# -*- coding: utf-8 -*-
from gitlint.rules import LineRule, RuleViolation, CommitMessageTitle
from gitlint.options import ListOption
"""
Full details on user-defined rules: https://jorisroovers.com/gitlint/user_defined_rules
The SpecialChars class below is an example of a user-defined LineRule. Line rules are gitlint rules that only act on a
single line at once. Once the rule is discovered, gitlint will automatically take care of applying this rule
against each line of the commit message title or body (whether it is applied to the title or body is determined by the
`target` attribute of the class).
A LineRule contrasts with a CommitRule (see examples/my_commit_rules.py) in that a commit rule is only applied once on
an entire commit. This allows commit rules to implement more complex checks that span multiple lines and/or checks
that should only be done once per gitlint run.
While every LineRule can be implemented as a CommitRule, it's usually easier and more concise to go with a LineRule if
that fits your needs.
"""
class SpecialChars(LineRule):
""" This rule will enforce that the commit message title does not contain any of the following characters:
$^%@!*() """
# A rule MUST have a human friendly name
name = "title-no-special-chars"
# A rule MUST have a *unique* id, we recommend starting with UL (for User-defined Line-rule), but this can
# really be anything.
id = "UL1"
# A line-rule MUST have a target (not required for CommitRules).
target = CommitMessageTitle
# A rule MAY have an option_spec if its behavior should be configurable.
options_spec = [ListOption('special-chars', ['$', '^', '%', '@', '!', '*', '(', ')'],
"Comma separated list of characters that should not occur in the title")]
def validate(self, line, _commit):
self.log.debug("SpecialChars: This line will be visible when running `gitlint --debug`")
violations = []
# options can be accessed by looking them up by their name in self.options
for char in self.options['special-chars'].value:
if char in line:
violation = RuleViolation(self.id, "Title contains the special character '{0}'".format(char), line)
violations.append(violation)
return violations
| 492
| 0
| 27
|
4581b4010635dca053e4b3dabc1023c2a4d1faf8
| 2,117
|
py
|
Python
|
src/subsystems/backarm.py
|
quis345/DeepSpace2019
|
917d120c4173e8085578c3fc7f98ecfffd8b6e5b
|
[
"MIT"
] | null | null | null |
src/subsystems/backarm.py
|
quis345/DeepSpace2019
|
917d120c4173e8085578c3fc7f98ecfffd8b6e5b
|
[
"MIT"
] | null | null | null |
src/subsystems/backarm.py
|
quis345/DeepSpace2019
|
917d120c4173e8085578c3fc7f98ecfffd8b6e5b
|
[
"MIT"
] | null | null | null |
import ctre
from wpilib import SmartDashboard as Dash
from wpilib.command import Subsystem
from constants import Constants
from utils import singleton, units, lazytalonsrx, pidf
class BackArm(Subsystem, metaclass=singleton.Singleton):
"""The back arm subsystem controls the back arm motors and encoders."""
def init(self):
"""Initialize the back arm motors. This is not in the constructor to make the calling explicit in the robotInit to the robot simulator."""
self.s_motor = lazytalonsrx.LazyTalonSRX(Constants.BS_MOTOR_ID)
self.m_motor = lazytalonsrx.LazyTalonSRX(Constants.BM_MOTOR_ID)
self.s_motor.initialize(
inverted=True, encoder=True, phase=False, name="Back Arm Slave")
self.m_motor.initialize(
inverted=False, encoder=True, phase=False, name="Back Arm Master")
self.s_motor.follow(self.m_motor)
self.initPIDF()
def initPIDF(self):
"""Initialize the arm motor pidf gains."""
self.m_motor.setMotionMagicConfig(
Constants.BACK_ARM_CRUISE_VELOCITY * (192) * (10/360), Constants.BACK_ARM_ACCELERATION * (192) * (10/360))
self.m_motor.setPIDF(0, Constants.BACK_ARM_KP, Constants.BACK_ARM_KI,
Constants.BACK_ARM_KD, Constants.BACK_ARM_KF)
def zeroSensors(self):
"""Set the encoder positions to 0."""
self.m_motor.zero()
def getAngle(self):
"""Get the angle of the arm in degrees."""
return self.m_motor.getPosition() / (192) / (10/360)
def setAngle(self, angle):
"""Set the angle of the arm in degrees."""
ticks = angle * (192) * (10/360)
self.m_motor.setMotionMagicSetpoint(ticks)
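    # Illustrative note (added comment, not part of the original file): getAngle() and
    # setAngle() are inverses of each other; both use the same conversion factor of
    # 192 * (10 / 360) encoder ticks per degree of arm rotation.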
| 36.5
| 146
| 0.66462
|
import ctre
from wpilib import SmartDashboard as Dash
from wpilib.command import Subsystem
from constants import Constants
from utils import singleton, units, lazytalonsrx, pidf
class BackArm(Subsystem, metaclass=singleton.Singleton):
"""The back arm subsystem controls the back arm motors and encoders."""
def __init__(self):
super().__init__()
def init(self):
"""Initialize the back arm motors. This is not in the constructor to make the calling explicit in the robotInit to the robot simulator."""
self.s_motor = lazytalonsrx.LazyTalonSRX(Constants.BS_MOTOR_ID)
self.m_motor = lazytalonsrx.LazyTalonSRX(Constants.BM_MOTOR_ID)
self.s_motor.initialize(
inverted=True, encoder=True, phase=False, name="Back Arm Slave")
self.m_motor.initialize(
inverted=False, encoder=True, phase=False, name="Back Arm Master")
self.s_motor.follow(self.m_motor)
self.initPIDF()
def initPIDF(self):
"""Initialize the arm motor pidf gains."""
self.m_motor.setMotionMagicConfig(
Constants.BACK_ARM_CRUISE_VELOCITY * (192) * (10/360), Constants.BACK_ARM_ACCELERATION * (192) * (10/360))
self.m_motor.setPIDF(0, Constants.BACK_ARM_KP, Constants.BACK_ARM_KI,
Constants.BACK_ARM_KD, Constants.BACK_ARM_KF)
def zeroSensors(self):
"""Set the encoder positions to 0."""
self.m_motor.zero()
def outputToDashboard(self):
self.s_motor.outputToDashboard()
self.m_motor.outputToDashboard()
Dash.putNumber("Back Arm Angle", self.getAngle())
def getAngle(self):
"""Get the angle of the arm in degrees."""
return self.m_motor.getPosition() / (192) / (10/360)
def setAngle(self, angle):
"""Set the angle of the arm in degrees."""
ticks = angle * (192) * (10/360)
self.m_motor.setMotionMagicSetpoint(ticks)
def periodic(self):
self.outputToDashboard()
def reset(self):
self.m_motor.setPercentOutput(0)
self.zeroSensors()
self.initPIDF()
| 290
| 0
| 108
|
cd830753bb5761a968e2d3653e6ef40be79f29eb
| 11,760
|
py
|
Python
|
layers/densenet.py
|
IlikeBB/Object-Detection-for-M-NBI
|
650fa1ca7b8860785f0a838dab0301a9cba121d6
|
[
"MIT"
] | null | null | null |
layers/densenet.py
|
IlikeBB/Object-Detection-for-M-NBI
|
650fa1ca7b8860785f0a838dab0301a9cba121d6
|
[
"MIT"
] | null | null | null |
layers/densenet.py
|
IlikeBB/Object-Detection-for-M-NBI
|
650fa1ca7b8860785f0a838dab0301a9cba121d6
|
[
"MIT"
] | null | null | null |
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
import torch.utils.model_zoo as model_zoo
__all__ = ['DenseNet', 'densenet50', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
model_urls = {
'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
def densenet50(pretrained=False, progress=True, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet50', 32, (6, 12, 24, 16), 64, pretrained, progress,
**kwargs)
def densenet121(pretrained=False, progress=True, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress,
**kwargs)
def densenet161(pretrained=False, progress=True, **kwargs):
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,
**kwargs)
def densenet169(pretrained=False, progress=True, **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,
**kwargs)
def densenet201(pretrained=False, progress=True, **kwargs):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress,
**kwargs)
| 44.714829
| 192
| 0.63835
|
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
import torch.utils.model_zoo as model_zoo
__all__ = ['DenseNet', 'densenet50', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
model_urls = {
'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
def _bn_function_factory(norm, relu, conv):
def bn_function(*inputs):
concated_features = torch.cat(inputs, 1)
bottleneck_output = conv(relu(norm(concated_features)))
return bottleneck_output
return bn_function
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.LeakyReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1,
bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.LeakyReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1,
bias=False)),
self.drop_rate = drop_rate
self.memory_efficient = memory_efficient
def forward(self, *prev_features):
bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
if self.memory_efficient and any(prev_feature.requires_grad for prev_feature in prev_features):
bottleneck_output = cp.checkpoint(bn_function, *prev_features)
else:
bottleneck_output = bn_function(*prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate,
training=self.training)
return new_features
class _DenseBlock(nn.Module):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for name, layer in self.named_children():
new_features = layer(*features)
features.append(new_features)
return torch.cat(features, 1)
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False):
super(DenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,
padding=3, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.LeakyReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock
num_features = num_init_features
self.trans = []
self.layer1 = _DenseBlock(num_layers=block_config[0], num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, memory_efficient=memory_efficient)
num_features = num_features + block_config[0] * growth_rate
self.bn1=nn.BatchNorm2d(num_features)
self.relu1=nn.LeakyReLU(inplace=True)
self.conv1=nn.Conv2d(num_features, num_features//2, kernel_size=1, stride=1, bias=False)
self.avg1=nn.AvgPool2d(kernel_size=2, stride=2)
num_features = num_features // 2
self.layer2 = _DenseBlock(num_layers=block_config[1], num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, memory_efficient=memory_efficient)
num_features = num_features + block_config[1] * growth_rate
self.bn2=nn.BatchNorm2d(num_features)
self.relu2=nn.LeakyReLU(inplace=True)
self.conv2=nn.Conv2d(num_features, num_features//2, kernel_size=1, stride=1, bias=False)
self.avg2=nn.AvgPool2d(kernel_size=2, stride=1)
num_features = num_features // 2
self.layer3 = _DenseBlock(num_layers=block_config[2], num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, memory_efficient=memory_efficient)
num_features = num_features + block_config[2] * growth_rate
self.bn3=nn.BatchNorm2d(num_features)
self.relu3=nn.LeakyReLU(inplace=True)
self.conv3=nn.Conv2d(num_features, num_features//2, kernel_size=1, stride=1, bias=False)
self.avg3=nn.AvgPool2d(kernel_size=2, stride=2)
num_features = num_features // 2
self.layer4 = _DenseBlock(num_layers=block_config[3], num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, memory_efficient=memory_efficient)
# Final batch norm
self.bn4 = nn.BatchNorm2d(num_features)
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x, out_inds=[2,4]):
x = self.features(x)
out = []
# import pdb
# pdb.set_trace()
for i,_layer in enumerate([self.layer1,self.layer2,self.layer3,self.layer4]):
x = _layer(x)
if i+1 in out_inds:
out.append(x)
if i == 0:
x = self.bn1(x)
x = self.relu1(x)
x = self.conv1(x)
x = self.avg1(x)
elif i == 1:
x = self.bn2(x)
x = self.relu2(x)
x = self.conv2(x)
# x = self.avg2(x)
elif i == 2:
x = self.bn3(x)
x = self.relu3(x)
x = self.conv3(x)
x = self.avg3(x)
return out
def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress,
**kwargs):
model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
if pretrained:
_load_state_dict(model, model_urls[arch], progress)
return model
def densenet50(pretrained=False, progress=True, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet50', 32, (6, 12, 24, 16), 64, pretrained, progress,
**kwargs)
def densenet121(pretrained=False, progress=True, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress,
**kwargs)
def densenet161(pretrained=False, progress=True, **kwargs):
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,
**kwargs)
def densenet169(pretrained=False, progress=True, **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,
**kwargs)
def densenet201(pretrained=False, progress=True, **kwargs):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress,
**kwargs)
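# A minimal usage sketch (illustrative values; not part of the upstream API):
# build a variant with random weights and push a dummy batch through it.
# forward() returns the intermediate feature maps selected by `out_inds`, not
# classification logits, and pretrained=True would need a `_load_state_dict`
# helper that this file does not define, so random initialisation is used here.
if __name__ == '__main__':
    net = densenet121(pretrained=False)
    feats = net(torch.rand(1, 3, 224, 224))
    print([f.shape for f in feats])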
| 6,645
| 20
| 260
|
4aae359bb47d841d355e338ca35c46c6de27cdca
| 2,782
|
py
|
Python
|
rfdmovie/models.py
|
Microndgt/rfdmovies
|
befba9ac7667d8234976b7fda7fa800beb5f05c7
|
[
"MIT"
] | 18
|
2018-02-12T04:11:17.000Z
|
2019-10-10T01:44:31.000Z
|
rfdmovie/models.py
|
Microndgt/rfdmovies
|
befba9ac7667d8234976b7fda7fa800beb5f05c7
|
[
"MIT"
] | 1
|
2021-06-01T21:46:26.000Z
|
2021-06-01T21:46:26.000Z
|
rfdmovie/models.py
|
tangzhoutz/rfdmovies-client
|
befba9ac7667d8234976b7fda7fa800beb5f05c7
|
[
"MIT"
] | null | null | null |
from sqlalchemy import ARRAY
from sqlalchemy import Column
from sqlalchemy.types import String, Integer, BigInteger, Float, Date, Text
from rfdmovie.db import BaseModel
from rfdmovie.utils import generate_timestamp
| 33.518072
| 90
| 0.626887
|
from sqlalchemy import ARRAY
from sqlalchemy import Column
from sqlalchemy.types import String, Integer, BigInteger, Float, Date, Text
from rfdmovie.db import BaseModel
from rfdmovie.utils import generate_timestamp
class Movie(BaseModel):
__tablename__ = 'movie'
id = Column(Integer, primary_key=True)
name = Column(String)
release_time = Column(Date) # "1999-09-09"
rate = Column(Float)
rate_num = Column(BigInteger)
desc = Column(Text)
countries = Column(ARRAY(String))
image_url = Column(String)
types = Column(ARRAY(String))
director = Column(String)
actors = Column(ARRAY(String))
douban_url = Column(String)
keywords = Column(ARRAY(String))
comments = Column(ARRAY(String))
languages = Column(ARRAY(String))
duration = Column(Integer)
grade_five = Column(Float, default=0)
grade_four = Column(Float, default=0)
grade_three = Column(Float, default=0)
grade_two = Column(Float, default=0)
grade_one = Column(Float, default=0)
created_utc = Column(Integer, default=generate_timestamp)
updated_utc = Column(Integer, default=generate_timestamp, onupdate=generate_timestamp)
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"release_time": self.release_time,
"rate": self.rate,
"rate_num": self.rate_num,
"desc": self.desc,
"countries": self.countries,
"image_url": self.image_url,
"types": self.types,
"director": self.director,
"actors": self.actors,
"douban_url": self.douban_url,
"keywords": self.keywords,
"comments": self.comments,
"languages": self.languages,
"duration": self.duration,
"grade_five": self.grade_five,
"grade_four": self.grade_four,
"grade_three": self.grade_three,
"grade_two": self.grade_two,
"grade_one": self.grade_one,
"created_utc": self.created_utc,
"updated_utc": self.updated_utc
}
class Download(BaseModel):
__tablename__ = 'download'
id = Column(Integer, primary_key=True)
name = Column(String)
page_link = Column(String)
download_urls = Column(ARRAY(String), default=[])
created_utc = Column(Integer, default=generate_timestamp)
updated_utc = Column(Integer, default=generate_timestamp, onupdate=generate_timestamp)
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"page_link": self.page_link,
"download_urls": self.download_urls,
"created_utc": self.created_utc,
"updated_utc": self.updated_utc
}
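# A usage sketch (hedged; `session` stands for a hypothetical SQLAlchemy
# session bound to the rfdmovie engine, and the field values are illustrative):
#
#   movie = Movie(name="Example Movie", rate=8.5, rate_num=1000)
#   session.add(movie)
#   session.commit()
#   payload = movie.to_dict()   # plain dict ready for JSON serialisation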
| 1,183
| 1,335
| 46
|
87fdd5cc0a847ca325cf3cc9964514e6fe985f25
| 5,539
|
py
|
Python
|
track/centroidtracker.py
|
yxtj/VideoServing
|
52d1c1c97021f11cc4d77c181ac1144fe3a789ce
|
[
"MIT"
] | null | null | null |
track/centroidtracker.py
|
yxtj/VideoServing
|
52d1c1c97021f11cc4d77c181ac1144fe3a789ce
|
[
"MIT"
] | null | null | null |
track/centroidtracker.py
|
yxtj/VideoServing
|
52d1c1c97021f11cc4d77c181ac1144fe3a789ce
|
[
"MIT"
] | null | null | null |
# modified from:
# https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/
from scipy.spatial import distance as dist
import numpy as np
| 40.727941
| 78
| 0.608955
|
# modified from:
# https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/
from scipy.spatial import distance as dist
import numpy as np
class CentroidTracker():
def __init__(self, maxDisappeared=50):
# initialize the next unique object ID along with two ordered
# dictionaries used to keep track of mapping a given object
# ID to its centroid and number of consecutive frames it has
# been marked as "disappeared", respectively
self.nextObjectID = 0
self.objects = {}
self.disappeared = {}
# store the number of maximum consecutive frames a given
# object is allowed to be marked as "disappeared" until we
# need to deregister the object from tracking
self.maxDisappeared = maxDisappeared
def reset(self):
self.nextObjectID = 0
self.objects = {}
self.disappeared = {}
def get_state(self):
return self.nextObjectID, self.objects.copy(), self.disappeared.copy()
def set_state(self, state):
self.nextObjectID = state[0]
self.objects = state[1]
self.disappeared = state[2]
def set_max_disappear(self, n):
self.maxDisappeared = n
def register(self, centroid, box):
# when registering an object we use the next available object
# ID to store the centroid
self.objects[self.nextObjectID] = (centroid, box)
self.disappeared[self.nextObjectID] = 0
self.nextObjectID += 1
def deregister(self, objectID):
# to deregister an object ID we delete the object ID from
# both of our respective dictionaries
del self.objects[objectID]
del self.disappeared[objectID]
def bbox2centroid(self, rect):
return (rect[:2]+rect[2:]) / 2
def bboxes2centroids(self, rects):
return np.apply_along_axis(lambda r:(r[:2]+r[2:])/2, 1, rects)
def get_result(self):
return { id:b for id,(c,b) in self.objects.items() }
def update(self, boxes):
if len(boxes) == 0:
for objectID in list(self.disappeared.keys()):
self.disappeared[objectID] += 1
if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)
return self.get_result()
# initialize an array of input centroids for the current frame
#inputCentroids = self.bboxes2centroids(boxes)
if isinstance(boxes, list):
boxes = np.array(boxes)
inputCentroids = (boxes[:,:2] + boxes[:,2:]) / 2
if len(self.objects) == 0:
for c,b in zip(inputCentroids, boxes):
self.register(c, b)
return self.get_result()
# grab the set of object IDs and corresponding centroids
objectIDs = list(self.objects.keys())
objectCentroids, objectBoxes = list(zip(*self.objects.values()))
objectCentroids = np.array(objectCentroids)
objectBoxes = np.array(objectBoxes)
D = dist.cdist(np.array(objectCentroids), inputCentroids)
# in order to perform this matching we must (1) find the
# smallest value in each row and then (2) sort the row
# indexes based on their minimum values so that the row
# with the smallest value is at the *front* of the index
# list
rows = D.min(axis=1).argsort()
# next, we perform a similar process on the columns by
# finding the smallest value in each column and then
# sorting using the previously computed row index list
cols = D.argmin(axis=1)[rows]
# in order to determine if we need to update, register,
# or deregister an object we need to keep track of which
# of the rows and column indexes we have already examined
usedRows = set()
usedCols = set()
# loop over the combination of the (row, column) index tuples
for (row, col) in zip(rows, cols):
# if we have already examined either the row or
# column value before, ignore it
if row in usedRows or col in usedCols:
continue
# otherwise, grab the object ID for the current row,
# set its new centroid, and reset the disappeared counter
objectID = objectIDs[row]
self.objects[objectID] = (inputCentroids[col], boxes[col])
self.disappeared[objectID] = 0
# indicate that we have examined each of the row and
# column indexes, respectively
usedRows.add(row)
usedCols.add(col)
# compute both the row and column index we have NOT yet examined
unusedRows = set(range(0, D.shape[0])).difference(usedRows)
unusedCols = set(range(0, D.shape[1])).difference(usedCols)
# unmatched existing objects
for row in unusedRows:
objectID = objectIDs[row]
self.disappeared[objectID] += 1
# check to see if the number of consecutive
# frames the object has been marked "disappeared"
# for warrants deregistering the object
if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)
# unmatched new objects
for col in unusedCols:
self.register(inputCentroids[col], boxes[col])
return self.get_result()
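# A small usage sketch (the boxes are made-up (x1, y1, x2, y2) values):
# update() is called once per frame and returns {object_id: box}, keeping IDs
# stable while an object is missing for at most maxDisappeared frames.
if __name__ == '__main__':
    ct = CentroidTracker(maxDisappeared=5)
    frame1 = np.array([[10, 10, 50, 50], [200, 200, 240, 240]])
    frame2 = np.array([[12, 11, 52, 51], [198, 202, 238, 242]])
    print(ct.update(frame1))
    print(ct.update(frame2))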
| 4,998
| 3
| 363
|
5e9c4f1b493d16c09e4c154dee47c2a179feb368
| 2,371
|
py
|
Python
|
trader/messaging/bus.py
|
9600dev/mmr
|
b08e63b7044f2b2061d8679b216822c82d309c86
|
[
"Apache-2.0"
] | 12
|
2021-09-22T21:19:23.000Z
|
2022-01-03T21:38:47.000Z
|
trader/messaging/bus.py
|
9600dev/mmr
|
b08e63b7044f2b2061d8679b216822c82d309c86
|
[
"Apache-2.0"
] | null | null | null |
trader/messaging/bus.py
|
9600dev/mmr
|
b08e63b7044f2b2061d8679b216822c82d309c86
|
[
"Apache-2.0"
] | 3
|
2021-09-05T23:26:13.000Z
|
2022-03-25T01:01:22.000Z
|
import lightbus
from lightbus.api import Api, Event
from dataclasses import dataclass
from ib_insync.objects import Position, PortfolioItem
from ib_insync.contract import Contract
from ib_insync.order import Order, Trade
from trader.container import Container
from trader.trading.trading_runtime import Action, Trader
from trader.data.universe import Universe
from trader.common.helpers import DictHelper
from typing import List, Dict, Tuple, Optional
bus = lightbus.create(config_file=Container().config()['lightbus_config_file'])
bus.client.register_api(TraderServiceApi())
| 38.241935
| 101
| 0.716997
|
import lightbus
from lightbus.api import Api, Event
from dataclasses import dataclass
from ib_insync.objects import Position, PortfolioItem
from ib_insync.contract import Contract
from ib_insync.order import Order, Trade
from trader.container import Container
from trader.trading.trading_runtime import Action, Trader
from trader.data.universe import Universe
from trader.common.helpers import DictHelper
from typing import List, Dict, Tuple, Optional
class TraderServiceApi(Api):
# this resolves a singleton trader instance, which if instantiated from
# the trader runtime, will have all the things needed to reflect on the current
# state of the trading system.
# If it's resolved from outside the runtime (i.e. from bus.py import *) it still
# fires up properly.
trader = Container().resolve(Trader)
class Meta:
name = 'service'
# json can't encode Dict's with keys that aren't primitive and hashable
# so we often have to convert to weird containers like List[Tuple]
async def get_positions(self) -> List[Position]:
return self.trader.portfolio.get_positions()
async def get_portfolio(self) -> List[PortfolioItem]:
return self.trader.client.ib.portfolio()
# return self.trader.portfolio.get_portfolio_items()
async def get_universes(self) -> Dict[str, int]:
return self.trader.universe_accessor.list_universes_count()
async def get_trades(self) -> Dict[int, List[Trade]]:
return self.trader.book.get_trades()
async def get_orders(self) -> Dict[int, List[Order]]:
return self.trader.book.get_orders()
async def cancel_order(self, order_id: int) -> Optional[Trade]:
return self.trader.cancel_order(order_id)
async def temp_place_order(self, contract: Contract, action: str, equity_amount: float) -> Trade:
act = Action.BUY if 'BUY' in action else Action.SELL
cached_observer = await self.trader.temp_handle_order(
contract=contract,
action=act,
equity_amount=equity_amount,
delayed=True,
debug=True
)
return await cached_observer.wait_value()
async def reconnect(self):
return self.trader.reconnect()
bus = lightbus.create(config_file=Container().config()['lightbus_config_file'])
bus.client.register_api(TraderServiceApi())
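# A usage sketch (hedged): from another process sharing the same lightbus
# config file, the RPCs above can be called through the API name declared in
# Meta, e.g.
#
#   positions = bus.service.get_positions()
#   universes = bus.service.get_universes()
#
# Both assume the trader runtime is running so the Trader singleton resolves
# against live broker state.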
| 1,012
| 758
| 23
|
d5080d8c67c0d3338d1a3b44c3d780a559c00923
| 9,169
|
py
|
Python
|
sdk/tests/router/config/resource_request_test.py
|
LeonLnj/turing
|
93817f5cfb40d056a707bd85e9265b5cafdaeb94
|
[
"Apache-2.0"
] | 1
|
2021-12-26T09:04:13.000Z
|
2021-12-26T09:04:13.000Z
|
sdk/tests/router/config/resource_request_test.py
|
LeonLnj/turing
|
93817f5cfb40d056a707bd85e9265b5cafdaeb94
|
[
"Apache-2.0"
] | null | null | null |
sdk/tests/router/config/resource_request_test.py
|
LeonLnj/turing
|
93817f5cfb40d056a707bd85e9265b5cafdaeb94
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from turing.generated.exceptions import ApiValueError
from turing.router.config.resource_request import (ResourceRequest, InvalidReplicaCountException)
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
3,
"100m",
"512Mi",
"generic_resource_request"
)
])
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
ResourceRequest.min_allowed_replica - 1,
3,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
ResourceRequest.max_allowed_replica + 1,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
5,
4,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
2,
5,
"3m",
"512Mi",
ApiValueError
)
])
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
2,
5,
"33m",
"512Ri",
ApiValueError
)
])
@pytest.mark.parametrize(
"new_min_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
2,
3,
"100m",
"512Mi",
"generic_resource_request"
)
])
@pytest.mark.parametrize(
"new_max_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
3,
1,
6,
"100m",
"512Mi",
"generic_resource_request"
)
])
@pytest.mark.parametrize(
"new_min_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
-1,
1,
3,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
@pytest.mark.parametrize(
"new_max_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
50,
1,
3,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
@pytest.mark.parametrize(
"new_min_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
5,
1,
3,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
@pytest.mark.parametrize(
"new_max_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
4,
5,
10,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
3,
"3m",
"512Mi",
ApiValueError
)
])
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
3,
"100m",
"512Ri",
ApiValueError
)
])
| 23.450128
| 97
| 0.583815
|
import pytest
from turing.generated.exceptions import ApiValueError
from turing.router.config.resource_request import (ResourceRequest, InvalidReplicaCountException)
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
3,
"100m",
"512Mi",
"generic_resource_request"
)
])
def test_create_resource_request_with_valid_params(
min_replica,
max_replica,
cpu_request,
memory_request,
expected,
request
):
actual = ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
).to_open_api()
assert actual == request.getfixturevalue(expected)
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
ResourceRequest.min_allowed_replica - 1,
3,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
def test_create_resource_request_with_min_replica_below_min_allowed(
min_replica,
max_replica,
cpu_request,
memory_request,
expected,
):
with pytest.raises(expected):
ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
ResourceRequest.max_allowed_replica + 1,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
def test_create_resource_request_with_max_replica_above_max_allowed(
min_replica,
max_replica,
cpu_request,
memory_request,
expected,
):
with pytest.raises(expected):
ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
5,
4,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
def test_create_resource_request_with_min_replica_greater_than_max_replica(
min_replica,
max_replica,
cpu_request,
memory_request,
expected,
):
with pytest.raises(expected):
ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
2,
5,
"3m",
"512Mi",
ApiValueError
)
])
def test_create_resource_request_with_invalid_cpu_request_string(
min_replica,
max_replica,
cpu_request,
memory_request,
expected,
):
with pytest.raises(expected):
ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
).to_open_api()
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
2,
5,
"33m",
"512Ri",
ApiValueError
)
])
def test_create_resource_request_with_invalid_memory_request_string(
min_replica,
max_replica,
cpu_request,
memory_request,
expected,
):
with pytest.raises(expected):
ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
).to_open_api()
@pytest.mark.parametrize(
"new_min_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
2,
3,
"100m",
"512Mi",
"generic_resource_request"
)
])
def test_create_resource_request_with_valid_min_replica(
new_min_replica,
min_replica,
max_replica,
cpu_request,
memory_request,
expected,
request
):
actual = ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
actual.min_replica = new_min_replica
assert actual.to_open_api() == request.getfixturevalue(expected)
@pytest.mark.parametrize(
"new_max_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
3,
1,
6,
"100m",
"512Mi",
"generic_resource_request"
)
])
def test_create_resource_request_with_valid_max_replica(
new_max_replica,
min_replica,
max_replica,
cpu_request,
memory_request,
expected,
request
):
actual = ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
actual.max_replica = new_max_replica
assert actual.to_open_api() == request.getfixturevalue(expected)
@pytest.mark.parametrize(
"new_min_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
-1,
1,
3,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
def test_set_resource_request_with_min_replica_below_min_allowed(
new_min_replica,
min_replica,
max_replica,
cpu_request,
memory_request,
expected
):
actual = ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
with pytest.raises(expected):
actual.min_replica = new_min_replica
@pytest.mark.parametrize(
"new_max_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
50,
1,
3,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
def test_set_resource_request_with_max_replica_above_max_allowed(
new_max_replica,
min_replica,
max_replica,
cpu_request,
memory_request,
expected
):
actual = ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
with pytest.raises(expected):
actual.max_replica = new_max_replica
@pytest.mark.parametrize(
"new_min_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
5,
1,
3,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
def test_set_resource_request_with_min_replica_greater_than_max_replica(
new_min_replica,
min_replica,
max_replica,
cpu_request,
memory_request,
expected
):
actual = ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
with pytest.raises(expected):
actual.min_replica = new_min_replica
@pytest.mark.parametrize(
"new_max_replica,min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
4,
5,
10,
"100m",
"512Mi",
InvalidReplicaCountException
)
])
def test_set_resource_request_with_max_replica_below_min_replica(
new_max_replica,
min_replica,
max_replica,
cpu_request,
memory_request,
expected
):
actual = ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
with pytest.raises(expected):
actual.max_replica = new_max_replica
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
3,
"3m",
"512Mi",
ApiValueError
)
])
def test_create_resource_request_with_invalid_cpu_request_string(
min_replica,
max_replica,
cpu_request,
memory_request,
expected
):
actual = ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
with pytest.raises(expected):
actual.to_open_api()
@pytest.mark.parametrize(
"min_replica,max_replica,cpu_request,memory_request,expected", [
pytest.param(
1,
3,
"100m",
"512Ri",
ApiValueError
)
])
def test_create_resource_request_with_invalid_memory_request_string(
min_replica,
max_replica,
cpu_request,
memory_request,
expected
):
actual = ResourceRequest(
min_replica,
max_replica,
cpu_request,
memory_request
)
with pytest.raises(expected):
actual.to_open_api()
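# A quick illustration of the validation behaviour exercised above (values
# are illustrative): construction succeeds inside the allowed replica range
# and raises InvalidReplicaCountException when min_replica exceeds max_replica.
#
#   ResourceRequest(1, 3, "100m", "512Mi")   # valid
#   ResourceRequest(5, 4, "100m", "512Mi")   # raises InvalidReplicaCountException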
| 5,029
| 0
| 308
|
3bf7e94c85fcc3d67cebd0e19a84bfc00aa8605f
| 13,001
|
py
|
Python
|
bs_align/bs_align_utils.py
|
huboqiang/BSseeker2
|
385f88cf78b3efced75798c00f0e7185ac064047
|
[
"MIT"
] | null | null | null |
bs_align/bs_align_utils.py
|
huboqiang/BSseeker2
|
385f88cf78b3efced75798c00f0e7185ac064047
|
[
"MIT"
] | null | null | null |
bs_align/bs_align_utils.py
|
huboqiang/BSseeker2
|
385f88cf78b3efced75798c00f0e7185ac064047
|
[
"MIT"
] | 1
|
2021-11-01T03:21:47.000Z
|
2021-11-01T03:21:47.000Z
|
from bs_utils.utils import *
import re
BAM_MATCH = 0
BAM_INS = 1
BAM_DEL = 2
BAM_SOFTCLIP = 4
CIGAR_OPS = {'M' : BAM_MATCH, 'I' : BAM_INS, 'D' : BAM_DEL, 'S' : BAM_SOFTCLIP}
#----------------------------------------------------------------
"""
Example:
========
Read : ACCGCGTTGATCGAGTACGTACGTGGGTC
Adapter : ....................ACGTGGGTCCCG
========
no_mismatch : the maximum number of mismatches allowed
Algorithm: (allowing 1 mismatch)
========
-Step 1:
ACCGCGTTGATCGAGTACGTACGTGGGTC
||XX
ACGTGGGTCCCG
-Step 2:
ACCGCGTTGATCGAGTACGTACGTGGGTC
X||X
.ACGTGGGTCCCG
-Step 3:
ACCGCGTTGATCGAGTACGTACGTGGGTC
XX
..ACGTGGGTCCCG
-Step ...
-Step N:
ACCGCGTTGATCGAGTACGTACGTGGGTC
|||||||||
....................ACGTGGGTCCCG
Success & return!
========
"""
# Remove the adapter from 3' end
def next_nuc(seq, pos, n):
""" Returns the nucleotide that is n places from pos in seq. Skips gap symbols.
"""
i = pos + 1
while i < len(seq):
if seq[i] != '-':
n -= 1
if n == 0: break
i += 1
if i < len(seq) :
return seq[i]
else :
return 'N'
def cigar_to_alignment(cigar, read_seq, genome_seq):
""" Reconstruct the pairwise alignment based on the CIGAR string and the two sequences
"""
# reconstruct the alignment
r_pos = cigar[0][1] if cigar[0][0] == BAM_SOFTCLIP else 0
g_pos = 0
r_aln = ''
g_aln = ''
for edit_op, count in cigar:
if edit_op == BAM_MATCH:
r_aln += read_seq[r_pos : r_pos + count]
g_aln += genome_seq[g_pos : g_pos + count]
r_pos += count
g_pos += count
elif edit_op == BAM_DEL:
r_aln += '-'*count
g_aln += genome_seq[g_pos : g_pos + count]
g_pos += count
elif edit_op == BAM_INS:
r_aln += read_seq[r_pos : r_pos + count]
g_aln += '-'*count
r_pos += count
return r_aln, g_aln
# return sequence is [start, end), not include 'end'
| 32.748111
| 133
| 0.515576
|
from bs_utils.utils import *
import re
BAM_MATCH = 0
BAM_INS = 1
BAM_DEL = 2
BAM_SOFTCLIP = 4
CIGAR_OPS = {'M' : BAM_MATCH, 'I' : BAM_INS, 'D' : BAM_DEL, 'S' : BAM_SOFTCLIP}
def N_MIS(r,g):
mismatches = 0
if len(r)==len(g):
for i in xrange(len(r)):
if r[i] != g[i] and r[i] != "N" and g[i] != "N" and not(r[i] == 'T' and g[i] == 'C'):
mismatches += 1
return mismatches
#----------------------------------------------------------------
"""
Example:
========
Read : ACCGCGTTGATCGAGTACGTACGTGGGTC
Adapter : ....................ACGTGGGTCCCG
========
no_mismatch : the maximum number of mismatches allowed
Algorithm: (allowing 1 mismatch)
========
-Step 1:
ACCGCGTTGATCGAGTACGTACGTGGGTC
||XX
ACGTGGGTCCCG
-Step 2:
ACCGCGTTGATCGAGTACGTACGTGGGTC
X||X
.ACGTGGGTCCCG
-Step 3:
ACCGCGTTGATCGAGTACGTACGTGGGTC
XX
..ACGTGGGTCCCG
-Step ...
-Step N:
ACCGCGTTGATCGAGTACGTACGTGGGTC
|||||||||
....................ACGTGGGTCCCG
Success & return!
========
"""
# Remove the adapter from 3' end
def RemoveAdapter ( read, adapter, no_mismatch, rm_back=0) :
lr = len(read)
la = len(adapter)
# Check the empty adapter, namely, the reads start with the 2nd base of adapter,
# not including the 'A' base in front of the adapter.
if adapter[2:] == read[0:(la-1)] :
return ""
for i in xrange( lr - no_mismatch ) :
read_pos = i
adapter_pos = 0
count_no_mis = 0
while (adapter_pos < la) and (read_pos < lr) :
if (read[read_pos] == adapter[adapter_pos]) :
read_pos = read_pos + 1
adapter_pos = adapter_pos + 1
else :
count_no_mis = count_no_mis + 1
if count_no_mis > no_mismatch :
break
else :
read_pos = read_pos + 1
adapter_pos = adapter_pos + 1
# while_end
# Cut the extra bases before the adapter
# --C|CG G-- => --CNN+A+<adapter>
# --G GC|C-- --GGC
if adapter_pos == la or read_pos == lr :
if i <= rm_back :
return ''
else :
return read[:(i-rm_back)]
# for_end
return read
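# Worked example matching the comment block above (illustrative sequences):
#
#   RemoveAdapter("ACCGCGTTGATCGAGTACGTACGTGGGTC", "ACGTGGGTCCCG", 1)
#   -> "ACCGCGTTGATCGAGTACGT"
#
# The adapter prefix is found starting at read position 20, so only the first
# 20 bases of the read are kept.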
def Remove_5end_Adapter ( read, adapter, no_mismatch) :
lr = len(read)
la = len(adapter)
for i in xrange (la - no_mismatch) :
read_pos = 0
adapter_pos = i
count_no_mis = 0
while (adapter_pos < la) and (read_pos < lr) :
if (read[read_pos] == adapter[adapter_pos]) :
adapter_pos = adapter_pos + 1
read_pos = read_pos + 1
else :
count_no_mis = count_no_mis + 1
if count_no_mis > no_mismatch :
break
else :
read_pos = read_pos + 1
adapter_pos = adapter_pos + 1
# while_end
if adapter_pos == la :
return read[(la-i):]
def next_nuc(seq, pos, n):
""" Returns the nucleotide that is n places from pos in seq. Skips gap symbols.
"""
i = pos + 1
while i < len(seq):
if seq[i] != '-':
n -= 1
if n == 0: break
i += 1
if i < len(seq) :
return seq[i]
else :
return 'N'
def methy_seq(read, genome):
H = ['A', 'C', 'T']
m_seq = []
xx = "-"
for i in xrange(len(read)):
if genome[i] == '-':
continue
elif read[i] != 'C' and read[i] != 'T':
xx = "-"
elif read[i] == "T" and genome[i] == "C": #(unmethylated):
nn1 = next_nuc(genome, i, 1)
if nn1 == "G":
xx = "x"
elif nn1 in H :
nn2 = next_nuc(genome, i, 2)
if nn2 == "G":
xx = "y"
elif nn2 in H :
xx = "z"
elif read[i] == "C" and genome[i] == "C": #(methylated):
nn1 = next_nuc(genome, i, 1)
if nn1 == "G":
xx = "X"
elif nn1 in H :
nn2 = next_nuc(genome, i, 2)
if nn2 == "G":
xx = "Y"
elif nn2 in H:
xx = "Z"
else:
xx = "-"
m_seq.append(xx)
return ''.join(m_seq)
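# Small example (made-up sequences): for read "TCGA" aligned to genome "CCGA",
# methy_seq returns "yX--": position 0 is an unmethylated CHG site ("y"),
# position 1 a methylated CpG site ("X"), and the remaining bases are not
# cytosine calls ("-").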
def mcounts(mseq, mlst, ulst):
out_mlst=[mlst[0]+mseq.count("X"), mlst[1]+mseq.count("Y"), mlst[2]+mseq.count("Z")]
out_ulst=[ulst[0]+mseq.count("x"), ulst[1]+mseq.count("y"), ulst[2]+mseq.count("z")]
return out_mlst, out_ulst
def process_aligner_output(filename, pair_end = False):
#m = re.search(r'-('+'|'.join(supported_aligners) +')-TMP', filename)
m = re.search(r'-('+'|'.join(supported_aligners) +')-.*TMP', filename)
if m is None:
error('The temporary folder path should contain the name of one of the supported aligners: ' + filename)
format = m.group(1)
try :
input = open(filename)
except IOError:
print "[Error] Cannot open file %s" % filename
exit(-1)
QNAME, FLAG, RNAME, POS, MAPQ, CIGAR, RNEXT, PNEXT, TLEN, SEQ, QUAL = range(11)
def parse_SAM(line):
buf = line.split()
# print buf
flag = int(buf[FLAG])
# skip reads that are not mapped
# skip reads that have probability of being non-unique higher than 1/10
if flag & 0x4 : # or int(buf[MAPQ]) < 10:
return None, None, None, None, None, None
# print "format = ", format
if format == BOWTIE:
mismatches = int([buf[i][5:] for i in xrange(11, len(buf)) if buf[i][:5] == 'NM:i:'][0]) # get the edit distance
# --- bug fixed ------
elif format == BOWTIE2:
if re.search(r'(.)*-e2e-TMP(.*)', filename) is None : # local model
mismatches = 1-int([buf[i][5:] for i in xrange(11, len(buf)) if buf[i][:5] == 'AS:i:'][0])
# print "====local=====\n"
## bowtie2 use AS tag (score) to evaluate the mapping. The higher, the better.
else : # end-to-end model
# print "end-to-end\n"
mismatches = int([buf[i][5:] for i in xrange(11, len(buf)) if buf[i][:5] == 'XM:i:'][0])
# --- Weilong ---------
elif format == SOAP:
            mismatches = 1 - int(buf[MAPQ])
# mismatches = 1/float(buf[MAPQ])
## downstream might round (0,1) to 0, so use integer instead
## fixed by Weilong
elif format == RMAP:
# chr16 75728107 75728147 read45 9 -
# chr16 67934919 67934959 read45 9 -
mismatches = buf[4]
return (buf[QNAME], # read ID
buf[RNAME], # reference ID
int(buf[POS]) - 1, # position, 0 based (SAM is 1 based)
mismatches, # number of mismatches
parse_cigar(buf[CIGAR]), # the parsed cigar string
flag & 0x40 # true if it is the first mate in a pair, false if it is the second mate
)
SOAP_QNAME, SOAP_SEQ, SOAP_QUAL, SOAP_NHITS, SOAP_AB, SOAP_LEN, SOAP_STRAND, SOAP_CHR, SOAP_LOCATION, SOAP_MISMATCHES = range(10)
def parse_SOAP(line):
buf = line.split()
return (buf[SOAP_QNAME],
buf[SOAP_CHR],
int(buf[SOAP_LOCATION]) - 1,
int(buf[SOAP_MISMATCHES]),
buf[SOAP_AB],
buf[SOAP_STRAND],
parse_cigar(buf[SOAP_LEN]+'M')
)
# chr16 75728107 75728147 read45 9 -
RMAP_CHR, RMAP_START, RMAP_END, RMAP_QNAME, RMAP_MISMATCH, RMAP_STRAND = range(6)
def parse_RMAP(line):
buf = line.split()
return ( buf[RMAP_QNAME],
buf[RMAP_CHR],
int(buf[RMAP_START]), # to check -1 or not
int(buf[RMAP_END]) - int(buf[RMAP_START]) + 1,
int(buf[RMAP_MISMATCH]),
buf[RMAP_STRAND]
)
if format == BOWTIE or format == BOWTIE2:
if pair_end:
for line in input:
header1, chr1, location1, no_mismatch1, cigar1, _ = parse_SAM(line)
header2, _, location2, no_mismatch2, cigar2, mate_no2 = parse_SAM(input.next())
if header1 and header2:
# flip the location info if the second mate comes first in the alignment file
if mate_no2:
location1, location2 = location2, location1
cigar1, cigar2 = cigar2, cigar1
yield header1, chr1, no_mismatch1 + no_mismatch2, location1, cigar1, location2, cigar2
else:
for line in input:
header, chr, location, no_mismatch, cigar, _ = parse_SAM(line)
if header is not None:
yield header, chr, location, no_mismatch, cigar
elif format == SOAP:
if pair_end:
for line in input:
header1, chr1, location1, no_mismatch1, mate1, strand1, cigar1 = parse_SOAP(line)
header2, _ , location2, no_mismatch2, _, strand2, cigar2 = parse_SOAP(input.next())
if mate1 == 'b':
location1, location2 = location2, location1
strand1, strand2 = strand2, strand1
                    cigar1, cigar2 = cigar2, cigar1
if header1 and header2 and strand1 == '+' and strand2 == '-':
yield header1, chr1, no_mismatch1 + no_mismatch2, location1, cigar1, location2, cigar2
else:
for line in input:
header, chr, location, no_mismatch, _, strand, cigar = parse_SOAP(line)
if header and strand == '+':
yield header, chr, location, no_mismatch, cigar
elif format == RMAP :
if pair_end :
todo = 0
# to do
else :
for line in input:
header, chr, location, read_len, no_mismatch, strand = parse_RMAP(line)
cigar = str(read_len) + "M"
yield header, chr, location, no_mismatch, cigar
input.close()
def parse_cigar(cigar_string):
i = 0
prev_i = 0
cigar = []
while i < len(cigar_string):
if cigar_string[i] in CIGAR_OPS:
cigar.append((CIGAR_OPS[cigar_string[i]], int(cigar_string[prev_i:i])))
prev_i = i + 1
i += 1
return cigar
def get_read_start_end_and_genome_length(cigar):
r_start = cigar[0][1] if cigar[0][0] == BAM_SOFTCLIP else 0
r_end = r_start
g_len = 0
for edit_op, count in cigar:
if edit_op == BAM_MATCH:
r_end += count
g_len += count
elif edit_op == BAM_INS:
r_end += count
elif edit_op == BAM_DEL:
g_len += count
return r_start, r_end, g_len # return the start and end in the read and the length of the genomic sequence
# r_start : start position on the read
# r_end : end position on the read
# g_len : length of the mapped region on genome
def cigar_to_alignment(cigar, read_seq, genome_seq):
""" Reconstruct the pairwise alignment based on the CIGAR string and the two sequences
"""
# reconstruct the alignment
r_pos = cigar[0][1] if cigar[0][0] == BAM_SOFTCLIP else 0
g_pos = 0
r_aln = ''
g_aln = ''
for edit_op, count in cigar:
if edit_op == BAM_MATCH:
r_aln += read_seq[r_pos : r_pos + count]
g_aln += genome_seq[g_pos : g_pos + count]
r_pos += count
g_pos += count
elif edit_op == BAM_DEL:
r_aln += '-'*count
g_aln += genome_seq[g_pos : g_pos + count]
g_pos += count
elif edit_op == BAM_INS:
r_aln += read_seq[r_pos : r_pos + count]
g_aln += '-'*count
r_pos += count
return r_aln, g_aln
# return sequence is [start, end), not include 'end'
def get_genomic_sequence(genome, start, end, strand = '+'):
if strand != '+' and strand != '-' :
print "[Bug] get_genomic_sequence input should be \'+\' or \'-\'."
exit(-1)
if start > 1:
prev = genome[start-2:start]
elif start == 1:
prev = 'N'+genome[0]
else:
prev = 'NN'
if end < len(genome) - 1:
next = genome[end: end + 2]
elif end == len(genome) - 1:
next = genome[end] + 'N'
else:
next = 'NN'
origin_genome = genome[start:end]
if strand == '-':
# reverse complement everything if strand is '-'
revc = reverse_compl_seq('%s%s%s' % (prev, origin_genome, next))
prev, origin_genome, next = revc[:2], revc[2:-2], revc[-2:]
return origin_genome, next, '%s_%s_%s' % (prev, origin_genome, next)
# next : next two nucleotides
| 10,727
| 0
| 205
|
c3579c902cbdded2585c8b7a27020a60e348e6c2
| 160
|
py
|
Python
|
scripts/portal/evantalk02.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/portal/evantalk02.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/portal/evantalk02.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
if not "mo02=o" in sm.getQRValue(22013):
sm.avatarOriented("Effect/OnUserEff.img/guideEffect/evanTutorial/evanBalloon02")
sm.addQRValue(22013, "mo02=o")
| 53.333333
| 84
| 0.75625
|
if not "mo02=o" in sm.getQRValue(22013):
sm.avatarOriented("Effect/OnUserEff.img/guideEffect/evanTutorial/evanBalloon02")
sm.addQRValue(22013, "mo02=o")
| 0
| 0
| 0
|
534e4c177b3b875055cf3bc8aa81353837994a24
| 1,285
|
py
|
Python
|
CS596/code04_lr/mglearn/plot_knn_regression.py
|
ko-takahashi/college
|
c333f1e1767f1206687f5e9b0fb3f0145b2d5d6a
|
[
"MIT"
] | 52
|
2019-02-15T16:37:13.000Z
|
2022-02-17T18:34:30.000Z
|
Data Exploring/Data Exploring 12-25-17/mglearn/plot_knn_regression.py
|
RodeoBlues/Complete-Data-Science-Toolkits
|
c5e83889e24af825ec3baed6e8198debb135f1ff
|
[
"MIT"
] | 2
|
2021-06-19T08:04:01.000Z
|
2021-06-20T16:01:21.000Z
|
Data Exploring/Data Exploring 12-25-17/mglearn/plot_knn_regression.py
|
RodeoBlues/Complete-Data-Science-Toolkits
|
c5e83889e24af825ec3baed6e8198debb135f1ff
|
[
"MIT"
] | 35
|
2019-02-08T02:00:31.000Z
|
2022-03-01T23:17:00.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import euclidean_distances
from .datasets import make_wave
from .plot_helpers import cm3
| 32.948718
| 76
| 0.60856
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import euclidean_distances
from .datasets import make_wave
from .plot_helpers import cm3
def plot_knn_regression(n_neighbors=1):
X, y = make_wave(n_samples=40)
X_test = np.array([[-1.5], [0.9], [1.5]])
dist = euclidean_distances(X, X_test)
closest = np.argsort(dist, axis=0)
plt.figure(figsize=(10, 6))
reg = KNeighborsRegressor(n_neighbors=n_neighbors).fit(X, y)
y_pred = reg.predict(X_test)
for x, y_, neighbors in zip(X_test, y_pred, closest.T):
for neighbor in neighbors[:n_neighbors]:
plt.arrow(x[0], y_, X[neighbor, 0] - x[0], y[neighbor] - y_,
head_width=0, fc='k', ec='k')
train, = plt.plot(X, y, 'o', c=cm3(0))
test, = plt.plot(X_test, -3 * np.ones(len(X_test)), '*', c=cm3(2),
markersize=20)
pred, = plt.plot(X_test, y_pred, '*', c=cm3(0), markersize=20)
plt.vlines(X_test, -3.1, 3.1, linestyle="--")
plt.legend([train, test, pred],
["training data/target", "test data", "test prediction"],
ncol=3, loc=(.1, 1.025))
plt.ylim(-3.1, 3.1)
plt.xlabel("Feature")
plt.ylabel("Target")
| 1,048
| 0
| 23
|
e5d4fa5bda36ea06fdffa442436c6a00ebf6be36
| 165
|
py
|
Python
|
gravis/_internal/plotting/__init__.py
|
robert-haas/gravis
|
3e63b606141bf78617b1247048a9e3ccca6d5824
|
[
"Apache-2.0"
] | 3
|
2022-01-19T15:57:29.000Z
|
2022-03-06T02:36:44.000Z
|
gravis/_internal/plotting/__init__.py
|
robert-haas/gravis
|
3e63b606141bf78617b1247048a9e3ccca6d5824
|
[
"Apache-2.0"
] | 2
|
2022-03-01T19:29:49.000Z
|
2022-03-01T19:31:08.000Z
|
gravis/_internal/plotting/__init__.py
|
robert-haas/gravis
|
3e63b606141bf78617b1247048a9e3ccca6d5824
|
[
"Apache-2.0"
] | null | null | null |
"""A subpackage containing plotting functions."""
__all__ = [
'd3',
'three',
'vis',
]
from .d3 import d3
from .three import three
from .vis import vis
| 13.75
| 49
| 0.636364
|
"""A subpackage containing plotting functions."""
__all__ = [
'd3',
'three',
'vis',
]
from .d3 import d3
from .three import three
from .vis import vis
| 0
| 0
| 0
|
b74a21a3a4baee8e1ace1d4298da7847fa61cc93
| 68
|
py
|
Python
|
src/obfuscapk/obfuscators/random_manifest/__init__.py
|
Elyorbe/Obfuscapk
|
2ba18df4b5efe44b4fab271ab5ccfe51488fa693
|
[
"MIT"
] | 688
|
2019-08-23T16:43:10.000Z
|
2022-03-29T19:35:07.000Z
|
src/obfuscapk/obfuscators/random_manifest/__init__.py
|
Ktm2590/Obfuscapk
|
4072b6dea88e91b75064a84c7d3adebe005e8207
|
[
"MIT"
] | 124
|
2019-09-07T13:10:59.000Z
|
2022-03-17T16:47:29.000Z
|
src/obfuscapk/obfuscators/random_manifest/__init__.py
|
Ktm2590/Obfuscapk
|
4072b6dea88e91b75064a84c7d3adebe005e8207
|
[
"MIT"
] | 204
|
2019-08-23T14:43:52.000Z
|
2022-03-30T21:04:50.000Z
|
#!/usr/bin/env python3
from .random_manifest import RandomManifest
| 17
| 43
| 0.808824
|
#!/usr/bin/env python3
from .random_manifest import RandomManifest
| 0
| 0
| 0
|
7fc69e8d42affa4c0554d5794c970f2092f0c828
| 2,152
|
py
|
Python
|
subtest/test_coal_mine.py
|
RuneSkovrupHansen/python-testing
|
318d082d1100877dea93cabca1c727199bc6416e
|
[
"MIT"
] | null | null | null |
subtest/test_coal_mine.py
|
RuneSkovrupHansen/python-testing
|
318d082d1100877dea93cabca1c727199bc6416e
|
[
"MIT"
] | null | null | null |
subtest/test_coal_mine.py
|
RuneSkovrupHansen/python-testing
|
318d082d1100877dea93cabca1c727199bc6416e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from mimetypes import init
import unittest
import coal_mine
class TestCoalMine(unittest.TestCase):
# Test that worker property sets values in range correctly
# Test that worker property sets negative values to zero
# Test that worker property sets values exceeding threshold to threshold
"""Test output for all number of workers
The test is purposely set up to fail since it expects
that the output scales linearly with the number of workers
and the diminishing returns are not taken into accout
Because subTest() is used the test will show all of the
failed example with messages similar to
======================================================================
FAIL: test_output (__main__.TestCoalMine) (workers=6)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/rune/python_testing/subtest/test_coal_mine.py", line 34, in test_output
self.assertEqual(self.cm.output, workers*self.cm.BASE_WORKER_OUTPUT)
AssertionError: 87.0 != 90
This makes it easy to identify which sub tests are failing"""
if __name__ == "__main__":
main()
| 37.103448
| 95
| 0.640335
|
#!/usr/bin/env python3
from mimetypes import init
import unittest
import coal_mine
class TestCoalMine(unittest.TestCase):
def setUp(self):
self.cm = coal_mine.CoalMine()
# Test that worker property sets values in range correctly
def test_workers_in_range(self):
for workers in range(self.cm.WORKER_THRESHOLD+1):
with self.subTest(workers=workers): # Note that parameter and value name must match
self.cm.workers = workers
self.assertEqual(self.cm.workers, workers)
# Test that worker property sets negative values to zero
def test_workers_negative(self):
self.cm.workers = -1
self.assertEqual(self.cm.workers, 0)
# Test that worker property sets values exceeding threshold to threshold
def test_workers_above_threshold(self):
self.cm.workers = self.cm.WORKER_THRESHOLD+1
self.assertEqual(self.cm.workers, self.cm.WORKER_THRESHOLD)
"""Test output for all number of workers
The test is purposely set up to fail since it expects
that the output scales linearly with the number of workers
and the diminishing returns are not taken into accout
Because subTest() is used the test will show all of the
failed example with messages similar to
======================================================================
FAIL: test_output (__main__.TestCoalMine) (workers=6)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/rune/python_testing/subtest/test_coal_mine.py", line 34, in test_output
self.assertEqual(self.cm.output, workers*self.cm.BASE_WORKER_OUTPUT)
AssertionError: 87.0 != 90
This makes it easy to identify which sub tests are failing"""
def test_output(self):
for workers in range(self.cm.WORKER_THRESHOLD+1):
with self.subTest(workers=workers):
self.cm.workers = workers
self.assertEqual(self.cm.output, workers*self.cm.BASE_WORKER_OUTPUT)
def main():
unittest.main()
if __name__ == "__main__":
main()
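# Run with, e.g.:  python -m unittest test_coal_mine -v
# Each failing workers value from subTest() is then reported as its own
# entry, in the format shown in the class docstring above.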
| 768
| 0
| 163
|
c2a7b745461abcec5d3d58cd180c360982e7bdb3
| 19,014
|
py
|
Python
|
testing/scripts/test_rolling_updates.py
|
pravingadakh/seldon-core
|
a72257fa851033ee09513a0b6b310087eb625e85
|
[
"Apache-2.0"
] | null | null | null |
testing/scripts/test_rolling_updates.py
|
pravingadakh/seldon-core
|
a72257fa851033ee09513a0b6b310087eb625e85
|
[
"Apache-2.0"
] | null | null | null |
testing/scripts/test_rolling_updates.py
|
pravingadakh/seldon-core
|
a72257fa851033ee09513a0b6b310087eb625e85
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import logging
import pytest
from subprocess import run
from seldon_e2e_utils import (
wait_for_status,
wait_for_rollout,
rest_request_ambassador,
initial_rest_request,
retry_run,
API_AMBASSADOR,
API_ISTIO_GATEWAY,
)
with_api_gateways = pytest.mark.parametrize(
"api_gateway", [API_AMBASSADOR, API_ISTIO_GATEWAY], ids=["ambas", "istio"]
)
@pytest.mark.parametrize(
"from_deployment,to_deployment",
[
("graph1.json", "graph8.json"), # From v1alpha2 to v1
("graph7.json", "graph8.json"), # From v1alpha3 to v1
],
)
| 47.181141
| 88
| 0.591143
|
import os
import time
import logging
import pytest
from subprocess import run
from seldon_e2e_utils import (
wait_for_status,
wait_for_rollout,
rest_request_ambassador,
initial_rest_request,
retry_run,
API_AMBASSADOR,
API_ISTIO_GATEWAY,
)
def to_resources_path(file_name):
return os.path.join("..", "resources", file_name)
with_api_gateways = pytest.mark.parametrize(
"api_gateway", [API_AMBASSADOR, API_ISTIO_GATEWAY], ids=["ambas", "istio"]
)
class TestRollingHttp(object):
@with_api_gateways
# Test updating a model with a new image version as the only change
def test_rolling_update1(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace)
logging.warning("Initial request")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph2.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(100):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
)
if (not r.status_code == 200) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
):
break
time.sleep(1)
assert i < 100
logging.warning("Success for test_rolling_update1")
run(f"kubectl delete -f ../resources/graph1.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph2.json -n {namespace}", shell=True)
@with_api_gateways
# test changing the image version and the name of its container
def test_rolling_update2(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace)
logging.warning("Initial request")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph3.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(100):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
)
if (not r.status_code == 200) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
):
break
time.sleep(1)
assert i < 100
logging.warning("Success for test_rolling_update2")
run(f"kubectl delete -f ../resources/graph1.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph3.json -n {namespace}", shell=True)
@with_api_gateways
# Test updating a model with a new resource request but same image
def test_rolling_update3(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace)
logging.warning("Initial request")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph4.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(50):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
time.sleep(1)
assert i == 49
logging.warning("Success for test_rolling_update3")
run(f"kubectl delete -f ../resources/graph1.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph4.json -n {namespace}", shell=True)
@with_api_gateways
# Test updating a model with a multi deployment new model
def test_rolling_update4(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace)
logging.warning("Initial request")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph5.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(50):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
time.sleep(1)
assert i == 49
logging.warning("Success for test_rolling_update4")
run(f"kubectl delete -f ../resources/graph1.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph5.json -n {namespace}", shell=True)
@with_api_gateways
# Test updating a model to a multi predictor model
def test_rolling_update5(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace)
logging.warning("Initial request")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph6.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(50):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
)
if (not r.status_code == 200) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
):
break
time.sleep(1)
        assert i < 50  # the loop above only runs 50 times
logging.warning("Success for test_rolling_update5")
run(f"kubectl delete -f ../resources/graph1.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph6.json -n {namespace}", shell=True)
@with_api_gateways
# Test updating a model with a new image version as the only change
def test_rolling_update6(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1svc.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace, expected_deployments=2)
logging.warning("Initial request")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph2svc.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(100):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
)
if (not r.status_code == 200) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
):
break
time.sleep(1)
assert i < 100
logging.warning("Success for test_rolling_update6")
run(f"kubectl delete -f ../resources/graph1svc.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph2svc.json -n {namespace}", shell=True)
@with_api_gateways
# test changing the image version and the name of its container
def test_rolling_update7(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1svc.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace, expected_deployments=2)
logging.warning("Initial request")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph3svc.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(100):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
)
if (not r.status_code == 200) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
):
break
time.sleep(1)
assert i < 100
logging.warning("Success for test_rolling_update7")
run(f"kubectl delete -f ../resources/graph1svc.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph3svc.json -n {namespace}", shell=True)
@with_api_gateways
# Test updating a model with a new resource request but same image
def test_rolling_update8(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1svc.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace, expected_deployments=2)
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph4svc.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(50):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
time.sleep(1)
assert i == 49
logging.warning("Success for test_rolling_update8")
run(f"kubectl delete -f ../resources/graph1svc.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph4svc.json -n {namespace}", shell=True)
@with_api_gateways
# Test updating a model with a multi deployment new model
def test_rolling_update9(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1svc.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace, expected_deployments=2)
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph5svc.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(50):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
time.sleep(1)
assert i == 49
logging.warning("Success for test_rolling_update9")
run(f"kubectl delete -f ../resources/graph1svc.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph5svc.json -n {namespace}", shell=True)
@with_api_gateways
# Test updating a model to a multi predictor model
def test_rolling_update10(self, namespace, api_gateway):
if api_gateway == API_ISTIO_GATEWAY:
retry_run(
f"kubectl create -f ../resources/seldon-gateway.yaml -n {namespace}"
)
retry_run(f"kubectl apply -f ../resources/graph1svc.json -n {namespace}")
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace, expected_deployments=2)
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
retry_run(f"kubectl apply -f ../resources/graph6svc.json -n {namespace}")
r = initial_rest_request("mymodel", namespace, endpoint=api_gateway)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(50):
r = rest_request_ambassador("mymodel", namespace, api_gateway)
assert r.status_code == 200
res = r.json()
assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
)
if (not r.status_code == 200) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
):
break
time.sleep(1)
        assert i < 50  # the loop above only runs 50 times
logging.warning("Success for test_rolling_update10")
run(f"kubectl delete -f ../resources/graph1svc.json -n {namespace}", shell=True)
run(f"kubectl delete -f ../resources/graph6svc.json -n {namespace}", shell=True)
@pytest.mark.parametrize(
"from_deployment,to_deployment",
[
("graph1.json", "graph8.json"), # From v1alpha2 to v1
("graph7.json", "graph8.json"), # From v1alpha3 to v1
],
)
def test_rolling_update_deployment(namespace, from_deployment, to_deployment):
from_file_path = to_resources_path(from_deployment)
retry_run(f"kubectl apply -f {from_file_path} -n {namespace}")
# Note that this is not yet parametrised!
wait_for_status("mymodel", namespace)
wait_for_rollout("mymodel", namespace)
logging.warning("Initial request")
r = initial_rest_request("mymodel", namespace)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
to_file_path = to_resources_path(to_deployment)
retry_run(f"kubectl apply -f {to_file_path} -n {namespace}")
r = initial_rest_request("mymodel", namespace)
assert r.status_code == 200
assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
i = 0
for i in range(100):
r = rest_request_ambassador("mymodel", namespace, API_AMBASSADOR)
assert r.status_code == 200
res = r.json()
assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
)
if (not r.status_code == 200) or (
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
):
break
time.sleep(1)
assert i < 100
run(f"kubectl delete -f {from_file_path} -n {namespace}", shell=True)
run(f"kubectl delete -f {to_file_path} -n {namespace}", shell=True)
| 17,179
| 1,164
| 68
|
4ce4e405b09d9027494a202ab8deb07dfb728805
| 1,078
|
py
|
Python
|
ingenico/connect/sdk/domain/mandates/definitions/create_mandate_with_return_url.py
|
festicket/connect-sdk-python3
|
c399c6443789dd978f319c89e1ebd387c812a77b
|
[
"MIT"
] | 12
|
2016-09-26T21:46:31.000Z
|
2020-12-23T18:44:54.000Z
|
ingenico/connect/sdk/domain/mandates/definitions/create_mandate_with_return_url.py
|
festicket/connect-sdk-python3
|
c399c6443789dd978f319c89e1ebd387c812a77b
|
[
"MIT"
] | 3
|
2020-05-02T16:53:02.000Z
|
2020-06-02T12:49:51.000Z
|
ingenico/connect/sdk/domain/mandates/definitions/create_mandate_with_return_url.py
|
festicket/connect-sdk-python3
|
c399c6443789dd978f319c89e1ebd387c812a77b
|
[
"MIT"
] | 11
|
2017-07-16T00:55:28.000Z
|
2021-09-24T17:00:49.000Z
|
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.domain.mandates.definitions.create_mandate_base import CreateMandateBase
| 29.135135
| 98
| 0.681818
|
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.domain.mandates.definitions.create_mandate_base import CreateMandateBase
class CreateMandateWithReturnUrl(CreateMandateBase):
__return_url = None
@property
def return_url(self):
"""
| Return URL to use if the mandate signing requires redirection.
Type: str
"""
return self.__return_url
@return_url.setter
def return_url(self, value):
self.__return_url = value
def to_dictionary(self):
dictionary = super(CreateMandateWithReturnUrl, self).to_dictionary()
if self.return_url is not None:
dictionary['returnUrl'] = self.return_url
return dictionary
def from_dictionary(self, dictionary):
super(CreateMandateWithReturnUrl, self).from_dictionary(dictionary)
if 'returnUrl' in dictionary:
self.return_url = dictionary['returnUrl']
return self
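# Illustrative usage of the data object above (not taken from the SDK docs): the URL is a
# made-up example, and the serialised payload may also carry whatever fields
# CreateMandateBase contributes in addition to returnUrl.
if __name__ == "__main__":
    mandate = CreateMandateWithReturnUrl()
    mandate.return_url = "https://example.com/mandate-return"
    payload = mandate.to_dictionary()
    assert payload["returnUrl"] == "https://example.com/mandate-return"
    # Round-trip an API-style dictionary back into the object.
    restored = CreateMandateWithReturnUrl().from_dictionary({"returnUrl": payload["returnUrl"]})
    assert restored.return_url == mandate.return_url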
| 446
| 358
| 23
|
68a2657034c704483c93720e0fd358d8d19b0d25
| 13,534
|
py
|
Python
|
VIGA_METALICA_VERIFICA - Copy.py
|
wmpjrufg/RASD-ALGORITMOS
|
1bdc2c4271c8019733c34333e5e91b70bc3c7604
|
[
"MIT"
] | null | null | null |
VIGA_METALICA_VERIFICA - Copy.py
|
wmpjrufg/RASD-ALGORITMOS
|
1bdc2c4271c8019733c34333e5e91b70bc3c7604
|
[
"MIT"
] | null | null | null |
VIGA_METALICA_VERIFICA - Copy.py
|
wmpjrufg/RASD-ALGORITMOS
|
1bdc2c4271c8019733c34333e5e91b70bc3c7604
|
[
"MIT"
] | null | null | null |
################################################################################
# UNIVERSIDADE FEDERAL DE CATALÃO (UFCAT)
# WANDERLEI MALAQUIAS PEREIRA JUNIOR, ENG. CIVIL / PROF (UFCAT)
# DONIZETTI SOUZA JUNIOR, ENG. CIVIL (UFCAT)
################################################################################
################################################################################
# ALGORITHM DESCRIPTION:
# LIMIT-STATE VERIFICATION LIBRARY FOR STEEL BEAMS, DEVELOPED
# BY THE ENGINEERING RESEARCH AND STUDY GROUP (GPEE)
################################################################################
################################################################################
# PYTHON LIBRARIES
import numpy as np
################################################################################
# GPEE DEVELOPER LIBRARIES
def DEFINICAO_SECAO(LAMBDA, LAMBDA_R, LAMBDA_P):
"""
    Esta função define se uma seção metálica é compacta, semi-compacta ou esbelta.
Entrada:
LAMBDA | Esbeltez calculada | - | Float
LAMBDA_R | Esbeltez secao compacta calculada | - | Float
LAMBDA_P | Esbeltez secao semicompacta calculada | - | Float
Saída:
TIPO_SEC | Tipo de secao calculada | - | String
"""
if LAMBDA <= LAMBDA_P:
TIPO_SEC = "COMPACTA"
elif (LAMBDA > LAMBDA_P) and (LAMBDA <= LAMBDA_R):
TIPO_SEC = "SEMI-COMPACTA"
elif LAMBDA > LAMBDA_R:
TIPO_SEC = "ESBELTA"
return TIPO_SEC
def MOMENTO_MRD_ALMA(E_S, F_Y, H_W, T_W, Z, B_F, T_F, W_C, W_T, PARAMETRO_PERFIL, TIPO_PERFIL, GAMMA_A1):
"""
Esta função classifica e verifica o momento resistente na alma de um perfil metálico
de acordo com a NBR 8800.
Entrada:
E_S | Módulo de elasticidade do aço | kN/m² | Float
F_Y | Tensão de escoamento do aço | kN/m² | Float
H_W | Altura da alma | m | Float
T_W | Largura da alma | m | Float
T_F | Altura da mesa | m | Float
B_F | Largura da mesa | m | Float
Z | Módulo plastico da seção | m³ | Float
W_C | Módulo plastico da seção compressao | kN/m² | Float
W_T | Módulo plastico da seção tracao | kN/m² | Float
PERFIL | Caracteristica do perfil | | String
GAMMA_A1 | Coeficiente de ponderação | | Float
Saída:
M_RD | Momento resistido de projeto | kN*m | Float
"""
Z = MODULO_PLASTICO(B_F, T_F, H_W, T_W)
# Classificação perfil
LAMBDA = H_W / T_W
LAMBDA_R = 5.70 * (E_S / F_Y) ** 0.5
if PARAMETRO_PERFIL == "DUPLA SIMETRIA":
D = 3.76
elif PARAMETRO_PERFIL == "MONO SIMETRIA":
        raise NotImplementedError("Perfil monossimétrico ainda não implementado")  # evita NameError em D logo abaixo
LAMBDA_P = D * (E_S / F_Y) ** 0.5
if LAMBDA_P >= LAMBDA_R:
print('SEÇÃO COM λp > λr')
# return Aqui tem que ver como vamos fazer para o código encerrar aqui
TIPO_SEC = DEFINICAO_SECAO(LAMBDA, LAMBDA_R, LAMBDA_P)
# Momento resistente
if TIPO_SEC == "COMPACTA":
M_P = Z * F_Y
M_RD = M_P / GAMMA_A1
elif TIPO_SEC == "SEMI-COMPACTA":
M_P = Z * F_Y
W = min(W_C, W_T)
M_R = W * F_Y
AUX_0 = (LAMBDA - LAMBDA_P) / (LAMBDA_R - LAMBDA_P)
M_RD = (M_P - AUX_0 * (M_P - M_R)) / GAMMA_A1
elif TIPO_SEC == "ESBELTA":
        raise NotImplementedError("Alma esbelta ainda não implementada")  # evita M_RD indefinido logo abaixo
# Momento máximo resistente
W = min(W_C, W_T)
M_RDMAX = 1.50 * W * F_Y / GAMMA_A1
if M_RD > M_RDMAX:
M_RD = M_RDMAX
return M_RD
def MOMENTO_MRD_MESA(E_S, F_Y, H_W, T_W, B_F, T_F, Z, W_C, W_T, PARAMETRO_PERFIL, TIPO_PERFIL, GAMMA_A1):
"""
Esta função classifica e verifica o momento resistente na mesa de um perfil metálico
de acordo com a NBR 8800.
Entrada:
E_S | Módulo de elasticidade do aço | kN/m² | Float
F_Y | Tensão de escoamento do aço | kN/m² | Float
H_W | Altura da alma | m | Float
T_W | Largura da alma | m | Float
T_F | Altura da mesa | m | Float
B_F | Largura da mesa | m | Float
Z | Módulo plastico da seção | m³ | Float
W_C | Módulo plastico da seção compressao | kN/m² | Float
W_T | Módulo plastico da seção tracao | kN/m² | Float
PERFIL | Caracteristica do perfil | - | String
GAMMA_A1 | Coeficiente de ponderação | - | Float
Saída:
M_RD | Momento fletor resistido | kN*m | Float
"""
Z = MODULO_PLASTICO(B_F, T_F, H_W, T_W)
# Classificação perfil
LAMBDA = B_F / (2 * T_F)
LAMBDA_P = 0.38 * (E_S / F_Y) ** 0.5
if TIPO_PERFIL == "SOLDADO":
C = 0.95
AUX_0 = 4 / (H_W / T_W) ** 0.50
K_C = np.clip(AUX_0, 0.35, 0.76)
elif TIPO_PERFIL == "LAMINADO":
C = 0.83
K_C = 1.00
AUX_1 = 0.70 * F_Y / K_C
LAMBDA_R = C * (E_S / AUX_1) ** 0.5
TIPO_SEC = DEFINICAO_SECAO(LAMBDA, LAMBDA_R, LAMBDA_P)
# Momento resistente
if TIPO_SEC == "COMPACTA":
M_P = Z * F_Y
M_RD = M_P / GAMMA_A1
elif TIPO_SEC == "SEMI-COMPACTA":
M_P = Z * F_Y
SIGMA_R = 0.30 * F_Y
M_R = W_C * (F_Y - SIGMA_R)
M_RMAX = W_T * F_Y
if M_R > M_RMAX:
M_R = M_RMAX
AUX_2 = (LAMBDA - LAMBDA_P) / (LAMBDA_R - LAMBDA_P)
M_RD = (M_P - AUX_2 * (M_P - M_R)) / GAMMA_A1
elif TIPO_SEC == "ESBELTA":
        if TIPO_PERFIL == "SOLDADO":
            M_N = (0.90 * E_S * K_C * W_C) / LAMBDA ** 2
        elif TIPO_PERFIL == "LAMINADO":
            M_N = (0.69 * E_S * W_C) / LAMBDA ** 2
M_RD = M_N / GAMMA_A1
W = min(W_C, W_T)
# Momento máximo resistente
M_RDMAX = 1.50 * W * F_Y / GAMMA_A1
if M_RD > M_RDMAX:
M_RD = M_RDMAX
return M_RD
def CALCULO_CV(H_W, T_W, E_S, F_Y):
"""
Esta função determina o coeficiente de redução do cisalhamento resistente Cv.
Entrada:
E_S | Módulo de elasticidade do aço | kN/m² | Float
F_Y | Tensão de escoamento do aço | kN/m² | Float
H_W | Altura da alma | m | Float
T_W | Largura da alma | m | Float
Saída:
        C_V | Coeficiente de cisalhamento | - | Float
"""
LAMBDA = H_W / T_W
LAMBDA_P = 2.46 * (E_S / F_Y) ** 0.5
LAMBDA_R = 3.06 * (E_S / F_Y) ** 0.5
if LAMBDA <= LAMBDA_P:
C_V = 1
elif (LAMBDA_P < LAMBDA) and (LAMBDA <= LAMBDA_R):
C_V = (2.46 / LAMBDA) * (E_S / F_Y) ** 0.5
elif LAMBDA > LAMBDA_R:
C_V = (7.5 * E_S) / (F_Y * LAMBDA ** 2)
return C_V
def CORTANTE_VRD(H_W, T_W, E_S, F_Y, GAMMA_A1):
"""
Esta função determina o cortante de cálculo para seções metálicas segundo a NBR 8800.
Entrada:
E_S | Módulo de elasticidade do aço | kN/m² | Float
F_Y | Tensão de escoamento do aço | kN/m² | Float
H_W | Altura da alma | m | Float
T_W | Largura da alma | m | Float
GAMMA_A1 | Coeficiente de ponderação | - | Float
Saída:
V_RD | Esforco cortante resistido | kN | Float
"""
A_W = H_W * T_W
C_V = CALCULO_CV(H_W, T_W, E_S, F_Y)
V_RD = (C_V * 0.6 * F_Y * A_W) / GAMMA_A1
return V_RD
def VERIFICACAO_VIGA_METALICA_MOMENTO_FLETOR(VIGA, ESFORCOS):
"""
Esta função verifica o momento fletor resistente de um perfil metálico
de acordo com a NBR 8800.
Entrada:
VIGA | Tags dicionario
| 'E_S' == Módulo de elasticidade do aço | Float
| 'F_Y' == Tensão de escoamento do aço | Float
| 'H_W' == Altura da alma | Float
| 'T_W' == Largura da alma | Float
| 'T_F' == Altura da mesa | Float
| 'B_F' == Largura da mesa | Float
| 'Z' == Módulo plastico da seção | Float
| 'W_C' == Módulo plastico da seção compressao | Float
| 'W_T' == Módulo plastico da seção tracao | Float
| 'S1' == Erro de modelo 1 | Float
| 'S2' == Erro de modelo 2 | Float
| 'PARAMETRO_PERFIL' == Caracteristica do perfil | String
| 'TIPO_PERFIL' == Tipo de perfil analisado | String
| 'GAMMA_A1' == Coeficiente de ponderação | Float
ESFORCOS | Tags dicionario
| 'M_SD' == Momento fletor solicitante | Float
Saída:
R | Momento fletor resistente | Float
S | Momento fletor solicitante | Float
"""
E_S = VIGA['E_S']
F_Y = VIGA['F_Y']
H_W = VIGA['H_W']
T_W = VIGA['T_W']
B_F = VIGA['B_F']
T_F = VIGA['T_F']
Z = MODULO_PLASTICO(B_F, T_F, H_W, T_W)
#Z = VIGA['Z']
INERCIA = INERCIA_CALCULO(B_F, T_F, H_W, T_W)
#INERCIA = VIGA['INERCIA']
S1 = VIGA['S1']
S2 = VIGA['S2']
Y_GC = (( T_F * 2) + H_W) / 2
W_C = INERCIA / Y_GC
W_T = W_C
PARAMETRO_PERFIL = VIGA['PARAMETRO_PERFIL']
TIPO_PERFIL = VIGA['TIPO_PERFIL']
GAMMA_A1 = VIGA['GAMMA_A1']
M_SD = ESFORCOS['M_SD']
#Resistencia momento fletor de projeto
M_RDMESA = MOMENTO_MRD_MESA(E_S, F_Y, H_W, T_W, B_F, T_F, Z, W_C, W_T, PARAMETRO_PERFIL, TIPO_PERFIL, GAMMA_A1)
M_RDALMA = MOMENTO_MRD_ALMA(E_S, F_Y, H_W, T_W, Z, B_F, T_F, W_C, W_T, PARAMETRO_PERFIL, TIPO_PERFIL, GAMMA_A1)
M_RD = min(M_RDMESA, M_RDALMA)
R = S1 * M_RD
S = S2 * M_SD
return(R, S)
def VERIFICACAO_VIGA_METALICA_ESFORCO_CORTANTE(VIGA, ESFORCOS):
"""
Esta função verifica o esforco cortante de um perfil metálico
de acordo com a NBR 8800.
Entrada:
VIGA | Tags dicionario
| 'E_S' == Módulo de elasticidade do aço | Float
| 'F_Y' == Tensão de escoamento do aço | Float
| 'H_W' == Altura da alma | Float
| 'T_W' == Largura da alma | Float
| 'S1' == Erro de modelo 1 | Float
| 'S2' == Erro de modelo 2 | Float
| 'GAMMA_A1' == Coeficiente de ponderação | Float
ESFORCOS | Tags dicionario
| 'V_SD' == Esforco cortante solicitante | Float
Saída:
R | Esforco cortante resistente | Float
S | Esforco cortante solicitante | Float
"""
E_S = VIGA['E_S']
F_Y = VIGA['F_Y']
H_W = VIGA['H_W']
T_W = VIGA['T_W']
S1 = VIGA['S1']
S2 = VIGA['S2']
V_SD = ESFORCOS['V_SD']
GAMMA_A1 = VIGA['GAMMA_A1']
#Resistencia esforco cortante de projeto
V_RD = CORTANTE_VRD(H_W, T_W, E_S, F_Y, GAMMA_A1)
R = S1 * V_RD
S = S2 * V_SD
return(R, S)
def VERIFICACAO_VIGA_METALICA_DEFORMACAO(VIGA, ESFORCOS):
"""
Esta função verifica o deflexao maxima de um perfil metálico
de acordo com a NBR 8800.
Entrada:
VIGA | Tags dicionario
| 'L_MAX' == Largura do elemento | Float
| 'S1' == Erro de modelo 1 | Float
| 'S2' == Erro de modelo 2 | Float
ESFORCOS | Tags dicionario
| 'D_SD' == Deflexao solicitante | Float
Saída:
R | Esforco cortante resistente | Float
S | Esforco cortante solicitante | Float
"""
D_SD = ESFORCOS['D_SD']
S1 = VIGA['S1']
S2 = VIGA['S2']
L_MAX = ESFORCOS['L_MAX']
D_MAX = L_MAX / 350
R = abs(S1 * D_MAX)
S = abs(S2 * D_SD / 100)
return(R, S)
| 36.479784
| 115
| 0.504064
|
################################################################################
# UNIVERSIDADE FEDERAL DE CATALÃO (UFCAT)
# WANDERLEI MALAQUIAS PEREIRA JUNIOR, ENG. CIVIL / PROF (UFCAT)
# DONIZETTI SOUZA JUNIOR, ENG. CIVIL (UFCAT)
################################################################################
################################################################################
# ALGORITHM DESCRIPTION:
# LIMIT-STATE VERIFICATION LIBRARY FOR STEEL BEAMS, DEVELOPED
# BY THE ENGINEERING RESEARCH AND STUDY GROUP (GPEE)
################################################################################
################################################################################
# PYTHON LIBRARIES
import numpy as np
################################################################################
# GPEE DEVELOPER LIBRARIES
def DEFINICAO_SECAO(LAMBDA, LAMBDA_R, LAMBDA_P):
"""
    Esta função define se uma seção metálica é compacta, semi-compacta ou esbelta.
Entrada:
LAMBDA | Esbeltez calculada | - | Float
LAMBDA_R | Esbeltez secao compacta calculada | - | Float
LAMBDA_P | Esbeltez secao semicompacta calculada | - | Float
Saída:
TIPO_SEC | Tipo de secao calculada | - | String
"""
if LAMBDA <= LAMBDA_P:
TIPO_SEC = "COMPACTA"
elif (LAMBDA > LAMBDA_P) and (LAMBDA <= LAMBDA_R):
TIPO_SEC = "SEMI-COMPACTA"
elif LAMBDA > LAMBDA_R:
TIPO_SEC = "ESBELTA"
return TIPO_SEC
def MOMENTO_MRD_ALMA(E_S, F_Y, H_W, T_W, Z, B_F, T_F, W_C, W_T, PARAMETRO_PERFIL, TIPO_PERFIL, GAMMA_A1):
"""
Esta função classifica e verifica o momento resistente na alma de um perfil metálico
de acordo com a NBR 8800.
Entrada:
E_S | Módulo de elasticidade do aço | kN/m² | Float
F_Y | Tensão de escoamento do aço | kN/m² | Float
H_W | Altura da alma | m | Float
T_W | Largura da alma | m | Float
T_F | Altura da mesa | m | Float
B_F | Largura da mesa | m | Float
Z | Módulo plastico da seção | m³ | Float
W_C | Módulo plastico da seção compressao | kN/m² | Float
W_T | Módulo plastico da seção tracao | kN/m² | Float
PERFIL | Caracteristica do perfil | | String
GAMMA_A1 | Coeficiente de ponderação | | Float
Saída:
M_RD | Momento resistido de projeto | kN*m | Float
"""
Z = MODULO_PLASTICO(B_F, T_F, H_W, T_W)
# Classificação perfil
LAMBDA = H_W / T_W
LAMBDA_R = 5.70 * (E_S / F_Y) ** 0.5
if PARAMETRO_PERFIL == "DUPLA SIMETRIA":
D = 3.76
elif PARAMETRO_PERFIL == "MONO SIMETRIA":
        raise NotImplementedError("Perfil monossimétrico ainda não implementado")  # evita NameError em D logo abaixo
LAMBDA_P = D * (E_S / F_Y) ** 0.5
if LAMBDA_P >= LAMBDA_R:
print('SEÇÃO COM λp > λr')
# return Aqui tem que ver como vamos fazer para o código encerrar aqui
TIPO_SEC = DEFINICAO_SECAO(LAMBDA, LAMBDA_R, LAMBDA_P)
# Momento resistente
if TIPO_SEC == "COMPACTA":
M_P = Z * F_Y
M_RD = M_P / GAMMA_A1
elif TIPO_SEC == "SEMI-COMPACTA":
M_P = Z * F_Y
W = min(W_C, W_T)
M_R = W * F_Y
AUX_0 = (LAMBDA - LAMBDA_P) / (LAMBDA_R - LAMBDA_P)
M_RD = (M_P - AUX_0 * (M_P - M_R)) / GAMMA_A1
elif TIPO_SEC == "ESBELTA":
        raise NotImplementedError("Alma esbelta ainda não implementada")  # evita M_RD indefinido logo abaixo
# Momento máximo resistente
W = min(W_C, W_T)
M_RDMAX = 1.50 * W * F_Y / GAMMA_A1
if M_RD > M_RDMAX:
M_RD = M_RDMAX
return M_RD
def MOMENTO_MRD_MESA(E_S, F_Y, H_W, T_W, B_F, T_F, Z, W_C, W_T, PARAMETRO_PERFIL, TIPO_PERFIL, GAMMA_A1):
"""
Esta função classifica e verifica o momento resistente na mesa de um perfil metálico
de acordo com a NBR 8800.
Entrada:
E_S | Módulo de elasticidade do aço | kN/m² | Float
F_Y | Tensão de escoamento do aço | kN/m² | Float
H_W | Altura da alma | m | Float
T_W | Largura da alma | m | Float
T_F | Altura da mesa | m | Float
B_F | Largura da mesa | m | Float
Z | Módulo plastico da seção | m³ | Float
W_C | Módulo plastico da seção compressao | kN/m² | Float
W_T | Módulo plastico da seção tracao | kN/m² | Float
PERFIL | Caracteristica do perfil | - | String
GAMMA_A1 | Coeficiente de ponderação | - | Float
Saída:
M_RD | Momento fletor resistido | kN*m | Float
"""
Z = MODULO_PLASTICO(B_F, T_F, H_W, T_W)
# Classificação perfil
LAMBDA = B_F / (2 * T_F)
LAMBDA_P = 0.38 * (E_S / F_Y) ** 0.5
if TIPO_PERFIL == "SOLDADO":
C = 0.95
AUX_0 = 4 / (H_W / T_W) ** 0.50
K_C = np.clip(AUX_0, 0.35, 0.76)
elif TIPO_PERFIL == "LAMINADO":
C = 0.83
K_C = 1.00
AUX_1 = 0.70 * F_Y / K_C
LAMBDA_R = C * (E_S / AUX_1) ** 0.5
TIPO_SEC = DEFINICAO_SECAO(LAMBDA, LAMBDA_R, LAMBDA_P)
# Momento resistente
if TIPO_SEC == "COMPACTA":
M_P = Z * F_Y
M_RD = M_P / GAMMA_A1
elif TIPO_SEC == "SEMI-COMPACTA":
M_P = Z * F_Y
SIGMA_R = 0.30 * F_Y
M_R = W_C * (F_Y - SIGMA_R)
M_RMAX = W_T * F_Y
if M_R > M_RMAX:
M_R = M_RMAX
AUX_2 = (LAMBDA - LAMBDA_P) / (LAMBDA_R - LAMBDA_P)
M_RD = (M_P - AUX_2 * (M_P - M_R)) / GAMMA_A1
elif TIPO_SEC == "ESBELTA":
        if TIPO_PERFIL == "SOLDADO":
            M_N = (0.90 * E_S * K_C * W_C) / LAMBDA ** 2
        elif TIPO_PERFIL == "LAMINADO":
            M_N = (0.69 * E_S * W_C) / LAMBDA ** 2
M_RD = M_N / GAMMA_A1
W = min(W_C, W_T)
# Momento máximo resistente
M_RDMAX = 1.50 * W * F_Y / GAMMA_A1
if M_RD > M_RDMAX:
M_RD = M_RDMAX
return M_RD
def CALCULO_CV(H_W, T_W, E_S, F_Y):
"""
Esta função determina o coeficiente de redução do cisalhamento resistente Cv.
Entrada:
E_S | Módulo de elasticidade do aço | kN/m² | Float
F_Y | Tensão de escoamento do aço | kN/m² | Float
H_W | Altura da alma | m | Float
T_W | Largura da alma | m | Float
Saída:
        C_V | Coeficiente de cisalhamento | - | Float
"""
LAMBDA = H_W / T_W
LAMBDA_P = 2.46 * (E_S / F_Y) ** 0.5
LAMBDA_R = 3.06 * (E_S / F_Y) ** 0.5
if LAMBDA <= LAMBDA_P:
C_V = 1
elif (LAMBDA_P < LAMBDA) and (LAMBDA <= LAMBDA_R):
C_V = (2.46 / LAMBDA) * (E_S / F_Y) ** 0.5
elif LAMBDA > LAMBDA_R:
C_V = (7.5 * E_S) / (F_Y * LAMBDA ** 2)
return C_V
def CORTANTE_VRD(H_W, T_W, E_S, F_Y, GAMMA_A1):
"""
Esta função determina o cortante de cálculo para seções metálicas segundo a NBR 8800.
Entrada:
E_S | Módulo de elasticidade do aço | kN/m² | Float
F_Y | Tensão de escoamento do aço | kN/m² | Float
H_W | Altura da alma | m | Float
T_W | Largura da alma | m | Float
GAMMA_A1 | Coeficiente de ponderação | - | Float
Saída:
V_RD | Esforco cortante resistido | kN | Float
"""
A_W = H_W * T_W
C_V = CALCULO_CV(H_W, T_W, E_S, F_Y)
V_RD = (C_V * 0.6 * F_Y * A_W) / GAMMA_A1
return V_RD
def VERIFICACAO_VIGA_METALICA_MOMENTO_FLETOR(VIGA, ESFORCOS):
"""
Esta função verifica o momento fletor resistente de um perfil metálico
de acordo com a NBR 8800.
Entrada:
VIGA | Tags dicionario
| 'E_S' == Módulo de elasticidade do aço | Float
| 'F_Y' == Tensão de escoamento do aço | Float
| 'H_W' == Altura da alma | Float
| 'T_W' == Largura da alma | Float
| 'T_F' == Altura da mesa | Float
| 'B_F' == Largura da mesa | Float
| 'Z' == Módulo plastico da seção | Float
| 'W_C' == Módulo plastico da seção compressao | Float
| 'W_T' == Módulo plastico da seção tracao | Float
| 'S1' == Erro de modelo 1 | Float
| 'S2' == Erro de modelo 2 | Float
| 'PARAMETRO_PERFIL' == Caracteristica do perfil | String
| 'TIPO_PERFIL' == Tipo de perfil analisado | String
| 'GAMMA_A1' == Coeficiente de ponderação | Float
ESFORCOS | Tags dicionario
| 'M_SD' == Momento fletor solicitante | Float
Saída:
R | Momento fletor resistente | Float
S | Momento fletor solicitante | Float
"""
E_S = VIGA['E_S']
F_Y = VIGA['F_Y']
H_W = VIGA['H_W']
T_W = VIGA['T_W']
B_F = VIGA['B_F']
T_F = VIGA['T_F']
Z = MODULO_PLASTICO(B_F, T_F, H_W, T_W)
#Z = VIGA['Z']
INERCIA = INERCIA_CALCULO(B_F, T_F, H_W, T_W)
#INERCIA = VIGA['INERCIA']
S1 = VIGA['S1']
S2 = VIGA['S2']
Y_GC = (( T_F * 2) + H_W) / 2
W_C = INERCIA / Y_GC
W_T = W_C
PARAMETRO_PERFIL = VIGA['PARAMETRO_PERFIL']
TIPO_PERFIL = VIGA['TIPO_PERFIL']
GAMMA_A1 = VIGA['GAMMA_A1']
M_SD = ESFORCOS['M_SD']
#Resistencia momento fletor de projeto
M_RDMESA = MOMENTO_MRD_MESA(E_S, F_Y, H_W, T_W, B_F, T_F, Z, W_C, W_T, PARAMETRO_PERFIL, TIPO_PERFIL, GAMMA_A1)
M_RDALMA = MOMENTO_MRD_ALMA(E_S, F_Y, H_W, T_W, Z, B_F, T_F, W_C, W_T, PARAMETRO_PERFIL, TIPO_PERFIL, GAMMA_A1)
M_RD = min(M_RDMESA, M_RDALMA)
R = S1 * M_RD
S = S2 * M_SD
return(R, S)
def VERIFICACAO_VIGA_METALICA_ESFORCO_CORTANTE(VIGA, ESFORCOS):
"""
Esta função verifica o esforco cortante de um perfil metálico
de acordo com a NBR 8800.
Entrada:
VIGA | Tags dicionario
| 'E_S' == Módulo de elasticidade do aço | Float
| 'F_Y' == Tensão de escoamento do aço | Float
| 'H_W' == Altura da alma | Float
| 'T_W' == Largura da alma | Float
| 'S1' == Erro de modelo 1 | Float
| 'S2' == Erro de modelo 2 | Float
| 'GAMMA_A1' == Coeficiente de ponderação | Float
ESFORCOS | Tags dicionario
| 'V_SD' == Esforco cortante solicitante | Float
Saída:
R | Esforco cortante resistente | Float
S | Esforco cortante solicitante | Float
"""
E_S = VIGA['E_S']
F_Y = VIGA['F_Y']
H_W = VIGA['H_W']
T_W = VIGA['T_W']
S1 = VIGA['S1']
S2 = VIGA['S2']
V_SD = ESFORCOS['V_SD']
GAMMA_A1 = VIGA['GAMMA_A1']
#Resistencia esforco cortante de projeto
V_RD = CORTANTE_VRD(H_W, T_W, E_S, F_Y, GAMMA_A1)
R = S1 * V_RD
S = S2 * V_SD
return(R, S)
def VERIFICACAO_VIGA_METALICA_DEFORMACAO(VIGA, ESFORCOS):
"""
Esta função verifica o deflexao maxima de um perfil metálico
de acordo com a NBR 8800.
Entrada:
VIGA | Tags dicionario
| 'L_MAX' == Largura do elemento | Float
| 'S1' == Erro de modelo 1 | Float
| 'S2' == Erro de modelo 2 | Float
ESFORCOS | Tags dicionario
| 'D_SD' == Deflexao solicitante | Float
Saída:
R | Esforco cortante resistente | Float
S | Esforco cortante solicitante | Float
"""
D_SD = ESFORCOS['D_SD']
S1 = VIGA['S1']
S2 = VIGA['S2']
L_MAX = ESFORCOS['L_MAX']
D_MAX = L_MAX / 350
R = abs(S1 * D_MAX)
S = abs(S2 * D_SD / 100)
return(R, S)
def INERCIA_CALCULO(B_F, T_F, H_W, T_W):
CG_A1 = B_F * T_F
CG_Y1 = T_F/2
CG_PT1 = CG_A1 * CG_Y1
CG_A2 = H_W * T_W
CG_Y2 = (T_F)+(H_W/2)
CG_PT2 = CG_A2 * CG_Y2
CG_A3 = B_F * T_F
CG_Y3 = (T_F/2)+(T_F+H_W)
CG_PT3 = CG_A3 * CG_Y3
CG = (CG_PT1+CG_PT2+CG_PT3)/(CG_A1+CG_A2+CG_A3)
INERCIA1_PT1 = (B_F * (T_F**3))/12
INERCIA1_PT2 = (CG - CG_Y1) ** 2
INERCIA1_PT3 = B_F * T_F
INERCIA1 = INERCIA1_PT1 + INERCIA1_PT3 * INERCIA1_PT2
INERCIA2_PT1 = (T_W * (H_W **3))/12
INERCIA2_PT2 = (CG - CG_Y2) ** 2
INERCIA2_PT3 = H_W * T_W
INERCIA2 = INERCIA2_PT1 + INERCIA2_PT3 * INERCIA2_PT2
INERCIA3_PT1 = (B_F * (T_F**3))/12
INERCIA3_PT2 = (CG - CG_Y3) ** 2
INERCIA3_PT3 = B_F * T_F
INERCIA3 = INERCIA3_PT1 + INERCIA3_PT3 * INERCIA3_PT2
INERCIA_TOTAL = (INERCIA1 + INERCIA2 + INERCIA3) * 0.000000000001 #m4
return (INERCIA_TOTAL)
def MODULO_PLASTICO(B_F, T_F, H_W, T_W):
H = (H_W)+(2*T_F)
PT1 = B_F * T_F * (H - T_F)
PT2 = (T_W/4)*(H - 2 * T_F)**2
Z = (PT1 + PT2) * 0.000000001 #m3
return(Z)
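# EXAMPLE USAGE (illustrative only): every number below is an assumed value, not data
# shipped with the library. Geometry is given in mm, matching the mm**3 -> m**3 and
# mm**4 -> m**4 conversions inside MODULO_PLASTICO and INERCIA_CALCULO; E_S and F_Y
# are in kN/m², as in the docstrings above.
if __name__ == "__main__":
    VIGA = {'E_S': 200E6, 'F_Y': 250E3,              # aço: módulo e escoamento (kN/m²)
            'H_W': 400.0, 'T_W': 8.0,                # alma (mm)
            'B_F': 200.0, 'T_F': 12.5,               # mesa (mm)
            'S1': 1.0, 'S2': 1.0,                    # erros de modelo
            'PARAMETRO_PERFIL': "DUPLA SIMETRIA",
            'TIPO_PERFIL': "SOLDADO",
            'GAMMA_A1': 1.10}
    ESFORCOS = {'M_SD': 150.0, 'V_SD': 120.0, 'D_SD': 1.2, 'L_MAX': 6.0}
    print(VERIFICACAO_VIGA_METALICA_MOMENTO_FLETOR(VIGA, ESFORCOS))
    print(VERIFICACAO_VIGA_METALICA_ESFORCO_CORTANTE(VIGA, ESFORCOS))
    print(VERIFICACAO_VIGA_METALICA_DEFORMACAO(VIGA, ESFORCOS))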
| 1,056
| 0
| 46
|
368b9cd21ed0d5c389fc6e20d0fcc7a3b567992b
| 1,671
|
py
|
Python
|
main.py
|
unpamplemoussemignon/e314-music-visualization
|
38fe1f97bfd35751867a51af330148f65b6f21bf
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
unpamplemoussemignon/e314-music-visualization
|
38fe1f97bfd35751867a51af330148f65b6f21bf
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
unpamplemoussemignon/e314-music-visualization
|
38fe1f97bfd35751867a51af330148f65b6f21bf
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# =================================
# Music Visualizer
# ------------
# Reference to -> [May 2020] - Mina PECHEUX
#
# Based on the work by Yu-Jie Lin
# (Public Domain)
# Github: https://gist.github.com/manugarri/1c0fcfe9619b775bb82de0790ccb88da
import wave
import click
from compute import plt, compute, WIDTH, HEIGHT, \
SAMPLE_SIZE, CHANNELS, RATE, FPS
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.argument('filename', type=str)
@click.option('-m', '--method', help='Method to use for the video processing', required=True,
type=click.Choice(['bars', 'spectrum', 'wave', 'rain'], case_sensitive=False))
@click.option('-c', '--color', help='An hex color or "hue_rotate" to auto-update the color throughout the film',
type=str, default='hue_rotate', show_default=True)
@click.option('--output/--no-output', help='Whether to save the result in a file or display it directly',
default=False, show_default=True)
if __name__ == '__main__':
main()
| 32.764706
| 112
| 0.653501
|
#!/usr/bin/env python
# =================================
# Music Visualizer
# ------------
# Reference to -> [May 2020] - Mina PECHEUX
#
# Based on the work by Yu-Jie Lin
# (Public Domain)
# Github: https://gist.github.com/manugarri/1c0fcfe9619b775bb82de0790ccb88da
import wave
import click
from compute import plt, compute, WIDTH, HEIGHT, \
SAMPLE_SIZE, CHANNELS, RATE, FPS
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.argument('filename', type=str)
@click.option('-m', '--method', help='Method to use for the video processing', required=True,
type=click.Choice(['bars', 'spectrum', 'wave', 'rain'], case_sensitive=False))
@click.option('-c', '--color', help='An hex color or "hue_rotate" to auto-update the color throughout the film',
type=str, default='hue_rotate', show_default=True)
@click.option('--output/--no-output', help='Whether to save the result in a file or display it directly',
default=False, show_default=True)
def main(filename, method, color, output):
dpi = plt.rcParams['figure.dpi']
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.figsize'] = (1.0 * WIDTH / dpi, 1.0 * HEIGHT / dpi)
wf = wave.open(filename + '.wav', 'rb')
assert wf.getnchannels() == CHANNELS
assert wf.getsampwidth() == SAMPLE_SIZE
# assert wf.getframerate() == RATE
fig = plt.figure(facecolor='black', edgecolor='black')
ani = compute(method, color, fig, wf)
if ani is None:
wf.close()
return
if output:
ani.save(filename + '.mp4', fps=FPS, savefig_kwargs={'facecolor':'black'})
else:
plt.show()
wf.close()
if __name__ == '__main__':
main()
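# Illustrative programmatic invocation of the CLI above (assumptions: this file is
# importable as `main` and a mysong.wav sits next to it). The equivalent shell
# command would be: python main.py mysong -m bars -c "#22aa88" --output
def _example_cli_invocation():
    from click.testing import CliRunner
    result = CliRunner().invoke(
        main, ["mysong", "--method", "bars", "--color", "#22aa88", "--output"]
    )
    return result.exit_code, result.output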
| 601
| 0
| 22
|
df2176e417c138b40a9dee3721b7d6cc7dbf0916
| 1,663
|
py
|
Python
|
gerar_inscricaoestadual.py
|
andrellmagalhaes/Open-development
|
718fb1815fbd6dd194f1ec2049f56aa2a533b88e
|
[
"MIT"
] | 1
|
2021-09-13T00:55:08.000Z
|
2021-09-13T00:55:08.000Z
|
gerar_inscricaoestadual.py
|
andrellmagalhaes/Open-development
|
718fb1815fbd6dd194f1ec2049f56aa2a533b88e
|
[
"MIT"
] | null | null | null |
gerar_inscricaoestadual.py
|
andrellmagalhaes/Open-development
|
718fb1815fbd6dd194f1ec2049f56aa2a533b88e
|
[
"MIT"
] | null | null | null |
# IMPORTS
import random
import string
# FUNCTION THAT GENERATES STATE REGISTRATION (INSCRIÇÃO ESTADUAL) NUMBERS
| 31.980769
| 79
| 0.629585
|
# IMPORTS
import random
import string
# FUNCTION THAT GENERATES STATE REGISTRATION (INSCRIÇÃO ESTADUAL) NUMBERS
def gerar_inscricaoestadual(quantidade=1, tamanho=9):
    # CHECK WHETHER "QUANTIDADE" OR "TAMANHO" IS NOT AN INTEGER
    if not type(quantidade) == int or not type(tamanho) == int:
        raise TypeError("Os parâmetros fornecidos devem ser do tipo inteiro!")
    # CHECK WHETHER "QUANTIDADE" IS BELOW 1 OR ABOVE 1000
    if quantidade < 1 or quantidade > 1000:
        raise ValueError("O valor de quantidade deve estar entre 1 e 1000!")
    # CHECK WHETHER "TAMANHO" IS NEITHER 8 NOR 9
    if tamanho not in [8, 9]:
        raise ValueError("O valor de tamanho deve ser 8 ou 9!")
    # CHARACTER BASE USED TO GENERATE THE REGISTRATION NUMBERS
    base_caracteres = string.digits
    # GENERATED REGISTRATION NUMBER
    ie = ""
    # LIST OF GENERATED REGISTRATION NUMBERS
    lista_ies = []
    # HOW MANY REGISTRATION NUMBERS TO GENERATE
    quantidade_ies = quantidade
    # LENGTH OF THE REGISTRATION NUMBERS TO GENERATE
    tamanho_ies = tamanho
    # GENERATION LOOP
    for x in range(quantidade_ies):
        for y in range(tamanho_ies):
            ie = random.choice(base_caracteres) + ie
        # FORMAT THE GENERATED REGISTRATION NUMBER
        if tamanho == 8:
            ie = ie[:6] + "-" + ie[6:]
        if tamanho == 9:
            ie = ie[:7] + "-" + ie[7:]
        # APPEND THE GENERATED REGISTRATION NUMBER TO THE LIST
        lista_ies.append(ie)
        # RESET "IE"
        ie = ""
    # RETURN THE LIST OF GENERATED REGISTRATION NUMBERS
    return lista_ies
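# EXAMPLE USAGE (illustrative counts only):
if __name__ == "__main__":
    # Three 9-digit and two 8-digit state registration numbers.
    print(gerar_inscricaoestadual(quantidade=3, tamanho=9))
    print(gerar_inscricaoestadual(quantidade=2, tamanho=8))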
| 1,567
| 0
| 23
|
fa7b1e632b12afb4a0c70898ec7ee11e52a3402e
| 2,160
|
py
|
Python
|
tests/tests_unit/test_cdp_client.py
|
cognitedata/cognite-model-hosting
|
89f58e25f0e3c3a37006e60f52246da0b00a0066
|
[
"Apache-2.0"
] | 4
|
2019-05-27T12:51:45.000Z
|
2020-02-26T08:16:30.000Z
|
tests/tests_unit/test_cdp_client.py
|
cognitedata/cognite-model-hosting
|
89f58e25f0e3c3a37006e60f52246da0b00a0066
|
[
"Apache-2.0"
] | 26
|
2019-03-18T15:10:20.000Z
|
2021-06-21T05:47:24.000Z
|
tests/tests_unit/test_cdp_client.py
|
cognitedata/cognite-model-hosting
|
89f58e25f0e3c3a37006e60f52246da0b00a0066
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import pytest
from cognite.client.data_classes import Datapoints, DatapointsList
from cognite.client.testing import monkeypatch_cognite_client
from cognite.model_hosting.data_fetcher._cdp_client import CdpClient, DatapointsFrameQuery
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 31.304348
| 110
| 0.659259
|
import pandas as pd
import pytest
from cognite.client.data_classes import Datapoints, DatapointsList
from cognite.client.testing import monkeypatch_cognite_client
from cognite.model_hosting.data_fetcher._cdp_client import CdpClient, DatapointsFrameQuery
@pytest.fixture
def mock_cogcli_datapoints_retrieve_single():
with monkeypatch_cognite_client() as cogmock:
cogmock.datapoints.retrieve.return_value = Datapoints(
id=1, external_id="1", value=[1, 2, 3], timestamp=[1000, 2000, 3000]
)
yield
def test_get_datapoints_frame_single(mock_cogcli_datapoints_retrieve_single):
client = CdpClient()
res = client.get_datapoints_frame_single(id=1, external_id=None, start=0, end=4000)
assert (3, 1) == res.shape
assert res.columns == ["value"]
@pytest.fixture
def mock_cogcli_datapoints_query():
with monkeypatch_cognite_client() as cogmock:
cogmock.datapoints.query.return_value = [
DatapointsList([Datapoints(id=1, external_id="1", value=[1, 2, 3], timestamp=[1000, 2000, 3000])])
]
yield
def test_get_datapoints_frame_multiple(mock_cogcli_datapoints_query):
client = CdpClient()
res = client.get_datapoints_frame_multiple(
[
DatapointsFrameQuery(
id=1,
external_id=None,
start=0,
end=4000,
aggregate=None,
granularity=None,
include_outside_points=False,
)
]
)
assert (3, 1) == res[0].shape
assert res[0].columns == ["value"]
@pytest.fixture
def mock_cogcli_retrieve_dataframe():
with monkeypatch_cognite_client() as cogmock:
cogmock.datapoints.retrieve_dataframe.return_value = pd.DataFrame(
[[1], [2], [3]], columns=["1"], index=[3000, 4000, 5000]
)
yield
def test_get_datapoints_frame(mock_cogcli_retrieve_dataframe):
client = CdpClient()
res = client.get_datapoints_frame(
time_series=[{"id": 1, "aggregate": "avg"}], granularity="1s", start=0, end=150000
)
assert (3, 1) == res.shape
assert res.columns == ["1"]
| 1,713
| 0
| 135
|
55d1d3e1e8c91e15987eee3a0c33503376cce60d
| 9,287
|
py
|
Python
|
pygame_project/split_balloon/split_balloon_game.py
|
bbjoite09/PythonProject
|
f0ab7a4e6b23758612012ebf7989029e88edf204
|
[
"MIT"
] | 1
|
2021-04-29T08:18:50.000Z
|
2021-04-29T08:18:50.000Z
|
pygame_project/split_balloon/split_balloon_game.py
|
bbjoite09/PythonProject
|
f0ab7a4e6b23758612012ebf7989029e88edf204
|
[
"MIT"
] | 1
|
2021-05-23T16:05:29.000Z
|
2021-05-23T16:05:29.000Z
|
pygame_project/split_balloon/split_balloon_game.py
|
bbjoite09/PythonProject
|
f0ab7a4e6b23758612012ebf7989029e88edf204
|
[
"MIT"
] | null | null | null |
'''
Arcade-style Pang game
[Game rules]
1. The character stays at the bottom of the screen and can only move left and right
2. Pressing space fires a weapon upwards
3. One large ball appears and bounces around
4. A ball hit by a weapon splits into two smaller ones; the smallest balls disappear
5. Clearing every ball ends the game (success)
6. Touching a ball with the character ends the game (failure)
7. Exceeding the 99-second time limit ends the game (failure)
8. FPS is fixed at 30 (adjust the speed values if needed)
[Game images]
1. Background : 640 * 480 (width, height) - background.png
2. Stage : 640 * 50 - stage.png
3. Character : 33 * 60 - character.png
4. Weapon : 20 * 430 - weapon.png
5. Balloons : 160 * 160, 80 * 80, 40 * 40, 20 * 20 - ball1.png ~ ball4.png
'''
import os
import pygame
# 기본 초기화(반드시 해야하는 것들)
# pygame 을 import 하면 반드시 초기화를 해줘야한다.
pygame.init()
# 화면 크기 설정
screen_width = 640
screen_height = 480
screen = pygame.display.set_mode((screen_width, screen_height))
# 화면 타이틀 설정
pygame.display.set_caption("SPLIT BALLOON GAME")
# 폰트 설정
game_font = pygame.font.Font(None, 40)
total_time = 100
start_ticks = pygame.time.get_ticks()
# 게임 종료 메시지
game_result = "GAME OVER"
# 이동할 좌표
character_to_x = 0
# 이동 속도
character_speed = 0.6
# FPS
clock = pygame.time.Clock()
# 1. 사용자 게임 초기화 (배경 화면, 게임 이미지, 좌표, 속도, 폰트 등)
life = 3
current_path = os.path.dirname(__file__)
image_path = os.path.join(current_path, "images")
background = pygame.image.load(os.path.join(image_path, "background.png"))
stage = pygame.image.load(os.path.join(image_path, "stage.png"))
stage_size = stage.get_rect().size
stage_height = stage_size[1]
stage_y_pos = screen_height - stage_height
character = pygame.image.load(os.path.join(image_path, "character.png"))
character_size = character.get_rect().size
character_width = character_size[0]
character_height = character_size[1]
character_x_pos = screen_width / 2 - character_width / 2
character_y_pos = screen_height - character_height - stage_height
weapon = pygame.image.load(os.path.join(image_path, "weapon.png"))
weapon_size = weapon.get_rect().size
weapon_width = weapon_size[0]
# 무기는 한번에 여러 발 발사 가능
weapons = []
# 무기 이동 속도
weapon_speed = 10
# 풍선 만들기(4개 크기에 대해 따로 처리)
balloon_images = [
pygame.image.load(os.path.join(image_path, "balloon1.png")),
pygame.image.load(os.path.join(image_path, "balloon2.png")),
pygame.image.load(os.path.join(image_path, "balloon3.png")),
pygame.image.load(os.path.join(image_path, "balloon4.png"))
]
# 풍선 크기에 따른 최초 스피드
balloon_speed_y = [-18, -15, -12, -9]
# 풍선들
balloons = []
# 최초 발생 큰 풍선 추가
balloons.append({
"pos_x": 50, # 풍선의 x 좌표
"pos_y": 50, # 풍선의 y좌표
"img_idx": 0,
"to_x": 3, # x축 이동 방향
"to_y": -6, # y축 이동 방향
"init_speed_y": balloon_speed_y[0] # y 최초 속도
})
# 사라질 무기와 공 정보 저장 변수
weapon_to_remove = -1
ballons_to_remove = -1
running = True
while running:
dt = clock.tick(30) # 게임화면의 초당 프레임 수
# 2. 이벤트 처리(키보드, 마우스 등)
for event in pygame.event.get():
if event.type == pygame.QUIT: # 창이 닫히는 이벤트가 발생하였는가?
running = False # 게임이 진행되지 않음
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
character_to_x -= character_speed
elif event.key == pygame.K_RIGHT:
character_to_x += character_speed
elif event.key == pygame.K_SPACE:
# 무기 위치 정의
weapon_x_pos = character_x_pos + (character_width / 2) - (weapon_width / 2)
weapon_y_pos = character_y_pos
weapons.append([weapon_x_pos, weapon_y_pos])
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
character_to_x = 0
character_x_pos += character_to_x * dt
# 3. 게임 캐릭터 위치 정의
if character_x_pos < 0:
character_x_pos = 0
elif character_x_pos > (screen_width - character_width):
character_x_pos = screen_width - character_width
# 무기 이동 조절
weapons = [[w[0], w[1] - weapon_speed] for w in weapons]
# 천장에 닿은 무기 없애기
weapons = [[w[0], w[1]] for w in weapons if w[1] > 0]
# 풍선 위치 정의
for balloon_idx, balloon_val in enumerate(balloons):
balloon_pos_x = balloon_val["pos_x"]
balloon_pos_y = balloon_val["pos_y"]
balloon_img_idx = balloon_val["img_idx"]
balloon_size = balloon_images[balloon_img_idx].get_rect().size
balloon_width = balloon_size[0]
balloon_height = balloon_size[1]
# 좌,우 벽에 닿았을 때 공 위치 변경(튕겨나가는 효과)
if balloon_pos_x < 0 or balloon_pos_x > screen_width - balloon_width:
balloon_val["to_x"] = balloon_val["to_x"] * -1
# 스테이지에 튕겨서 올라가는 효과
if balloon_pos_y >= screen_height - stage_height - balloon_height:
balloon_val["to_y"] = balloon_val["init_speed_y"]
# 그 외의 경우에는 속도를 줄여나감(포물선 효과)
else:
balloon_val["to_y"] += 0.5
balloon_val["pos_x"] += balloon_val["to_x"]
balloon_val["pos_y"] += balloon_val["to_y"]
# 4. 충돌 처리
# 캐릭터 rect 정보 업데이트
character_rect = character.get_rect()
character_rect.left = character_x_pos
character_rect.top = character_y_pos
for balloon_idx, balloon_val in enumerate(balloons):
balloon_pos_x = balloon_val["pos_x"]
balloon_pos_y = balloon_val["pos_y"]
balloon_img_idx = balloon_val["img_idx"]
# 공 rect 정보 업데이트
balloon_rect = balloon_images[balloon_img_idx].get_rect()
balloon_rect.left = balloon_pos_x
balloon_rect.top = balloon_pos_y
# 공과 캐릭터 충돌 처리
if character_rect.colliderect(balloon_rect):
if life == 0:
running = False
break
life -= 1
character_x_pos = 10
character_y_pos = screen_height - character_height - stage_height
# 공과 무기들 충돌 처리
for weapon_idx, weapon_val in enumerate(weapons):
weapon_x_pos = weapon_val[0]
weapon_y_pos = weapon_val[1]
# 무기 rect 정보 업데이트
weapon_rect = weapon.get_rect()
weapon_rect.left = weapon_x_pos
weapon_rect.top = weapon_y_pos
# 충돌 체크
if weapon_rect.colliderect(balloon_rect):
weapon_to_remove = weapon_idx # 해당 무기를 없애기 위한 값 설정
ballons_to_remove = balloon_idx # 해당 풍선을 없애기 위한 값 설정
if balloon_img_idx < 3:
# 현재 공 크기 정보를 가지고 옴
balloon_width = balloon_rect.size[0]
balloon_height = balloon_rect.size[1]
# 나눠진 공 정보
small_balloon_rect = balloon_images[balloon_img_idx + 1].get_rect()
small_balloon_width = small_balloon_rect.size[0]
small_balloon_height = small_balloon_rect.size[1]
# 왼쪽으로 튕겨나가는 작은 공
balloons.append({
"pos_x": balloon_pos_x + (balloon_width / 2) - (small_balloon_width / 2), # 풍선의 x 좌표
"pos_y": balloon_pos_y + (balloon_height / 2) - (small_balloon_height / 2), # 풍선의 y좌표
"img_idx": balloon_img_idx + 1,
"to_x": -3, # x축 이동 방향
"to_y": -6, # y축 이동 방향
"init_speed_y": balloon_speed_y[balloon_img_idx + 1] # y 최초 속도
})
# 오른쪽으로 튕겨나가는 작은 공
balloons.append({
"pos_x": balloon_pos_x + (balloon_width / 2) - (small_balloon_width / 2), # 풍선의 x 좌표
"pos_y": balloon_pos_y + (balloon_height / 2) - (small_balloon_height / 2), # 풍선의 y좌표
"img_idx": balloon_img_idx + 1,
"to_x": +3, # x축 이동 방향
"to_y": -6, # y축 이동 방향
"init_speed_y": balloon_speed_y[balloon_img_idx + 1]
})
break
else :
continue
break
if ballons_to_remove > -1:
del balloons[ballons_to_remove]
ballons_to_remove = -1
if weapon_to_remove > -1:
del weapons[weapon_to_remove]
weapon_to_remove = -1
# 모든 공을 없앤경우 게임 종료
if len(balloons) == 0:
game_result = "Mission Complete"
running = False
# 5. 화면에 그리기 - screen.blit
screen.blit(background, (0, 0))
for weapon_x_pos, weapon_y_pos in weapons:
screen.blit(weapon, (weapon_x_pos, weapon_y_pos))
for idx, val in enumerate(balloons):
balloon_pos_x = val["pos_x"]
balloon_pos_y = val["pos_y"]
balloon_img_idx = val["img_idx"]
screen.blit(balloon_images[balloon_img_idx], (balloon_pos_x, balloon_pos_y))
screen.blit(stage, (0, stage_y_pos))
screen.blit(character, (character_x_pos, character_y_pos))
now_life = game_font.render(str(int(life)), True, (0, 0, 0))
screen.blit(now_life, (screen_width - 30, 10))
elapsed_time = (pygame.time.get_ticks() - start_ticks) / 1000
timer = game_font.render(f"Time : {int(total_time - elapsed_time)}", True, (0, 0, 0))
screen.blit(timer, (10, 10))
# 시간 초과
if total_time - elapsed_time <= 0:
game_result = "Time Over"
running = False
pygame.display.update()
# 게임 오버 메시지 저장
msg = game_font.render(game_result, True, (0, 0, 0))
msg_rect = msg.get_rect(center=(int(screen_width / 2), int(screen_height / 2)))
screen.blit(msg, msg_rect)
pygame.display.update()
pygame.time.delay(2000)
pygame.quit()
| 31.588435
| 110
| 0.604824
|
'''
Arcade-style Pang game
[Game rules]
1. The character stays at the bottom of the screen and can only move left and right
2. Pressing space fires a weapon upwards
3. One large ball appears and bounces around
4. A ball hit by a weapon splits into two smaller ones; the smallest balls disappear
5. Clearing every ball ends the game (success)
6. Touching a ball with the character ends the game (failure)
7. Exceeding the 99-second time limit ends the game (failure)
8. FPS is fixed at 30 (adjust the speed values if needed)
[Game images]
1. Background : 640 * 480 (width, height) - background.png
2. Stage : 640 * 50 - stage.png
3. Character : 33 * 60 - character.png
4. Weapon : 20 * 430 - weapon.png
5. Balloons : 160 * 160, 80 * 80, 40 * 40, 20 * 20 - ball1.png ~ ball4.png
'''
import os
import pygame
# 기본 초기화(반드시 해야하는 것들)
# pygame 을 import 하면 반드시 초기화를 해줘야한다.
pygame.init()
# 화면 크기 설정
screen_width = 640
screen_height = 480
screen = pygame.display.set_mode((screen_width, screen_height))
# 화면 타이틀 설정
pygame.display.set_caption("SPLIT BALLOON GAME")
# 폰트 설정
game_font = pygame.font.Font(None, 40)
total_time = 100
start_ticks = pygame.time.get_ticks()
# 게임 종료 메시지
game_result = "GAME OVER"
# 이동할 좌표
character_to_x = 0
# 이동 속도
character_speed = 0.6
# FPS
clock = pygame.time.Clock()
# 1. 사용자 게임 초기화 (배경 화면, 게임 이미지, 좌표, 속도, 폰트 등)
life = 3
current_path = os.path.dirname(__file__)
image_path = os.path.join(current_path, "images")
background = pygame.image.load(os.path.join(image_path, "background.png"))
stage = pygame.image.load(os.path.join(image_path, "stage.png"))
stage_size = stage.get_rect().size
stage_height = stage_size[1]
stage_y_pos = screen_height - stage_height
character = pygame.image.load(os.path.join(image_path, "character.png"))
character_size = character.get_rect().size
character_width = character_size[0]
character_height = character_size[1]
character_x_pos = screen_width / 2 - character_width / 2
character_y_pos = screen_height - character_height - stage_height
weapon = pygame.image.load(os.path.join(image_path, "weapon.png"))
weapon_size = weapon.get_rect().size
weapon_width = weapon_size[0]
# 무기는 한번에 여러 발 발사 가능
weapons = []
# 무기 이동 속도
weapon_speed = 10
# 풍선 만들기(4개 크기에 대해 따로 처리)
balloon_images = [
pygame.image.load(os.path.join(image_path, "balloon1.png")),
pygame.image.load(os.path.join(image_path, "balloon2.png")),
pygame.image.load(os.path.join(image_path, "balloon3.png")),
pygame.image.load(os.path.join(image_path, "balloon4.png"))
]
# 풍선 크기에 따른 최초 스피드
balloon_speed_y = [-18, -15, -12, -9]
# 풍선들
balloons = []
# 최초 발생 큰 풍선 추가
balloons.append({
"pos_x": 50, # 풍선의 x 좌표
"pos_y": 50, # 풍선의 y좌표
"img_idx": 0,
"to_x": 3, # x축 이동 방향
"to_y": -6, # y축 이동 방향
"init_speed_y": balloon_speed_y[0] # y 최초 속도
})
# 사라질 무기와 공 정보 저장 변수
weapon_to_remove = -1
ballons_to_remove = -1
running = True
while running:
dt = clock.tick(30) # 게임화면의 초당 프레임 수
# 2. 이벤트 처리(키보드, 마우스 등)
for event in pygame.event.get():
if event.type == pygame.QUIT: # 창이 닫히는 이벤트가 발생하였는가?
running = False # 게임이 진행되지 않음
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
character_to_x -= character_speed
elif event.key == pygame.K_RIGHT:
character_to_x += character_speed
elif event.key == pygame.K_SPACE:
# 무기 위치 정의
weapon_x_pos = character_x_pos + (character_width / 2) - (weapon_width / 2)
weapon_y_pos = character_y_pos
weapons.append([weapon_x_pos, weapon_y_pos])
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
character_to_x = 0
character_x_pos += character_to_x * dt
# 3. 게임 캐릭터 위치 정의
if character_x_pos < 0:
character_x_pos = 0
elif character_x_pos > (screen_width - character_width):
character_x_pos = screen_width - character_width
# 무기 이동 조절
weapons = [[w[0], w[1] - weapon_speed] for w in weapons]
# 천장에 닿은 무기 없애기
weapons = [[w[0], w[1]] for w in weapons if w[1] > 0]
# 풍선 위치 정의
for balloon_idx, balloon_val in enumerate(balloons):
balloon_pos_x = balloon_val["pos_x"]
balloon_pos_y = balloon_val["pos_y"]
balloon_img_idx = balloon_val["img_idx"]
balloon_size = balloon_images[balloon_img_idx].get_rect().size
balloon_width = balloon_size[0]
balloon_height = balloon_size[1]
# 좌,우 벽에 닿았을 때 공 위치 변경(튕겨나가는 효과)
if balloon_pos_x < 0 or balloon_pos_x > screen_width - balloon_width:
balloon_val["to_x"] = balloon_val["to_x"] * -1
# 스테이지에 튕겨서 올라가는 효과
if balloon_pos_y >= screen_height - stage_height - balloon_height:
balloon_val["to_y"] = balloon_val["init_speed_y"]
# 그 외의 경우에는 속도를 줄여나감(포물선 효과)
else:
balloon_val["to_y"] += 0.5
balloon_val["pos_x"] += balloon_val["to_x"]
balloon_val["pos_y"] += balloon_val["to_y"]
# 4. 충돌 처리
# 캐릭터 rect 정보 업데이트
character_rect = character.get_rect()
character_rect.left = character_x_pos
character_rect.top = character_y_pos
for balloon_idx, balloon_val in enumerate(balloons):
balloon_pos_x = balloon_val["pos_x"]
balloon_pos_y = balloon_val["pos_y"]
balloon_img_idx = balloon_val["img_idx"]
# 공 rect 정보 업데이트
balloon_rect = balloon_images[balloon_img_idx].get_rect()
balloon_rect.left = balloon_pos_x
balloon_rect.top = balloon_pos_y
# 공과 캐릭터 충돌 처리
if character_rect.colliderect(balloon_rect):
if life == 0:
running = False
break
life -= 1
character_x_pos = 10
character_y_pos = screen_height - character_height - stage_height
# 공과 무기들 충돌 처리
for weapon_idx, weapon_val in enumerate(weapons):
weapon_x_pos = weapon_val[0]
weapon_y_pos = weapon_val[1]
# 무기 rect 정보 업데이트
weapon_rect = weapon.get_rect()
weapon_rect.left = weapon_x_pos
weapon_rect.top = weapon_y_pos
# 충돌 체크
if weapon_rect.colliderect(balloon_rect):
weapon_to_remove = weapon_idx # 해당 무기를 없애기 위한 값 설정
ballons_to_remove = balloon_idx # 해당 풍선을 없애기 위한 값 설정
if balloon_img_idx < 3:
# 현재 공 크기 정보를 가지고 옴
balloon_width = balloon_rect.size[0]
balloon_height = balloon_rect.size[1]
# 나눠진 공 정보
small_balloon_rect = balloon_images[balloon_img_idx + 1].get_rect()
small_balloon_width = small_balloon_rect.size[0]
small_balloon_height = small_balloon_rect.size[1]
                    # Small balloon bouncing off to the left
                    balloons.append({
                        "pos_x": balloon_pos_x + (balloon_width / 2) - (small_balloon_width / 2), # balloon x coordinate
                        "pos_y": balloon_pos_y + (balloon_height / 2) - (small_balloon_height / 2), # balloon y coordinate
                        "img_idx": balloon_img_idx + 1,
                        "to_x": -3, # movement direction along the x axis
                        "to_y": -6, # movement direction along the y axis
                        "init_speed_y": balloon_speed_y[balloon_img_idx + 1] # initial y speed
})
                    # Small balloon bouncing off to the right
                    balloons.append({
                        "pos_x": balloon_pos_x + (balloon_width / 2) - (small_balloon_width / 2), # balloon x coordinate
                        "pos_y": balloon_pos_y + (balloon_height / 2) - (small_balloon_height / 2), # balloon y coordinate
                        "img_idx": balloon_img_idx + 1,
                        "to_x": +3, # movement direction along the x axis
                        "to_y": -6, # movement direction along the y axis
"init_speed_y": balloon_speed_y[balloon_img_idx + 1]
})
break
        else:
continue
break
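    # Deletions are applied after the loops so the weapon and balloon lists are not modified while iterating.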
if ballons_to_remove > -1:
del balloons[ballons_to_remove]
ballons_to_remove = -1
if weapon_to_remove > -1:
del weapons[weapon_to_remove]
weapon_to_remove = -1
    # End the game when all balloons are removed
if len(balloons) == 0:
game_result = "Mission Complete"
running = False
    # 5. Draw to the screen - screen.blit
screen.blit(background, (0, 0))
for weapon_x_pos, weapon_y_pos in weapons:
screen.blit(weapon, (weapon_x_pos, weapon_y_pos))
for idx, val in enumerate(balloons):
balloon_pos_x = val["pos_x"]
balloon_pos_y = val["pos_y"]
balloon_img_idx = val["img_idx"]
screen.blit(balloon_images[balloon_img_idx], (balloon_pos_x, balloon_pos_y))
screen.blit(stage, (0, stage_y_pos))
screen.blit(character, (character_x_pos, character_y_pos))
now_life = game_font.render(str(int(life)), True, (0, 0, 0))
screen.blit(now_life, (screen_width - 30, 10))
elapsed_time = (pygame.time.get_ticks() - start_ticks) / 1000
timer = game_font.render(f"Time : {int(total_time - elapsed_time)}", True, (0, 0, 0))
screen.blit(timer, (10, 10))
    # Time over
if total_time - elapsed_time <= 0:
game_result = "Time Over"
running = False
pygame.display.update()
# Store the game over message
msg = game_font.render(game_result, True, (0, 0, 0))
msg_rect = msg.get_rect(center=(int(screen_width / 2), int(screen_height / 2)))
screen.blit(msg, msg_rect)
pygame.display.update()
pygame.time.delay(2000)
pygame.quit()
| 0
| 0
| 0
|
6738ee449dfe6c373584329dd36f71a082590223
| 1,845
|
py
|
Python
|
src/roadlr/vgg_post.py
|
deepguider/RoadGPS
|
7db4669a54da98a854886b89b6922fb8c7a60f33
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2019-05-22T12:47:34.000Z
|
2019-05-23T15:43:47.000Z
|
src/roadlr/vgg_post.py
|
deepguider/RoadGPS
|
7db4669a54da98a854886b89b6922fb8c7a60f33
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/roadlr/vgg_post.py
|
deepguider/RoadGPS
|
7db4669a54da98a854886b89b6922fb8c7a60f33
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2019-08-09T06:50:46.000Z
|
2019-08-09T06:50:46.000Z
|
'''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
from ipdb import set_trace as bp
cfg = {
'VGG16_POST': [512, 512, 'M']
}
| 38.4375
| 118
| 0.552304
|
'''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
from ipdb import set_trace as bp
cfg = {
'VGG16_POST': [512, 512, 'M']
}
class VGG_POST(nn.Module):
def __init__(self, height, width):
super(VGG_POST, self).__init__() # input ==> encoder( vgg16 : h,w / 2^5 ) ==> h,w / 16
self.feature_post = self._make_layers(cfg['VGG16_POST']) # h, w / 16 / 2 ==> h,w / 32
self.feature_size = (int(height/32) * int(width/32))*512 #
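        # Flattened feature size for the first fully-connected layer: 512 channels over an (H/32 x W/32) grid.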
self.avgpool2d = nn.AvgPool2d(kernel_size=1, stride=1) # view() for FC must follow in forward()
self.fc1 = nn.Linear(self.feature_size, 512)
self.fc2 = nn.Linear(512, 128)
self.fc3 = nn.Linear(128, 32)
self.fc4 = nn.Linear(32, 3) # left, right, uncertain, 4*3
def forward(self, x): # x from pretrained VGG16,
# x : torch.Size([16, 512, , 16])
x = self.feature_post(x) # torch.Size([16, 512, 1, 1])
x = self.avgpool2d(x)
x = x.view(x.size(0), -1) # torch.Size([16, 512])
x = F.relu(self.fc1(x)) # 256
x = F.relu(self.fc2(x)) # 128
x = F.relu(self.fc3(x)) # 64
x = F.relu(self.fc4(x)) # 3
return x
def _make_layers(self, cfg):
layers = []
in_channels = 512
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
        # layers += [nn.AvgPool2d(kernel_size=1, stride=1)] # AvgPool2d is defined as an individual layer for use with GradCam
return nn.Sequential(*layers)
| 1,522
| 5
| 103
|
db034706bef1d18d60d07c19f08ed0864ca128c0
| 9,352
|
py
|
Python
|
vafator/tests/test_annotator.py
|
TRON-Bioinformatics/vafator
|
032b9daeba52b6f560b9b3d428b3eceff541d0ee
|
[
"MIT"
] | 2
|
2021-10-17T12:50:13.000Z
|
2021-11-19T06:00:57.000Z
|
vafator/tests/test_annotator.py
|
TRON-Bioinformatics/vafator
|
032b9daeba52b6f560b9b3d428b3eceff541d0ee
|
[
"MIT"
] | 8
|
2021-10-14T10:25:04.000Z
|
2022-02-15T21:14:10.000Z
|
vafator/tests/test_annotator.py
|
TRON-Bioinformatics/vafator
|
032b9daeba52b6f560b9b3d428b3eceff541d0ee
|
[
"MIT"
] | null | null | null |
import os
import pkg_resources
from unittest import TestCase
from cyvcf2 import VCF
from vafator.annotator import Annotator
import vafator.tests.utils as test_utils
import time
from logzero import logger
| 53.747126
| 120
| 0.710543
|
import os
import pkg_resources
from unittest import TestCase
from cyvcf2 import VCF
from vafator.annotator import Annotator
import vafator.tests.utils as test_utils
import time
from logzero import logger
class TestAnnotator(TestCase):
def test_annotator(self):
input_file = pkg_resources.resource_filename(__name__, "resources/test1.vcf")
output_vcf = pkg_resources.resource_filename(__name__, "resources/results/test_annotator1_output.vcf")
bam1 = pkg_resources.resource_filename(__name__, "resources/COLO_829_n1.bam")
bam2 = pkg_resources.resource_filename(__name__, "resources/COLO_829_t1.bam")
annotator = Annotator(
input_vcf=input_file, output_vcf=output_vcf, input_bams={"normal": [bam1], "tumor": [bam2]})
annotator.run()
self.assertTrue(os.path.exists(output_vcf))
n_variants_input = test_utils._get_count_variants(input_file)
n_variants_output = test_utils._get_count_variants(output_vcf)
self.assertTrue(n_variants_input == n_variants_output)
info_annotations = test_utils._get_info_fields(output_vcf)
self.assertTrue("tumor_af" in info_annotations)
self.assertTrue("normal_af" in info_annotations)
self.assertTrue("tumor_ac" in info_annotations)
self.assertTrue("normal_ac" in info_annotations)
self.assertTrue("tumor_dp" in info_annotations)
self.assertTrue("normal_dp" in info_annotations)
def test_annotator_with_multiple_bams(self):
input_file = pkg_resources.resource_filename(__name__, "resources/test1.vcf")
output_vcf = pkg_resources.resource_filename(__name__, "resources/results/test_annotator1_output.vcf")
bam1 = pkg_resources.resource_filename(__name__, "resources/COLO_829_n1.bam")
bam2 = pkg_resources.resource_filename(__name__, "resources/COLO_829_t1.bam")
annotator = Annotator(
input_vcf=input_file, output_vcf=output_vcf, input_bams={"normal": [bam1, bam2], "tumor": [bam1, bam2]})
annotator.run()
self.assertTrue(os.path.exists(output_vcf))
n_variants_input = test_utils._get_count_variants(input_file)
n_variants_output = test_utils._get_count_variants(output_vcf)
self.assertTrue(n_variants_input == n_variants_output)
info_annotations = test_utils._get_info_fields(output_vcf)
self.assertTrue("tumor_af_1" in info_annotations)
self.assertTrue("normal_af_1" in info_annotations)
self.assertTrue("tumor_ac_1" in info_annotations)
self.assertTrue("normal_ac_1" in info_annotations)
self.assertTrue("tumor_dp_1" in info_annotations)
self.assertTrue("normal_dp_1" in info_annotations)
self.assertTrue("tumor_af_2" in info_annotations)
self.assertTrue("normal_af_2" in info_annotations)
self.assertTrue("tumor_ac_2" in info_annotations)
self.assertTrue("normal_ac_2" in info_annotations)
self.assertTrue("tumor_dp_2" in info_annotations)
self.assertTrue("normal_dp_2" in info_annotations)
def test_annotator_with_prefix(self):
input_file = pkg_resources.resource_filename(__name__, "resources/test1.vcf")
output_vcf = pkg_resources.resource_filename(__name__, "resources/results/test_annotator1_output.vcf")
bam1 = pkg_resources.resource_filename(__name__, "resources/COLO_829_n1.bam")
bam2 = pkg_resources.resource_filename(__name__, "resources/COLO_829_t1.bam")
annotator = Annotator(
input_vcf=input_file, output_vcf=output_vcf,
input_bams={"RNA_normal": [bam1, bam2], "RNA_tumor": [bam1, bam2]})
annotator.run()
self.assertTrue(os.path.exists(output_vcf))
n_variants_input = test_utils._get_count_variants(input_file)
n_variants_output = test_utils._get_count_variants(output_vcf)
self.assertTrue(n_variants_input == n_variants_output)
info_annotations = test_utils._get_info_fields(output_vcf)
self.assertTrue("RNA_tumor_af_1" in info_annotations)
self.assertTrue("RNA_normal_af_1" in info_annotations)
self.assertTrue("RNA_tumor_ac_1" in info_annotations)
self.assertTrue("RNA_normal_ac_1" in info_annotations)
self.assertTrue("RNA_tumor_dp_1" in info_annotations)
self.assertTrue("RNA_normal_dp_1" in info_annotations)
self.assertTrue("RNA_tumor_af_2" in info_annotations)
self.assertTrue("RNA_normal_af_2" in info_annotations)
self.assertTrue("RNA_tumor_ac_2" in info_annotations)
self.assertTrue("RNA_normal_ac_2" in info_annotations)
self.assertTrue("RNA_tumor_dp_2" in info_annotations)
self.assertTrue("RNA_normal_dp_2" in info_annotations)
def test_annotator_with_mnvs(self):
input_file = pkg_resources.resource_filename(__name__, "resources/test_tumor_normal.vcf")
output_vcf = pkg_resources.resource_filename(__name__, "resources/results/test_tumor_normal_output.vcf")
bam1 = pkg_resources.resource_filename(__name__, "resources/COLO_829_n1.bam")
bam2 = pkg_resources.resource_filename(__name__, "resources/COLO_829_t1.bam")
annotator = Annotator(
input_vcf=input_file, output_vcf=output_vcf,
input_bams={"RNA_normal": [bam1, bam2], "RNA_tumor": [bam1, bam2]})
annotator.run()
self.assertTrue(os.path.exists(output_vcf))
n_variants_input = test_utils._get_count_variants(input_file)
n_variants_output = test_utils._get_count_variants(output_vcf)
self.assertTrue(n_variants_input == n_variants_output)
info_annotations = test_utils._get_info_fields(output_vcf)
self.assertTrue("RNA_tumor_af_1" in info_annotations)
self.assertTrue("RNA_normal_af_1" in info_annotations)
self.assertTrue("RNA_tumor_ac_1" in info_annotations)
self.assertTrue("RNA_normal_ac_1" in info_annotations)
self.assertTrue("RNA_tumor_dp_1" in info_annotations)
self.assertTrue("RNA_normal_dp_1" in info_annotations)
self.assertTrue("RNA_tumor_af_2" in info_annotations)
self.assertTrue("RNA_normal_af_2" in info_annotations)
self.assertTrue("RNA_tumor_ac_2" in info_annotations)
self.assertTrue("RNA_normal_ac_2" in info_annotations)
self.assertTrue("RNA_tumor_dp_2" in info_annotations)
self.assertTrue("RNA_normal_dp_2" in info_annotations)
def _get_info_at(self, input_file, chromosome, position, annotation):
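        # Helper: return the INFO value of the given annotation for the variant at (chromosome, position), or {} if not found.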
vcf = VCF(input_file)
self.assertIsNotNone(vcf)
for v in vcf:
if v.POS == position and v.CHROM == chromosome:
vcf.close()
return v.INFO.get(annotation)
vcf.close()
return {}
def test_nist(self):
input_file = pkg_resources.resource_filename(
__name__, "resources/project.NIST.hc.snps.indels.chr1_1000000_2000000.vcf")
output_vcf = pkg_resources.resource_filename(
__name__, "resources/results/project.NIST.hc.snps.indels.chr1_1000000_2000000.vaf.vcf")
bam_file = pkg_resources.resource_filename(
__name__,
"resources/project.NIST_NIST7035_H7AP8ADXX_TAAGGCGA_1_NA12878.bwa.markDuplicates.chr1_1000000_2000000.bam")
start = time.time()
annotator = Annotator(input_vcf=input_file, output_vcf=output_vcf, input_bams={"normal": [bam_file]})
annotator.run()
duration = time.time() - start
logger.info("Duration {} seconds".format(round(duration, 3)))
self.assertTrue(os.path.exists(output_vcf))
n_variants_input = test_utils._get_count_variants(input_file)
n_variants_output = test_utils._get_count_variants(output_vcf)
self.assertTrue(n_variants_input == n_variants_output)
info_annotations = test_utils._get_info_fields(output_vcf)
self.assertTrue("normal_af" in info_annotations)
self.assertTrue("normal_ac" in info_annotations)
self.assertTrue("normal_dp" in info_annotations)
def test_annotator_bams_order(self):
input_file = pkg_resources.resource_filename(__name__, "resources/test1.vcf")
output_vcf = pkg_resources.resource_filename(__name__, "resources/results/test_annotator1_output.vcf")
output_vcf_2 = pkg_resources.resource_filename(__name__, "resources/results/test_annotator2_output.vcf")
bam1 = pkg_resources.resource_filename(__name__, "resources/COLO_829_n1.bam")
bam2 = pkg_resources.resource_filename(__name__, "resources/COLO_829_t1.bam")
Annotator(input_vcf=input_file, output_vcf=output_vcf, input_bams={"normal": [bam1], "tumor": [bam2]}).run()
Annotator(input_vcf=input_file, output_vcf=output_vcf_2, input_bams={"tumor": [bam2], "normal": [bam1]}).run()
self.assertTrue(os.path.exists(output_vcf))
self.assertTrue(os.path.exists(output_vcf_2))
vcf = VCF(output_vcf)
vcf_2 = VCF(output_vcf_2)
for v, v2 in zip(vcf, vcf_2):
self.assertEqual(v.INFO["normal_dp"], v2.INFO["normal_dp"])
self.assertEqual(v.INFO["tumor_dp"], v2.INFO["tumor_dp"])
| 8,901
| 9
| 228
|
817c9d802649e5563674be39ac0c3499f6cde805
| 4,358
|
py
|
Python
|
orio-0.1.0/src/main/tuner/search/exhaustive/exhaustive.py
|
nn4ip/pluto
|
92ace2441b6b8d6b66d1bb7ef3e893df4ff23a4d
|
[
"MIT"
] | 183
|
2017-01-28T17:23:29.000Z
|
2022-03-25T08:58:56.000Z
|
orio-0.1.0/src/main/tuner/search/exhaustive/exhaustive.py
|
nn4ip/pluto
|
92ace2441b6b8d6b66d1bb7ef3e893df4ff23a4d
|
[
"MIT"
] | 70
|
2017-03-29T09:51:04.000Z
|
2021-12-28T07:00:44.000Z
|
orio-0.1.0/src/main/tuner/search/exhaustive/exhaustive.py
|
nn4ip/pluto
|
92ace2441b6b8d6b66d1bb7ef3e893df4ff23a4d
|
[
"MIT"
] | 57
|
2017-03-29T07:27:58.000Z
|
2022-01-14T03:13:39.000Z
|
#
# Implementation of the exhaustive search algorithm
#
import sys, time
import main.tuner.search.search
#-----------------------------------------------------
class Exhaustive(main.tuner.search.search.Search):
'''The search engine that uses an exhaustive search approach'''
def __init__(self, cfrags, axis_names, axis_val_ranges, constraint, time_limit, total_runs,
search_opts, cmd_line_opts, ptcodegen, ptdriver, odriver):
'''To instantiate an exhaustive search engine'''
main.tuner.search.search.Search.__init__(self, cfrags, axis_names, axis_val_ranges,
constraint, time_limit, total_runs, search_opts,
cmd_line_opts, ptcodegen, ptdriver, odriver)
# read all algorithm-specific arguments
self.__readAlgoArgs()
# complain if the total number of search runs is defined (i.e. exhaustive search
# only needs to be run once)
if self.total_runs > 1:
print ('error: the total number of %s search runs must be one (or can be undefined)' %
self.__class__.__name__)
sys.exit(1)
#--------------------------------------------------
def __readAlgoArgs(self):
'''To read all algorithm-specific arguments'''
for vname, rhs in self.search_opts.iteritems():
print ('error: unrecognized %s algorithm-specific argument: "%s"' %
(self.__class__.__name__, vname))
sys.exit(1)
#--------------------------------------------------
def __getNextCoord(self, coord):
'''
Return the next neighboring coordinate to be considered in the search space.
Return None if all coordinates in the search space have been visited.
'''
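        # Advance like an odometer: increment the first dimension that still has room; earlier dimensions wrap back to zero.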
next_coord = coord[:]
for i in range(0, self.total_dims):
ipoint = next_coord[i]
iuplimit = self.dim_uplimits[i]
if ipoint < iuplimit-1:
next_coord[i] += 1
break
else:
next_coord[i] = 0
if i == self.total_dims - 1:
return None
return next_coord
#--------------------------------------------------
def searchBestCoord(self):
'''
        To explore the search space and return the coordinate that yields the best performance
(i.e. minimum performance cost).
'''
if self.verbose: print '\n----- begin exhaustive search -----'
# record the best coordinate and its best performance cost
best_coord = None
best_perf_cost = self.MAXFLOAT
# start the timer
start_time = time.time()
# start from the origin coordinate (i.e. [0,0,...])
coord = [0] * self.total_dims
# evaluate every coordinate in the search space
while True:
# determine the performance cost of the current coordinate
perf_cost = self.getPerfCost(coord)
if self.verbose: print 'coordinate: %s, cost: %s' % (coord, perf_cost)
# compare to the best result so far
if perf_cost < best_perf_cost:
best_coord = coord
best_perf_cost = perf_cost
if self.verbose: print '>>>> best coordinate found: %s, cost: %s' % (coord,perf_cost)
# check if the time is up
if self.time_limit > 0 and (time.time()-start_time) > self.time_limit:
break
# move to the next coordinate in the search space
coord = self.__getNextCoord(coord)
# check if all coordinates have been visited
if coord == None:
break
# compute the total search time
search_time = time.time() - start_time
if self.verbose: print '----- end exhaustive search -----'
if self.verbose: print '----- begin summary -----'
if self.verbose: print ' best coordinate: %s, cost: %s' % (best_coord, best_perf_cost)
if self.verbose: print ' total search time: %.2f seconds' % search_time
if self.verbose: print '----- end summary -----'
# return the best coordinate
return best_coord
| 36.621849
| 101
| 0.544516
|
#
# Implementation of the exhaustive search algorithm
#
import sys, time
import main.tuner.search.search
#-----------------------------------------------------
class Exhaustive(main.tuner.search.search.Search):
'''The search engine that uses an exhaustive search approach'''
def __init__(self, cfrags, axis_names, axis_val_ranges, constraint, time_limit, total_runs,
search_opts, cmd_line_opts, ptcodegen, ptdriver, odriver):
'''To instantiate an exhaustive search engine'''
main.tuner.search.search.Search.__init__(self, cfrags, axis_names, axis_val_ranges,
constraint, time_limit, total_runs, search_opts,
cmd_line_opts, ptcodegen, ptdriver, odriver)
# read all algorithm-specific arguments
self.__readAlgoArgs()
# complain if the total number of search runs is defined (i.e. exhaustive search
# only needs to be run once)
if self.total_runs > 1:
print ('error: the total number of %s search runs must be one (or can be undefined)' %
self.__class__.__name__)
sys.exit(1)
#--------------------------------------------------
def __readAlgoArgs(self):
'''To read all algorithm-specific arguments'''
for vname, rhs in self.search_opts.iteritems():
print ('error: unrecognized %s algorithm-specific argument: "%s"' %
(self.__class__.__name__, vname))
sys.exit(1)
#--------------------------------------------------
def __getNextCoord(self, coord):
'''
Return the next neighboring coordinate to be considered in the search space.
Return None if all coordinates in the search space have been visited.
'''
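        # Advance like an odometer: increment the first dimension that still has room; earlier dimensions wrap back to zero.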
next_coord = coord[:]
for i in range(0, self.total_dims):
ipoint = next_coord[i]
iuplimit = self.dim_uplimits[i]
if ipoint < iuplimit-1:
next_coord[i] += 1
break
else:
next_coord[i] = 0
if i == self.total_dims - 1:
return None
return next_coord
#--------------------------------------------------
def searchBestCoord(self):
'''
        To explore the search space and return the coordinate that yields the best performance
(i.e. minimum performance cost).
'''
if self.verbose: print '\n----- begin exhaustive search -----'
# record the best coordinate and its best performance cost
best_coord = None
best_perf_cost = self.MAXFLOAT
# start the timer
start_time = time.time()
# start from the origin coordinate (i.e. [0,0,...])
coord = [0] * self.total_dims
# evaluate every coordinate in the search space
while True:
# determine the performance cost of the current coordinate
perf_cost = self.getPerfCost(coord)
if self.verbose: print 'coordinate: %s, cost: %s' % (coord, perf_cost)
# compare to the best result so far
if perf_cost < best_perf_cost:
best_coord = coord
best_perf_cost = perf_cost
if self.verbose: print '>>>> best coordinate found: %s, cost: %s' % (coord,perf_cost)
# check if the time is up
if self.time_limit > 0 and (time.time()-start_time) > self.time_limit:
break
# move to the next coordinate in the search space
coord = self.__getNextCoord(coord)
# check if all coordinates have been visited
if coord == None:
break
# compute the total search time
search_time = time.time() - start_time
if self.verbose: print '----- end exhaustive search -----'
if self.verbose: print '----- begin summary -----'
if self.verbose: print ' best coordinate: %s, cost: %s' % (best_coord, best_perf_cost)
if self.verbose: print ' total search time: %.2f seconds' % search_time
if self.verbose: print '----- end summary -----'
# return the best coordinate
return best_coord
| 0
| 0
| 0
|
f2dc1850d674cc25fd856fe2c9f15426cb04a079
| 1,010
|
py
|
Python
|
app.py
|
Manasi2001/Smart-Irrigation-System
|
9de4b44256e6f998f7dde0c721ce34bf0000f360
|
[
"MIT"
] | null | null | null |
app.py
|
Manasi2001/Smart-Irrigation-System
|
9de4b44256e6f998f7dde0c721ce34bf0000f360
|
[
"MIT"
] | null | null | null |
app.py
|
Manasi2001/Smart-Irrigation-System
|
9de4b44256e6f998f7dde0c721ce34bf0000f360
|
[
"MIT"
] | null | null | null |
import os
import joblib
classifier = joblib.load(r'smartirrigation.pkl')
# importing Flask and other modules
from flask import Flask, request, render_template
import numpy as np
# Flask constructor
app = Flask(__name__)
# A decorator used to tell the application
# which URL is associated function
@app.route('/', methods = ["GET", "POST"])
if __name__=='__main__':
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
| 31.5625
| 67
| 0.622772
|
import os
import joblib
classifier = joblib.load(r'smartirrigation.pkl')
# importing Flask and other modules
from flask import Flask, request, render_template
import numpy as np
# Flask constructor
app = Flask(__name__)
# A decorator used to tell the application
# which URL is associated function
@app.route('/', methods = ["GET", "POST"])
def green_hacks():
if request.method == "POST":
m_c = request.form.get("m_c")
temp = request.form.get("temp")
p = classifier.predict(np.array([m_c, temp]).reshape(-2,2))
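        # The classifier receives a single (moisture, temperature) sample; a prediction of 1 means the pump should be switched on.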
if p == 0:
output = "No need for the pump to be on."
return render_template('index.html', output=output)
elif p == 1:
output = "Switch on your pump immediately."
return render_template('index.html', output=output)
return render_template('index.html')
if __name__=='__main__':
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
| 507
| 0
| 23
|
4f076ea07d8ac8daa548c2e9e7e0dff9a07548bd
| 4,761
|
py
|
Python
|
bebotPlatform/settings.py
|
ElitosGon/bebotPlatform
|
f10b37ecc6cc748a719efc639faa9a2907357682
|
[
"Apache-2.0"
] | null | null | null |
bebotPlatform/settings.py
|
ElitosGon/bebotPlatform
|
f10b37ecc6cc748a719efc639faa9a2907357682
|
[
"Apache-2.0"
] | 6
|
2020-06-05T17:08:29.000Z
|
2022-03-11T23:14:15.000Z
|
bebotPlatform/settings.py
|
ElitosGon/bebotPlatform
|
f10b37ecc6cc748a719efc639faa9a2907357682
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for bebotPlatform project.
Generated by 'django-admin startproject' using Django 2.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.abspath(os.path.join(BASE_DIR, '..'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w7hotrp-fj11%$fp1gc)uoqk3zwx@5nmucgy-2sd&aht4gt80c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1','142.93.189.104']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.sites',
'webPlatform',
'vote',
'actstream',
'notifications',
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'bebotPlatform.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.i18n',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bebotPlatform.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'bebotDB',
'USER': 'bebot',
'PASSWORD': 'bebot',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
TIME_ZONE = 'America/Santiago'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# File handler
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Language
LANGUAGE_CODE = 'es'
LANGUAGES = [
('es', _('Spanish'))
]
LOCALE_PATH = (os.path.join(BASE_DIR,'locale'))
# Email setting
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = 'BeBot.Project@gmail.com'
EMAIL_HOST_PASSWORD = 'bebotproject'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
SMTP_ENABLED = True
EMAIL_HOST_MEDGO = 'BeBot.Project@gmail.com'
TEMPLATED_EMAIL_TEMPLATE_DIR = 'templated_email/' #use '' for top level template dir, ensure there is a trailing slash
TEMPLATED_EMAIL_FILE_EXTENSION = 'email'
# Images Avatar
DJANGORESIZED_DEFAULT_KEEP_META = True
DJANGORESIZED_DEFAULT_FORCE_FORMAT = 'JPEG'
# Google
GOOGLE_RECAPTCHA_SECRET_KEY = '6LfuJEAUAAAAAJdnw0LxAKSlMbhEeYt8ijfoUNyl'
# ACTSTREAM
ACTSTREAM_SETTINGS = {
'FETCH_RELATIONS': True,
'USE_PREFETCH': True,
'USE_JSONFIELD': True,
'GFK_FETCH_DEPTH': 1,
}
# Notification
NOTIFICATIONS_SOFT_DELETE=True
| 26.45
| 118
| 0.705944
|
"""
Django settings for bebotPlatform project.
Generated by 'django-admin startproject' using Django 2.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.abspath(os.path.join(BASE_DIR, '..'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w7hotrp-fj11%$fp1gc)uoqk3zwx@5nmucgy-2sd&aht4gt80c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1','142.93.189.104']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.sites',
'webPlatform',
'vote',
'actstream',
'notifications',
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'bebotPlatform.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.i18n',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bebotPlatform.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'bebotDB',
'USER': 'bebot',
'PASSWORD': 'bebot',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
TIME_ZONE = 'America/Santiago'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# File handler
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Language
LANGUAGE_CODE = 'es'
LANGUAGES = [
('es', _('Spanish'))
]
LOCALE_PATH = (os.path.join(BASE_DIR,'locale'))
# Email setting
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = 'BeBot.Project@gmail.com'
EMAIL_HOST_PASSWORD = 'bebotproject'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
SMTP_ENABLED = True
EMAIL_HOST_MEDGO = 'BeBot.Project@gmail.com'
TEMPLATED_EMAIL_TEMPLATE_DIR = 'templated_email/' #use '' for top level template dir, ensure there is a trailing slash
TEMPLATED_EMAIL_FILE_EXTENSION = 'email'
# Images Avatar
DJANGORESIZED_DEFAULT_KEEP_META = True
DJANGORESIZED_DEFAULT_FORCE_FORMAT = 'JPEG'
# Google
GOOGLE_RECAPTCHA_SECRET_KEY = '6LfuJEAUAAAAAJdnw0LxAKSlMbhEeYt8ijfoUNyl'
# ACTSTREAM
ACTSTREAM_SETTINGS = {
'FETCH_RELATIONS': True,
'USE_PREFETCH': True,
'USE_JSONFIELD': True,
'GFK_FETCH_DEPTH': 1,
}
# Notification
NOTIFICATIONS_SOFT_DELETE=True
| 0
| 0
| 0
|
e19efff62172edb798c2b0bad7a319dbf88a5318
| 3,862
|
py
|
Python
|
flowsa/Census_CBP.py
|
cchiq/flowsa
|
fc21e8da7c3ba66ca4ae4a0c72f568af7ef5e6c0
|
[
"CC0-1.0"
] | null | null | null |
flowsa/Census_CBP.py
|
cchiq/flowsa
|
fc21e8da7c3ba66ca4ae4a0c72f568af7ef5e6c0
|
[
"CC0-1.0"
] | null | null | null |
flowsa/Census_CBP.py
|
cchiq/flowsa
|
fc21e8da7c3ba66ca4ae4a0c72f568af7ef5e6c0
|
[
"CC0-1.0"
] | null | null | null |
# Census_CBP.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Pulls County Business Patterns data in NAICS from the Census Bureau
Writes out to various FlowBySector class files for these data items
EMP = Number of employees, Class = Employment
PAYANN = Annual payroll ($1,000), Class = Money
ESTAB = Number of establishments, Class = Other
This script is designed to run with a configuration parameter
--year = 'year' e.g. 2015
"""
import pandas as pd
import numpy as np
import json
#from flowsa.datapull import build_url, make_http_request, load_from_requests_response
from flowsa.common import log, flow_by_activity_fields, get_all_state_FIPS_2, datapath
| 38.62
| 86
| 0.631538
|
# Census_CBP.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Pulls County Business Patterns data in NAICS from the Census Bureau
Writes out to various FlowBySector class files for these data items
EMP = Number of employees, Class = Employment
PAYANN = Annual payroll ($1,000), Class = Money
ESTAB = Number of establishments, Class = Other
This script is designed to run with a configuration parameter
--year = 'year' e.g. 2015
"""
import pandas as pd
import numpy as np
import json
#from flowsa.datapull import build_url, make_http_request, load_from_requests_response
from flowsa.common import log, flow_by_activity_fields, get_all_state_FIPS_2, datapath
def Census_CBP_URL_helper(build_url, config, args):
urls_census = []
FIPS_2 = get_all_state_FIPS_2()['FIPS_2']
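    # Build one request URL per state FIPS code; the NAICS vintage substituted below depends on the data year.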
for c in FIPS_2:
url = build_url
url = url.replace("__stateFIPS__", c)
# specified NAICS code year depends on year of data
if args["year"] in ['2017']:
url = url.replace("__NAICS__", "NAICS2017")
if args["year"] in ['2012', '2013', '2014', '2015', '2016']:
url = url.replace("__NAICS__", "NAICS2012")
if args["year"] in ['2010', '2011']:
url = url.replace("__NAICS__", "NAICS2007")
urls_census.append(url)
return urls_census
def census_cbp_call(url, cbp_response, args):
cbp_json = json.loads(cbp_response.text)
# convert response to dataframe
df_census = pd.DataFrame(data=cbp_json[1:len(cbp_json)], columns=cbp_json[0])
return df_census
def census_cbp_parse(dataframe_list, args):
# concat dataframes
df = pd.concat(dataframe_list, sort=False)
# Add year
df['Year'] = args["year"]
# convert county='999' to line for full state
df.loc[df['county'] == '999', 'county'] = '000'
# Make FIPS as a combo of state and county codes
df['Location'] = df['state'] + df['county']
# now drop them
df = df.drop(columns=['state', 'county'])
# rename NAICS column and add NAICS year as description
if 'NAICS2007' in df.columns:
df = df.rename(columns={"NAICS2007": "ActivityProducedBy"})
df['Description'] = 'NAICS2007'
if 'NAICS2012' in df.columns:
df = df.rename(columns={"NAICS2012": "ActivityProducedBy"})
df['Description'] = 'NAICS2012'
if 'NAICS2017' in df.columns:
df = df.rename(columns={"NAICS2017": "ActivityProducedBy"})
df['Description'] = 'NAICS2017'
# drop all sectors record
df = df[df['ActivityProducedBy'] != "00"]
# rename columns
df = df.rename(columns={'ESTAB': 'Number of establishments',
'EMP': 'Number of employees',
'PAYANN': 'Annual payroll'})
    # use the "melt" function to convert columns into rows
df = df.melt(id_vars=["Location", "ActivityProducedBy", "Year", "Description"],
var_name="FlowName",
value_name="FlowAmount")
# specify unit based on flowname
df['Unit'] = np.where(df["FlowName"] == 'Annual payroll', "USD", "p")
# specify class
df.loc[df['FlowName'] == 'Number of employees', 'Class'] = 'Employment'
df.loc[df['FlowName'] == 'Number of establishments', 'Class'] = 'Other'
df.loc[df['FlowName'] == 'Annual payroll', 'Class'] = 'Money'
# add location system based on year of data
if args['year'] >= '2019':
df['LocationSystem'] = 'FIPS_2019'
elif '2015' <= args['year'] < '2019':
df['LocationSystem'] = 'FIPS_2015'
elif '2013' <= args['year'] < '2015':
df['LocationSystem'] = 'FIPS_2013'
elif '2010' <= args['year'] < '2013':
df['LocationSystem'] = 'FIPS_2010'
# hard code data
df['SourceName'] = 'Census_CBP'
# Add tmp DQ scores
df['DataReliability'] = 5
df['DataCollection'] = 5
df['Compartment'] = None
return df
| 3,124
| 0
| 69
|
5dcd64d4cbce7570663b74758a9b37abc2510d4e
| 1,363
|
py
|
Python
|
packages/pyright-internal/src/tests/samples/variadicTypeVar2.py
|
sasano8/pyright
|
e804f324ee5dbd25fd37a258791b3fd944addecd
|
[
"MIT"
] | 4,391
|
2019-05-07T01:18:57.000Z
|
2022-03-31T20:45:44.000Z
|
packages/pyright-internal/src/tests/samples/variadicTypeVar2.py
|
sasano8/pyright
|
e804f324ee5dbd25fd37a258791b3fd944addecd
|
[
"MIT"
] | 2,740
|
2019-05-07T03:29:30.000Z
|
2022-03-31T12:57:46.000Z
|
packages/pyright-internal/src/tests/samples/variadicTypeVar2.py
|
sasano8/pyright
|
e804f324ee5dbd25fd37a258791b3fd944addecd
|
[
"MIT"
] | 455
|
2019-05-07T12:55:14.000Z
|
2022-03-31T17:09:15.000Z
|
# This sample tests various conditions under which Unpack
# can and cannot be used.
# pyright: reportMissingModuleSource=false
from typing import Generic, List, Tuple, TypeVar, Union
from typing_extensions import TypeVarTuple, Unpack
_T = TypeVar("_T")
_Xs = TypeVarTuple("_Xs")
# This should generate an error.
x: List[Unpack[_Xs]] = []
# This should generate an error.
y: Unpack[_Xs] = ()
# This should generate an error.
z: Unpack = ()
# This should generate two errors because _Xs must be unpacked.
# def func1(value: Array[*_Xs]) -> Tuple[complex, *_Xs, str]:
# ...
| 21.296875
| 63
| 0.62876
|
# This sample tests various conditions under which Unpack
# can and cannot be used.
# pyright: reportMissingModuleSource=false
from typing import Generic, List, Tuple, TypeVar, Union
from typing_extensions import TypeVarTuple, Unpack
_T = TypeVar("_T")
_Xs = TypeVarTuple("_Xs")
class ClassA(Generic[_T, Unpack[_Xs]]):
def __init__(self, *shape: Unpack[_Xs]):
self.x: Tuple[Unpack[_Xs]] = shape
# This should generate an error
self.y: _Xs = shape
def func1(self) -> Union[Unpack[_Xs]]:
...
# This should generate an error
def func2(self) -> Tuple[Unpack[_T]]:
...
# This should generate an error
def func3(self) -> Tuple[Unpack[int]]:
...
# This should generate an error
def func4(self) -> Tuple[Unpack[_Xs, _Xs]]:
...
# This should generate an error.
a: List[Unpack[_Xs]] = []
# This should generate an error.
b: Unpack[_Xs] = ()
# This should generate an error.
x: List[Unpack[_Xs]] = []
# This should generate an error.
y: Unpack[_Xs] = ()
# This should generate an error.
z: Unpack = ()
class Array(Generic[Unpack[_Xs]]):
...
# This should generate two errors because _Xs must be unpacked.
def func0(value: Array[_Xs]) -> Tuple[complex, _Xs, str]:
...
# def func1(value: Array[*_Xs]) -> Tuple[complex, *_Xs, str]:
# ...
| 295
| 411
| 68
|
b3129c41be6df0f67c689237bce862f41d0f6489
| 12,097
|
py
|
Python
|
dockeroo/docker/gentoo_bootstrap.py
|
dockeroo/dockeroo
|
6395b4008d0d3d6cabdfec9d52ce448b095fdae1
|
[
"Apache-2.0"
] | 4
|
2016-07-25T09:21:32.000Z
|
2022-02-11T19:11:23.000Z
|
dockeroo/docker/gentoo_bootstrap.py
|
dockeroo/dockeroo
|
6395b4008d0d3d6cabdfec9d52ce448b095fdae1
|
[
"Apache-2.0"
] | null | null | null |
dockeroo/docker/gentoo_bootstrap.py
|
dockeroo/dockeroo
|
6395b4008d0d3d6cabdfec9d52ce448b095fdae1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Giacomo Cariello. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zc.buildout import UserError
from dockeroo import BaseGroupRecipe
from dockeroo.docker import BaseDockerSubRecipe, Archive
from dockeroo.utils import merge, string_as_bool
class DockerGentooBootstrapRecipe(BaseGroupRecipe):
"""
This recipe creates a docker image that contains a full operating system (typically Gentoo).
Such builder image can be used to create further docker images with :py:class:`dockeroo.docker.gentoo_build.DockerGentooBuildRecipe` recipe.
The recipe executes the following tasks:
1. Extract **archives** into a docker image.
2. Create a container from such image.
3. Install "freeze" binary into the container. This is a simple no-op binary executable.
4. If a **layout** is defined, copy layout contents onto container's root.
5. Execute **build-script**.
6. If **commit** is enabled, commit modifications of image.
.. describe:: Usage
The following example buildout part shows how to build a full Gentoo amd64 docker image.
.. code-block:: ini
[crossdev_builder.img]
crossdev-arch = x86_64
crossdev-platform = x86_64
crossdev-processor = x86_64
crossdev-variant = docker
crossdev-abi = gnu
crossdev-gentoo-profile = no-multilib
crossdev-gentoo-platform = amd64
crossdev-gentoo-platform-flavor = amd64
recipe = dockeroo:docker.gentoo-bootstrap
image = dockeroo/builder_${:crossdev-arch}:latest
container = dockeroo_builder_${:crossdev-arch}
volumes-from = ${distfiles:container}
gentoo-platform = amd64
gentoo-platform-flavor = amd64-nomultilib
gentoo-version = 20160414
archives =
http://distfiles.gentoo.org/releases/${:gentoo-platform}/autobuilds/${:gentoo-version}/stage3-${:gentoo-platform-flavor}-${:gentoo-version}.tar.bz2
commit = true
keep = true
layout = ${buildout:containers-directory}/builder_${:crossdev-arch}
build-script =
test -d /usr/portage/profiles || emerge-webrsync
emerge --sync
emerge -uDNvkb world
emerge -nNuvkb sys-devel/crossdev
test -e /usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}/.crossdev || \
crossdev -S -v -t ${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi} --ov-output /usr/local/portage-crossdev-${:crossdev-arch} -P -kb && \
touch /usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}/.crossdev
(cd /usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}/etc/portage && \
rm -f make.profile && ln -s /usr/portage/profiles/default/linux/${:crossdev-gentoo-platform}/13.0/${:crossdev-gentoo-profile} make.profile)
ROOT=/usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi} \
${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}-emerge -nuvkb1 --keep-going sys-apps/baselayout
ROOT=/usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi} \
${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}-emerge -nuvkb1 --keep-going $(egrep '^[a-z]+' /usr/portage/profiles/default/linux/packages.build)
ROOT=/usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi} \
${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}-emerge -nuvkb1 --keep-going sys-apps/portage sys-apps/openrc net-misc/netifrc app-portage/gentoolkit
chroot-${:crossdev-arch}-docker -c locale-gen
chroot-${:crossdev-arch}-docker -c env-update
To use the above part, several other files are necessary, to be copied in via **layout**::
/etc/locale.gen
/etc/portage/repos.conf/crossdev.conf
/etc/portage/repos.conf/local.conf
/usr/local/bin/chroot-x86_64-docker
/usr/local/portage-crossdev-x86_64/metadata/layout.conf
/usr/local/portage-crossdev-x86_64/profiles/repo_name
/usr/x86_64-docker-linux-gnu/dockeroo-root/.keep
/usr/x86_64-docker-linux-gnu/etc/bash/bashrc.d/emerge-chroot
/usr/x86_64-docker-linux-gnu/etc/locale.gen
/usr/x86_64-docker-linux-gnu/etc/portage/make.conf
Here's an example of chroot-x86_64-docker script, useful to build docker images with :py:class:`dockeroo.docker.gentoo_build.DockerGentooBuildRecipe` recipe:
.. code-block:: bash
#!/bin/sh
cd /usr/x86_64-docker-linux-gnu
set -e
mkdir -p dev proc sys tmp etc/portage/repos.conf usr/portage usr/local/portage-crossdev-x86_64/packages var/lib/layman
mount -o bind /dev dev
mount -o bind /dev/pts dev/pts
mount -o bind /dev/shm dev/shm
mount -o bind /etc/portage/repos.conf etc/portage/repos.conf
mount -o bind /proc proc
mount -o bind /sys sys
mount -o bind /tmp tmp
mount -o bind /usr/portage usr/portage
mount -o bind /usr/portage/distfiles usr/portage/distfiles
mount -o bind /usr/local/portage-crossdev-x86_64 usr/local/portage-crossdev-x86_64
mount -o bind /usr/local/portage-crossdev-x86_64/packages usr/local/portage-crossdev-x86_64/packages
mount -o bind /var/lib/layman var/lib/layman
cp /etc/resolv.conf etc/resolv.conf
set +e
chroot . /bin/bash --login "$@"
ret=$?
set -e
umount var/lib/layman
umount usr/local/portage-crossdev-x86_64/packages
umount usr/local/portage-crossdev-x86_64
umount usr/portage/distfiles
umount usr/portage
umount tmp
umount sys
umount proc
umount etc/portage/repos.conf
umount dev/shm
umount dev/pts
umount dev
set +e
exit $ret
.. describe:: Configuration options
archives
List of URLs of operating system initial filesystem contents (Gentoo stageX).
crossdev-platform
Name of destination platform. If enabled, allows automatic configuration of QEMU binfmt mapping.
command
Command to execute upon container starting. Defaults to "/bin/freeze".
commit
Commit image changes after recipe install execution. Defaults to false.
container
Name of build container.
keep
Don't delete image upon uninstall.
layout
Copies a local folder to container's root with **docker cp**.
machine-name
Docker machine where **build-image** and **base-image** reside.
Defaults to DOCKER_MACHINE_NAME environment variable or "default" if unset.
name
Name of destination image. Defaults to part name.
build-script
Execute this script after extraction of archives filesystem and import of layout.
tag
Tag name. Defaults to "latest".
timeout
**docker** command timeout.
tty
Assign a **Pseudo-TTY** to the container.
volumes
Volumes to bind mount, one per line. Format is <path>:<mountpoint>.
volumes-from
Mount volumes from specified container.
"""
subrecipe_class = DockerGentooBootstrapSubRecipe
| 42.745583
| 182
| 0.630322
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Giacomo Cariello. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zc.buildout import UserError
from dockeroo import BaseGroupRecipe
from dockeroo.docker import BaseDockerSubRecipe, Archive
from dockeroo.utils import merge, string_as_bool
class DockerGentooBootstrapSubRecipe(BaseDockerSubRecipe): # pylint: disable=too-many-instance-attributes
def initialize(self):
super(DockerGentooBootstrapSubRecipe, self).initialize()
self.tag = self.options.get("tag", "latest")
self.name += ':{}'.format(self.tag)
self.command = self.options.get("command", "/bin/freeze")
self.commit = string_as_bool(self.options.get('commit', False))
self.container = self.options.get('container',
"{}_bootstrap".format(self.name.replace(':', '_')))
self.keep = string_as_bool(self.options.get('keep', False))
self.layout = self.options.get('layout', None)
self.crossdev_platform = self.options.get(
'crossdev-platform', self.engine.platform)
self.build_shell = self.options.get('build-shell', self.shell)
self.build_script = "#!{}\n{}".format(
self.build_shell,
'\n'.join([_f for _f in
[x.strip() for x in
self.options.get('build-script').replace('$$', '$').splitlines()]
if _f])) \
if self.options.get('build-script', None) is not None else None
self.tty = string_as_bool(self.options.get('tty', False))
self.archives = []
for url, prefix, md5sum in [merge([None, None, None], x.split())[:3] for x in
[_f for _f in
[x.strip() for x in
self.options.get(
'archives', self.options.get('archive', '')).splitlines()]
if _f]]:
if prefix == '/':
prefix = None
self.archives.append(
Archive(url=url, prefix=prefix, md5sum=md5sum))
self.volumes = [y for y in [x.strip().split(
':', 1) for x in self.options.get('volumes', '').splitlines()] if y[0]]
self.volumes_from = self.options.get('volumes-from', None)
def install(self):
if not any([x for x in self.engine.images() if self.name == x['image']]):
if not self.archives:
raise UserError(
"Image does not exist and no source specified.")
for archive in self.archives:
archive.download(self.recipe.buildout)
self.engine.import_archives(self.name, *self.archives)
if not self.engine.containers(include_stopped=True, name=self.container):
self.engine.create_container(self.container,
self.name, command=self.command,
privileged=True, tty=self.tty, volumes=self.volumes,
volumes_from=self.volumes_from)
# else:
# raise RuntimeError("Container \"{}\" already exists".format(self.container))
self.engine.install_freeze(self.container)
if self.layout:
self.engine.load_layout(self.container, self.layout)
self.engine.start_container(self.container)
if self.build_script:
if self.crossdev_platform != self.engine.platform:
if self.engine.machine is not None:
self.engine.machine.config_binfmt(self.crossdev_platform)
else:
raise UserError("docker-machine is not defined but binfmt configuration is needed.")
self.engine.run_script(self.container, self.build_script)
if self.commit:
self.engine.commit_container(self.container, self.name)
self.engine.remove_container(self.container)
self.engine.clean_stale_images()
return self.mark_completed()
def update(self):
if (self.layout and self.is_layout_updated(self.layout)) or \
not next(self.engine.images(name=self.name), None):
return self.install()
return self.mark_completed()
def uninstall(self):
self.engine.remove_container(self.container)
if not self.keep:
self.engine.remove_image(self.name)
class DockerGentooBootstrapRecipe(BaseGroupRecipe):
"""
This recipe creates a docker image that contains a full operating system (typically Gentoo).
Such builder image can be used to create further docker images with :py:class:`dockeroo.docker.gentoo_build.DockerGentooBuildRecipe` recipe.
The recipe executes the following tasks:
1. Extract **archives** into a docker image.
2. Create a container from such image.
3. Install "freeze" binary into the container. This is a simple no-op binary executable.
4. If a **layout** is defined, copy layout contents onto container's root.
5. Execute **build-script**.
6. If **commit** is enabled, commit modifications of image.
.. describe:: Usage
The following example buildout part shows how to build a full Gentoo amd64 docker image.
.. code-block:: ini
[crossdev_builder.img]
crossdev-arch = x86_64
crossdev-platform = x86_64
crossdev-processor = x86_64
crossdev-variant = docker
crossdev-abi = gnu
crossdev-gentoo-profile = no-multilib
crossdev-gentoo-platform = amd64
crossdev-gentoo-platform-flavor = amd64
recipe = dockeroo:docker.gentoo-bootstrap
image = dockeroo/builder_${:crossdev-arch}:latest
container = dockeroo_builder_${:crossdev-arch}
volumes-from = ${distfiles:container}
gentoo-platform = amd64
gentoo-platform-flavor = amd64-nomultilib
gentoo-version = 20160414
archives =
http://distfiles.gentoo.org/releases/${:gentoo-platform}/autobuilds/${:gentoo-version}/stage3-${:gentoo-platform-flavor}-${:gentoo-version}.tar.bz2
commit = true
keep = true
layout = ${buildout:containers-directory}/builder_${:crossdev-arch}
build-script =
test -d /usr/portage/profiles || emerge-webrsync
emerge --sync
emerge -uDNvkb world
emerge -nNuvkb sys-devel/crossdev
test -e /usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}/.crossdev || \
crossdev -S -v -t ${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi} --ov-output /usr/local/portage-crossdev-${:crossdev-arch} -P -kb && \
touch /usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}/.crossdev
(cd /usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}/etc/portage && \
rm -f make.profile && ln -s /usr/portage/profiles/default/linux/${:crossdev-gentoo-platform}/13.0/${:crossdev-gentoo-profile} make.profile)
ROOT=/usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi} \
${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}-emerge -nuvkb1 --keep-going sys-apps/baselayout
ROOT=/usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi} \
${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}-emerge -nuvkb1 --keep-going $(egrep '^[a-z]+' /usr/portage/profiles/default/linux/packages.build)
ROOT=/usr/${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi} \
${:crossdev-processor}-${:crossdev-variant}-linux-${:crossdev-abi}-emerge -nuvkb1 --keep-going sys-apps/portage sys-apps/openrc net-misc/netifrc app-portage/gentoolkit
chroot-${:crossdev-arch}-docker -c locale-gen
chroot-${:crossdev-arch}-docker -c env-update
To use the above part, several other files are necessary, to be copied in via **layout**::
/etc/locale.gen
/etc/portage/repos.conf/crossdev.conf
/etc/portage/repos.conf/local.conf
/usr/local/bin/chroot-x86_64-docker
/usr/local/portage-crossdev-x86_64/metadata/layout.conf
/usr/local/portage-crossdev-x86_64/profiles/repo_name
/usr/x86_64-docker-linux-gnu/dockeroo-root/.keep
/usr/x86_64-docker-linux-gnu/etc/bash/bashrc.d/emerge-chroot
/usr/x86_64-docker-linux-gnu/etc/locale.gen
/usr/x86_64-docker-linux-gnu/etc/portage/make.conf
Here's an example of chroot-x86_64-docker script, useful to build docker images with :py:class:`dockeroo.docker.gentoo_build.DockerGentooBuildRecipe` recipe:
.. code-block:: bash
#!/bin/sh
cd /usr/x86_64-docker-linux-gnu
set -e
mkdir -p dev proc sys tmp etc/portage/repos.conf usr/portage usr/local/portage-crossdev-x86_64/packages var/lib/layman
mount -o bind /dev dev
mount -o bind /dev/pts dev/pts
mount -o bind /dev/shm dev/shm
mount -o bind /etc/portage/repos.conf etc/portage/repos.conf
mount -o bind /proc proc
mount -o bind /sys sys
mount -o bind /tmp tmp
mount -o bind /usr/portage usr/portage
mount -o bind /usr/portage/distfiles usr/portage/distfiles
mount -o bind /usr/local/portage-crossdev-x86_64 usr/local/portage-crossdev-x86_64
mount -o bind /usr/local/portage-crossdev-x86_64/packages usr/local/portage-crossdev-x86_64/packages
mount -o bind /var/lib/layman var/lib/layman
cp /etc/resolv.conf etc/resolv.conf
set +e
chroot . /bin/bash --login "$@"
ret=$?
set -e
umount var/lib/layman
umount usr/local/portage-crossdev-x86_64/packages
umount usr/local/portage-crossdev-x86_64
umount usr/portage/distfiles
umount usr/portage
umount tmp
umount sys
umount proc
umount etc/portage/repos.conf
umount dev/shm
umount dev/pts
umount dev
set +e
exit $ret
.. describe:: Configuration options
archives
List of URLs of operating system initial filesystem contents (Gentoo stageX).
crossdev-platform
Name of destination platform. If enabled, allows automatic configuration of QEMU binfmt mapping.
command
Command to execute upon container starting. Defaults to "/bin/freeze".
commit
Commit image changes after recipe install execution. Defaults to false.
container
Name of build container.
keep
Don't delete image upon uninstall.
layout
Copies a local folder to container's root with **docker cp**.
machine-name
Docker machine where **build-image** and **base-image** reside.
Defaults to DOCKER_MACHINE_NAME environment variable or "default" if unset.
name
Name of destination image. Defaults to part name.
build-script
Execute this script after extraction of archives filesystem and import of layout.
tag
Tag name. Defaults to "latest".
timeout
**docker** command timeout.
tty
Assign a **Pseudo-TTY** to the container.
volumes
Volumes to bind mount, one per line. Format is <path>:<mountpoint>.
volumes-from
Mount volumes from specified container.
"""
subrecipe_class = DockerGentooBootstrapSubRecipe
| 3,999
| 84
| 131
|
bc5473086318e86d614755dbb30522dfe789f2a8
| 10,298
|
py
|
Python
|
Apollo.py
|
heitorsampaio/ApolloAI
|
c2983ce51c52641453fb1f6e0d7598bdd47ed66d
|
[
"MIT"
] | null | null | null |
Apollo.py
|
heitorsampaio/ApolloAI
|
c2983ce51c52641453fb1f6e0d7598bdd47ed66d
|
[
"MIT"
] | null | null | null |
Apollo.py
|
heitorsampaio/ApolloAI
|
c2983ce51c52641453fb1f6e0d7598bdd47ed66d
|
[
"MIT"
] | null | null | null |
#!/usr/bin python3
#importing stuffs
import speech_recognition as sr
import os
import sys
import re
import webbrowser
import smtplib
import requests
import subprocess
from pyowm import OWM
import youtube_dl
import vlc
import urllib
import urllib3
import json
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen
import wikipedia
import random
from time import strftime
from gtts import gTTS
import pyttsx3
import lxml
from yeelight import Bulb
from yeelight import *
from matplotlib import colors
from tempfile import TemporaryFile
import facebook
import tweepy
import snowboydecoder
import signal
import pyaudio
from ibm_watson import TextToSpeechV1
from ibm_watson.websocket import SynthesizeCallback
from pygame import mixer
bulb = Bulb('192.168.15.2')
interrupted = False
text_to_speech = TextToSpeechV1(
iam_apikey='9mDYXRnjmXZS5grZPaBVleJarFajeVEn-Mjp9m_sWFSm',
url='https://stream.watsonplatform.net/text-to-speech/api'
)
class Play(object):
"""
Wrapper to play the audio in a blocking mode
"""
test_callback = MySynthesizeCallback()
while True:
assistant(mic())
| 32.588608
| 158
| 0.578171
|
#!/usr/bin/env python3
#importing stuffs
import speech_recognition as sr
import os
import sys
import re
import webbrowser
import smtplib
import requests
import subprocess
from pyowm import OWM
import youtube_dl
import vlc
import urllib
import urllib3
import json
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen
import wikipedia
import random
from time import strftime
from gtts import gTTS
import pyttsx3
import lxml
from yeelight import Bulb
from yeelight import *
from matplotlib import colors
from tempfile import TemporaryFile
import facebook
import tweepy
import snowboydecoder
import signal
import pyaudio
from ibm_watson import TextToSpeechV1
from ibm_watson.websocket import SynthesizeCallback
from pygame import mixer
bulb = Bulb('192.168.15.2')
interrupted = False
text_to_speech = TextToSpeechV1(
iam_apikey='9mDYXRnjmXZS5grZPaBVleJarFajeVEn-Mjp9m_sWFSm',
url='https://stream.watsonplatform.net/text-to-speech/api'
)
class Play(object):
"""
Wrapper to play the audio in a blocking mode
"""
def __init__(self):
self.format = pyaudio.paInt16
self.channels = 1
self.rate = 22050
self.chunk = 1024
self.pyaudio = None
self.stream = None
def start_streaming(self):
self.pyaudio = pyaudio.PyAudio()
self.stream = self._open_stream()
self._start_stream()
def _open_stream(self):
stream = self.pyaudio.open(
format=self.format,
channels=self.channels,
rate=self.rate,
output=True,
frames_per_buffer=self.chunk,
start=False
)
return stream
def _start_stream(self):
self.stream.start_stream()
def write_stream(self, audio_stream):
self.stream.write(audio_stream)
def complete_playing(self):
self.stream.stop_stream()
self.stream.close()
self.pyaudio.terminate()
class MySynthesizeCallback(SynthesizeCallback):
def __init__(self):
SynthesizeCallback.__init__(self)
self.play = Play()
def on_connected(self):
print('Opening stream to play')
self.play.start_streaming()
def on_error(self, error):
print('Error received: {}'.format(error))
def on_timing_information(self, timing_information):
print(timing_information)
def on_audio_stream(self, audio_stream):
self.play.write_stream(audio_stream)
def on_close(self):
print('Completed synthesizing')
self.play.complete_playing()
test_callback = MySynthesizeCallback()
def apolloRes(audio):
text_to_speech.synthesize_using_websocket(audio,
test_callback,
accept='audio/wav',
voice="en-US_MichaelV3Voice",)
def mic():
r = sr.Recognizer()
with sr.Microphone() as source:
print('Tell me something... ')
r.pause_threshold = 1
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
try:
command = r.recognize_google(audio, language='en-US').lower()
print('You said: ' + command)
except sr.UnknownValueError:
print('...')
command = mic()
return command
def assistant(command):
if 'tell me about' in command:
reg_ex = re.search('tell me about(.*)', command)
try:
if reg_ex:
topic = reg_ex.group(1)
wikipedia.set_lang('en')
ny = wikipedia.summary(topic, sentences=1)
print(ny)
apolloRes(ny)
apolloRes('Thats what i know about %s' %(topic))
except Exception as e:
apolloRes(e)
elif 'open the website' in command:
reg_ex = re.search('open the website (.*)', command)
if reg_ex:
domain = reg_ex.group(1)
print(domain)
url = 'https://www.' + domain
webbrowser.open(url)
apolloRes('I have opened the desired website.')
else:
pass
bulb.turn_off()
elif 'hey apollo' in command:
day_time = int(strftime('%H'))
if day_time < 12:
apolloRes('Hey. Good Morning')
elif 12 <= day_time < 18:
apolloRes('Hey. Good Afternoon')
else:
apolloRes('Hey. Good Night')
elif 'tell me a joke' in command:
res = requests.get(
'https://icanhazdadjoke.com/',
headers={'Accept':'application/json'})
if res.status_code == requests.codes.ok:
apolloRes(str(res.json()['joke']))
else:
apolloRes('oops! Im running out of jokes right now and im not in the mood for jokes.')
elif 'tell me todays news' in command:
try:
news_url = 'https://news.google.com/rss?hl=pt-BR&gl=BR&ceid=BR:pt-419'
Client = urlopen(news_url)
xml_page = Client.read()
Client.close()
soup_page = soup(xml_page,'lxml')
news_list = soup_page.findAll('item')
for news in news_list[:15]:
apolloRes(news.title.text)
except Exception as e:
print(e)
elif '''how's the weather in ''' in command:
reg_ex = re.search('''how's the weather in (.*)''', command)
if reg_ex:
city = reg_ex.group(1)
owm = OWM(API_key='247eed961dfdbff4a65c25d27834eaea')
obs = owm.weather_at_place(city)
w = obs.get_weather()
k = w.get_status()
x = w.get_temperature(unit='celsius')
apolloRes('O clima em %s é %s. Com máxima temperature de %0.2f e a minima temperatura de %0.2f celsius' % (city, k, x['temp_max'], x['temp_min']))
elif 'what time is it' in command:
import datetime
now = datetime.datetime.now()
apolloRes('It is %d hours and %d minutes' %(now.hour, now.minute))
elif 'turn on the room light' in command:
bulb.turn_on()
bulb.set_brightness(100)
apolloRes('Feito.')
elif 'turn off the room light' in command:
bulb.turn_off()
apolloRes('Pronto.')
elif 'change the room light color to' in command:
reg_ex = re.search('change the room light color to (.*)', command)
if reg_ex:
color = reg_ex.group(1)
if color == 'azul':
bulb.set_rgb(0,0,255)
apolloRes('Feito.')
elif color == 'vermelho':
bulb.set_rgb(255,0,0)
apolloRes('Feito.')
elif color == 'cyano':
bulb.set_rgb(0,255,255)
apolloRes('Feito.')
elif color == 'verde limão':
bulb.set_rgb(0,255,0)
apolloRes('Feito.')
elif color == 'amarelo':
bulb.set_rgb(255,255,0)
apolloRes('Feito.')
elif color == 'rosa':
bulb.set_rgb(255,0,255)
apolloRes('Feito.')
elif color == 'verde':
bulb.set_rgb(128,128,0)
apolloRes('Feito.')
elif color == 'azul marinho':
bulb.set_rgb(0,0,128)
apolloRes('Feito.')
elif color == 'roxo':
bulb.set_rgb(128,0,128)
apolloRes('Feito.')
elif color == 'branco':
bulb.set_rgb(255,255,255)
apolloRes('Feito.')
elif 'mude o brilho da luz do quarto para' in command:
reg_ex = re.search('mude o brilho da luz do quarto para (.*)', command)
if reg_ex:
bri = reg_ex.group(1)
bulb.set_brightness(int(bri))
apolloRes('O brilho da luz do quarto agora está em %d porcento' %(int(bri)))
elif 'start light flow' in command:
transitions = [
RGBTransition(255,0,255, duration=1000)
]
flow = Flow(
count=0,
transitions=transitions
)
bulb.start_flow(flow)
elif 'who are you' in command:
apolloRes('''Olá, eu sou Apollo, uma inteligencia artificial criada por Heitor Sampaio,
basta você pedir que eu posso fazer qualquer coisa, todos os dias estou aprendendo mais,
caso queira saber minhas habilidades, diga "Me ajuda", até mais!
''')
elif 'open the application' in command:
reg_ex = re.search('open the application (.*)', command)
if reg_ex:
appname = reg_ex.group(1)
appname1 = appname+'.app'
subprocess.Popen(['open', '-n', '/Applications/' + appname1], stdout=subprocess.PIPE)
apolloRes('I have opened the desired application')
elif 'post on twitter' in command:
reg_ex = re.search('post on twitter (.*)', command)
if reg_ex:
post = reg_ex.group(1)
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
api.update_status(status = post)
elif 'send a e-mail' in command:
apolloRes('Para quem você gostaria de enviar o e-mail?')
#recipient = mic()
elif 'play the next song' in command:
subprocess.Popen(['atvremote', '-a', 'next'])
apolloRes('Ok, i have changed the song on AppleTV')
elif 'comece uma dinâmica molecular' in command:
reg_ex = re.search('comece uma dinâmica molecular (.*)', command)
if reg_ex:
din = reg_ex.group(1)
if din == 'sem gpu':
process = subprocess.Popen(['python3', '/Users/heitorsampaio/GMXSMDF/run.py'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
process.stdin.write(b'2')
process.stdin.close()
elif din == 'com gpu':
process = subprocess.Popen(['python3', '/Users/heitorsampaio/GMXSMDF/run.py'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
process.stdin.write(b'1')
process.stdin.close()
while True:
assistant(mic())
| 8,734
| 26
| 414
|
77402bae8e8b2c3ce234d1baa57bbcf8550c4e85
| 1,094
|
py
|
Python
|
lib/xos-synchronizer/xos-synchronizer-tests/test_event_steps/event_step.py
|
mary-grace/xos
|
3e269834d29f936757f5091183c9b5188ed5cb9e
|
[
"Apache-2.0"
] | 66
|
2015-01-29T20:56:45.000Z
|
2021-07-01T09:56:44.000Z
|
lib/xos-synchronizer/xos-synchronizer-tests/test_event_steps/event_step.py
|
mary-grace/xos
|
3e269834d29f936757f5091183c9b5188ed5cb9e
|
[
"Apache-2.0"
] | 112
|
2015-01-30T19:59:09.000Z
|
2017-04-08T16:43:40.000Z
|
lib/xos-synchronizer/xos-synchronizer-tests/test_event_steps/event_step.py
|
mary-grace/xos
|
3e269834d29f936757f5091183c9b5188ed5cb9e
|
[
"Apache-2.0"
] | 66
|
2015-02-09T17:35:36.000Z
|
2021-03-24T12:31:19.000Z
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import absolute_import
from xossynchronizer.event_steps.eventstep import EventStep
from mock_modelaccessor import *
| 35.290323
| 81
| 0.756856
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import absolute_import
from xossynchronizer.event_steps.eventstep import EventStep
from mock_modelaccessor import *
class TestEventStep(EventStep):
technology = "kafka"
topics = ["sometopic"]
pattern = None
def __init__(self, model_accessor, log, *args, **kwargs):
super(TestEventStep, self).__init__(model_accessor, log, *args, **kwargs)
def process_event(self, event):
print("received an event", event)
| 170
| 135
| 23
|
40ad6cc077d9a49f51d8967b96d2c30ae64493ca
| 484
|
py
|
Python
|
modules/joinall.py
|
JohnBishop95/MirahezeBots
|
4408c65cabac750cd9771f4ed0889f530253bfff
|
[
"EFL-2.0"
] | null | null | null |
modules/joinall.py
|
JohnBishop95/MirahezeBots
|
4408c65cabac750cd9771f4ed0889f530253bfff
|
[
"EFL-2.0"
] | null | null | null |
modules/joinall.py
|
JohnBishop95/MirahezeBots
|
4408c65cabac750cd9771f4ed0889f530253bfff
|
[
"EFL-2.0"
] | null | null | null |
"""This module implements .joinall."""
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division
)
from sopel import module
import time
@module.require_admin
@module.commands('joinall')
@module.thread(True)
def handle_joins(bot, trigger):
"""Join some channels."""
channels = bot.config.core.channels
if trigger.sender == '#ZppixBot':
for channel in channels:
bot.join(channel)
time.sleep(1)
| 19.36
| 39
| 0.669421
|
"""This module implements .joinall."""
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division
)
from sopel import module
import time
@module.require_admin
@module.commands('joinall')
@module.thread(True)
def handle_joins(bot, trigger):
"""Join some channels."""
channels = bot.config.core.channels
if trigger.sender == '#ZppixBot':
for channel in channels:
bot.join(channel)
time.sleep(1)
| 0
| 0
| 0
|
2c158777c33cf24c011d63203509380559f6719a
| 2,052
|
py
|
Python
|
application/py/crolling.py
|
MaengJowan/bookshare
|
aee23df229e67961a2cb92526ac74d2563287fb0
|
[
"MIT"
] | null | null | null |
application/py/crolling.py
|
MaengJowan/bookshare
|
aee23df229e67961a2cb92526ac74d2563287fb0
|
[
"MIT"
] | null | null | null |
application/py/crolling.py
|
MaengJowan/bookshare
|
aee23df229e67961a2cb92526ac74d2563287fb0
|
[
"MIT"
] | null | null | null |
import requests
import json
from bs4 import BeautifulSoup
import sys
word = sys.argv[1]
word = word.strip().replace("&", "+")
main()
| 31.569231
| 142
| 0.634016
|
import requests
import json
from bs4 import BeautifulSoup
import sys
word = sys.argv[1]
word = word.strip().replace("&", "+")
def get_coverImg(url):
result=requests.get(url)
soup = BeautifulSoup(result.text, 'html.parser')
div = soup.find("div", {"id":"divDetailInfo"})
if(div == None):
imgUrl = "http://hanul.hannam.ac.kr/image/ko/common/noImageM.gif"
return imgUrl
else:
iframe=div.find('iframe')['src']
iframeResult=requests.get(iframe)
soup = BeautifulSoup(iframeResult.text, 'html.parser')
imgUrl=soup.find('td').find("img")['src']
return imgUrl
def get_list():
source={}
url = f"http://hanul.hannam.ac.kr/search/tot/result?pn=1&st=KWRD&si=TOTAL&oi=DISP06&os=DESC&q={word}&cpp=20"
result=requests.get(url)
if(result.status_code != requests.codes.ok):
print(f"{word} can not search")
else:
soup = BeautifulSoup(result.text, 'html.parser')
div = soup.find("div", {"id" : "divResultList"})
cnt = soup.find("div", {"id" : "searchCnt"}).find("strong").get_text()
booklist=div.find_all("div", {"class" : "briefData"})
i = 0
for book in booklist:
detail=book.find("dl", {"class" : "briefDetail"})
searchTitle=detail.find("dd", {"class" : "searchTitle"}).find("a")
title=searchTitle.get_text()
url = f"http://hanul.hannam.ac.kr{searchTitle['href']}"
imgUrl=get_coverImg(url)
bookline=detail.find_all("dd", {"class" : "bookline"})
author=bookline[0].get_text()
publisher = bookline[1].get_text()
borrow = bookline[-1].get_text()
if(borrow == "\xa0"):
borrow = "전자책"
source[f"{i}"]={"imgUrl":imgUrl, "url":url, "title":title, "author":author, "publisher":publisher, "borrow":borrow}
i += 1
source[f"{i}"]={"cnt":cnt, "libraryUrl":f"http://hanul.hannam.ac.kr/search/tot/result?st=KWRD&si=TOTAL&oi=DISP06&os=DESC&q={word}&cpp=20"}
return source
def listToDict(lst):
op = { i : lst[i] for i in range(0, len(lst) ) }
return op
def main():
result=get_list()
print(result)
main()
| 1,824
| 0
| 92
|
b247dcd44831854f5cf0ba67245c72b96e7ebb14
| 1,597
|
py
|
Python
|
WGALP/blocks/load_krakendb_ramdisk.py
|
redsnic/WGA-LP
|
1d8f4a85843b4220559e3e5cccaaee8c78e1b452
|
[
"MIT"
] | 5
|
2021-08-03T17:09:19.000Z
|
2021-12-14T18:11:02.000Z
|
WGALP/blocks/load_krakendb_ramdisk.py
|
redsnic/WGA-LP
|
1d8f4a85843b4220559e3e5cccaaee8c78e1b452
|
[
"MIT"
] | null | null | null |
WGALP/blocks/load_krakendb_ramdisk.py
|
redsnic/WGA-LP
|
1d8f4a85843b4220559e3e5cccaaee8c78e1b452
|
[
"MIT"
] | null | null | null |
# --- default imports ---
# --- load utils ---
from WGALP.utils.commandLauncher import run_sp
from WGALP.step import Step
description = """
load kraken_db in a ramdisk
"""
input_description = """
the position (on disk) of the kraken2 database
"""
output_description = """
the mounting point of the newly created ramdisk
"""
### Wrapper
### Runner
def krakendb_make_ramdisk_runner(step, args):
"""
NOTE: this requires 8GB of free RAM; be careful not to leave the ramdisk mounted after use...
[better to be run with "force"]
input:
kraken_db : path
output:
kraken_ram_db : position of the kraken2 database in the ramdisk
kraken_ramdisk : ramdisk mounting point
"""
db = args["kraken_db"]
# this command works with minikraken db, change ramdisk size if needed...
command = "mount -t tmpfs -o size=8G tmpfs " + step.outpath + " && "
command += "cp -R " + db + " " + step.outpath + "/kraken_db"
# note that this command requires root privileges (it may prompt for a password)
if step.execution_mode != "read":
run_sp(step, command)
# organize output
step.outputs = {
"kraken_ram_db" : "kraken_db",
"kraken_ramdisk" : ""
}
return step
| 28.017544
| 87
| 0.65623
|
# --- default imports ---
# --- load utils ---
from WGALP.utils.commandLauncher import run_sp
from WGALP.step import Step
description = """
load kraken_db in a ramdisk
"""
input_description = """
the position (on disk) of the kraken2 database
"""
output_description = """
the mounting point of the newly created ramdisk
"""
### Wrapper
def load_krakendb_ramdisk(name, rootpath, kraken_db, execution_mode = "on_demand"):
step = Step(name, rootpath, execution_mode=execution_mode)
step.set_command(krakendb_make_ramdisk_runner)
step_args = {
"kraken_db": kraken_db,
}
step.run(step_args)
step.set_description("load kraken_db in a ramdisk", "...", "...")
return step
### Runner
def krakendb_make_ramdisk_runner(step, args):
"""
NOTE: this requires 8GB of free RAM; be careful not to leave the ramdisk mounted after use...
[better to be run with "force"]
input:
kraken_db : path
output:
kraken_ram_db : position of the kraken2 database in the ramdisk
kraken_ramdisk : ramdisk mounting point
"""
db = args["kraken_db"]
# this command works with minikraken db, change ramdisk size if needed...
command = "mount -t tmpfs -o size=8G tmpfs " + step.outpath + " && "
command += "cp -R " + db + " " + step.outpath + "/kraken_db"
# note that this command requires root privileges (it may prompt for a password)
if step.execution_mode != "read":
run_sp(step, command)
# organize output
step.outputs = {
"kraken_ram_db" : "kraken_db",
"kraken_ramdisk" : ""
}
return step
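if __name__ == "__main__":
    # Editor's illustrative sketch, not part of the original module. The working
    # directory and the kraken2 database path below are placeholders, not paths
    # taken from the source; "force" is the execution mode suggested in the NOTE above.
    step = load_krakendb_ramdisk(
        name="kraken_ramdisk",
        rootpath="/tmp/wgalp_work",          # hypothetical WGALP working directory
        kraken_db="/data/minikraken2_v2",    # hypothetical kraken2 database on disk
        execution_mode="force")
    print(step.outputs["kraken_ram_db"])     # DB location, relative to the step's outpath
    print(step.outputs["kraken_ramdisk"])    # ramdisk mounting point (empty string = outpath itself)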
| 342
| 0
| 22
|
c40972a4bff7bb3ccdd36b0d87f0b2f304adfe8b
| 1,166
|
py
|
Python
|
src/toil/utils/toilDestroyCluster.py
|
YeoLab/toil
|
9837c396b946bc4a0cf97e7c2705e5892b88707b
|
[
"Apache-2.0"
] | null | null | null |
src/toil/utils/toilDestroyCluster.py
|
YeoLab/toil
|
9837c396b946bc4a0cf97e7c2705e5892b88707b
|
[
"Apache-2.0"
] | 1
|
2017-07-31T23:47:25.000Z
|
2017-07-31T23:47:25.000Z
|
src/toil/utils/toilDestroyCluster.py
|
lexentbio/toil
|
6ad3813af4450962d0899aa6c821189f86472ef9
|
[
"Apache-2.0"
] | 1
|
2020-09-17T17:49:32.000Z
|
2020-09-17T17:49:32.000Z
|
# Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Terminates the specified cluster and associated resources
"""
import logging
from toil.provisioners import Cluster
from toil.lib.bioio import parseBasicOptions, getBasicOptionParser
from toil.utils import addBasicProvisionerOptions
logger = logging.getLogger(__name__)
| 35.333333
| 74
| 0.766724
|
# Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Terminates the specified cluster and associated resources
"""
import logging
from toil.provisioners import Cluster
from toil.lib.bioio import parseBasicOptions, getBasicOptionParser
from toil.utils import addBasicProvisionerOptions
logger = logging.getLogger(__name__)
def main():
parser = getBasicOptionParser()
parser = addBasicProvisionerOptions(parser)
config = parseBasicOptions(parser)
cluster = Cluster(provisioner=config.provisioner,
clusterName=config.clusterName, zone=config.zone)
cluster.destroyCluster()
| 268
| 0
| 23
|
0c4337a0ad68810e4d3b72feb11fc8e929ff526c
| 778
|
py
|
Python
|
Muzact/app.py
|
Vikash-Kothary/hackathon-project-muzact
|
f5fdc8537faeda691dccdccc79d45a153c2dbab6
|
[
"MIT"
] | null | null | null |
Muzact/app.py
|
Vikash-Kothary/hackathon-project-muzact
|
f5fdc8537faeda691dccdccc79d45a153c2dbab6
|
[
"MIT"
] | null | null | null |
Muzact/app.py
|
Vikash-Kothary/hackathon-project-muzact
|
f5fdc8537faeda691dccdccc79d45a153c2dbab6
|
[
"MIT"
] | null | null | null |
from flask import Flask, request
import requests, json
app = Flask(__name__)
@app.route('/shazam', methods=['GET', 'POST'])
@app.route('/')
if __name__ == '__main__':
app.run('0.0.0.0', debug = 'True')
| 28.814815
| 74
| 0.661954
|
from flask import Flask, request
import requests, json
app = Flask(__name__)
@app.route('/shazam', methods=['GET', 'POST'])
@app.route('/')
def index():
if request.method == 'POST':
return "{'artist':'Taylor Swift', 'song':'style'}"
# return shazam_request(request.data)
return 'Hello World'
def shazam_request(file):
headers = {'X-Shazam-Api-Key': '03789B8E-A8CE-4229-A880-7FDE4C4FAEFC',
'Content-Type':'application/octet-stream'}
payload = open(file, 'rb').read()
url = 'http://beta.amp.shazam.com/partner/recognise'
response = requests.post(url, data=payload, headers=headers)
res = json.loads(response.text)
metadata = res['matches'][0]['metadata']
return metadata
if __name__ == '__main__':
app.run('0.0.0.0', debug = 'True')
| 523
| 0
| 46
|
ed5972ed81f4bea34843be0060bc2e8cdc364c2f
| 2,287
|
py
|
Python
|
Decimal_to_Binary_Converter.py
|
jerryhcooke/Decimal-Binary-Converter
|
6ee6d47b33418f65dbcbbaa67b7e2da3c94f9063
|
[
"MIT"
] | null | null | null |
Decimal_to_Binary_Converter.py
|
jerryhcooke/Decimal-Binary-Converter
|
6ee6d47b33418f65dbcbbaa67b7e2da3c94f9063
|
[
"MIT"
] | null | null | null |
Decimal_to_Binary_Converter.py
|
jerryhcooke/Decimal-Binary-Converter
|
6ee6d47b33418f65dbcbbaa67b7e2da3c94f9063
|
[
"MIT"
] | null | null | null |
# A converter that takes a denary number and converts it, using comparison
# rather than recursion, to binary
import sys
from colorama import init
from termcolor import cprint
from pyfiglet import figlet_format
init(strip=not sys.stdout.isatty())
cprint(figlet_format('Binary / Decimal Converter', font='doom'), 'white',
attrs=['bold'])
while True:
print("Please select from the following options:", "\n")
print("1 - Convert Decimal -> Binary")
print("2 - Convert Binary -> Decimal")
print("X - Exit the program", "\n")
choice = input("Please make your selection: ")
if choice == "x":
break
elif choice == '1':
try:
user_number = input("Your number: ")
result = convert_to_binary(int(user_number))
print("\n", "Your number converted to binary is: ", "0b", result,
" \n")
except ValueError:
print("\n", "Please enter an integer" " \n")
elif choice == '2':
try:
user_number = input("Your number: ")
result = binary_to_decimal(user_number)
print("\n", "Your number converted to decimal is: ", result, " \n")
except ValueError:
print("\n""Please enter a decimal number." " \n")
else:
print("\n", "!! Please select from one of the three options !!", " \n")
| 31.763889
| 79
| 0.615216
|
# A converter that takes a denary number and converts it, using comparison
# rather than recursion, to binary
import sys
from colorama import init
from termcolor import cprint
from pyfiglet import figlet_format
init(strip=not sys.stdout.isatty())
cprint(figlet_format('Binary / Decimal Converter', font='doom'), 'white',
attrs=['bold'])
def generate_comparisons(number): # generates a list of powers of two, the
# max being the first that is greater than or equal to the number
list = [1]
while max(list) < number:
x = max(list)
list.append(x * 2)
comparisons = reversed(list)
return comparisons
def convert_to_binary(decimal):
# takes the output of generate_comparisons and uses it to to convert
binary = []
comparison = generate_comparisons(int(decimal))
for number in comparison:
if (decimal - number) >= 0:
binary.append(1)
decimal -= number
else:
binary.append(0)
new_string = "".join(str(i) for i in binary)
return new_string
def binary_to_decimal(user_binary):
# takes an inputted binary number and converts decimal
output_decimal = 0
for bit in user_binary:
output_decimal = output_decimal*2 + int(bit)
return output_decimal
while True:
print("Please select from the following options:", "\n")
print("1 - Convert Decimal -> Binary")
print("2 - Convert Binary -> Decimal")
print("X - Exit the program", "\n")
choice = input("Please make your selection: ")
if choice == "x":
break
elif choice == '1':
try:
user_number = input("Your number: ")
result = convert_to_binary(int(user_number))
print("\n", "Your number converted to binary is: ", "0b", result,
" \n")
except ValueError:
print("\n", "Please enter an integer" " \n")
elif choice == '2':
try:
user_number = input("Your number: ")
result = binary_to_decimal(user_number)
print("\n", "Your number converted to decimal is: ", result, " \n")
except ValueError:
print("\n""Please enter a decimal number." " \n")
else:
print("\n", "!! Please select from one of the three options !!", " \n")
| 847
| 0
| 69
|
0fc69002b14a34c3e184479e98b73ecbe4f4b141
| 2,341
|
py
|
Python
|
appliedpy_ecourse/class7/ex2_models.py
|
fallenfuzz/pynet
|
9624d83cca160fd325a34e838e4474c9b80fe2ab
|
[
"Apache-2.0"
] | 528
|
2015-01-07T15:28:51.000Z
|
2022-03-27T09:45:37.000Z
|
appliedpy_ecourse/class7/ex2_models.py
|
fallenfuzz/pynet
|
9624d83cca160fd325a34e838e4474c9b80fe2ab
|
[
"Apache-2.0"
] | 19
|
2015-07-01T23:52:27.000Z
|
2021-09-22T04:30:34.000Z
|
appliedpy_ecourse/class7/ex2_models.py
|
fallenfuzz/pynet
|
9624d83cca160fd325a34e838e4474c9b80fe2ab
|
[
"Apache-2.0"
] | 555
|
2015-01-18T07:21:43.000Z
|
2022-03-20T21:25:22.000Z
|
'''
Add an SnmpCredentials model to the models.py file. This model should support
both SNMPv3 and SNMPv1/v2c. Add a foreign key reference in the NetworkDevice
model pointing to the SnmpCredentials model.
'''
from django.db import models
| 48.770833
| 80
| 0.699701
|
'''
Add an SnmpCredentials model to the models.py file. This model should support
both SNMPv3 and SNMPv1/v2c. Add a foreign key reference in the NetworkDevice
model pointing to the SnmpCredentials model.
'''
from django.db import models
class Credentials(models.Model):
username = models.CharField(max_length=50)
password = models.CharField(max_length=50)
description = models.CharField(max_length=200, blank=True, null=True)
def __unicode__(self):
return u'%s' % (self.username)
class SnmpCredentials(models.Model):
snmp_mode = models.CharField(max_length=20) # 'snmp1_2c' or 'snmp3'
description = models.CharField(max_length=200, blank=True, null=True)
username = models.CharField(max_length=50, blank=True, null=True)
auth_proto = models.CharField(max_length=30, blank=True, null=True)
auth_key = models.CharField(max_length=50, blank=True, null=True)
encrypt_proto = models.CharField(max_length=30, blank=True, null=True)
encrypt_key = models.CharField(max_length=50, blank=True, null=True)
community = models.CharField(max_length=50, blank=True, null=True)
def __unicode__(self):
return u'%s' % (self.description)
class NetworkDevice(models.Model):
device_name = models.CharField(primary_key=True, max_length=80)
ip_address = models.IPAddressField()
device_class = models.CharField(max_length=50)
ssh_port = models.IntegerField(blank=True, null=True)
api_port = models.IntegerField(blank=True, null=True)
vendor = models.CharField(max_length=50, blank=True, null=True)
model = models.CharField(max_length=50, blank=True, null=True)
device_type = models.CharField(max_length=50, blank=True, null=True)
os_version = models.CharField(max_length=100, blank=True, null=True)
serial_number = models.CharField(max_length=50, blank=True, null=True)
uptime_seconds = models.IntegerField(blank=True, null=True)
credentials = models.ForeignKey(Credentials, blank=True, null=True)
snmp_credentials = models.ForeignKey(SnmpCredentials, blank=True, null=True)
snmp_port = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return u'%s' % (self.device_name)
| 126
| 1,905
| 69
|
d168eea59a42d588125683162b3e1f7ba5c1b21c
| 988
|
py
|
Python
|
epics/devices/ad_image.py
|
juanfem/pyepics
|
6ced9237aa51e306a48057be0f80b08e44dbf0ae
|
[
"OML"
] | 55
|
2015-05-09T14:42:51.000Z
|
2022-02-21T19:53:06.000Z
|
epics/devices/ad_image.py
|
juanfem/pyepics
|
6ced9237aa51e306a48057be0f80b08e44dbf0ae
|
[
"OML"
] | 192
|
2015-01-22T16:36:41.000Z
|
2022-02-09T17:19:17.000Z
|
epics/devices/ad_image.py
|
juanfem/pyepics
|
6ced9237aa51e306a48057be0f80b08e44dbf0ae
|
[
"OML"
] | 43
|
2015-09-15T20:55:02.000Z
|
2022-02-21T19:53:07.000Z
|
from .. import Device
class AD_ImagePlugin(Device):
"""
AreaDetector Image Plugin
"""
attrs = ('ArrayData',
'UniqueId', 'UniqueId_RBV',
'NDimensions', 'NDimensions_RBV',
'ArraySize0', 'ArraySize0_RBV',
'ArraySize1', 'ArraySize1_RBV',
'ArraySize2', 'ArraySize2_RBV',
'ColorMode', 'ColorMode_RBV')
_nonpvs = ('_prefix', '_pvs', '_delim')
def ensure_value(self, attr, value, wait=False):
"""ensures that an attribute with an associated _RBV value is
set to the specified value
"""
rbv_attr = "%s_RBV" % attr
if rbv_attr not in self._pvs:
return self._pvs[attr].put(value, wait=wait)
if self._pvs[rbv_attr].get(as_string=True) != value:
self._pvs[attr].put(value, wait=wait)
| 30.875
| 69
| 0.572874
|
from .. import Device
class AD_ImagePlugin(Device):
"""
AreaDetector Image Plugin
"""
attrs = ('ArrayData',
'UniqueId', 'UniqueId_RBV',
'NDimensions', 'NDimensions_RBV',
'ArraySize0', 'ArraySize0_RBV',
'ArraySize1', 'ArraySize1_RBV',
'ArraySize2', 'ArraySize2_RBV',
'ColorMode', 'ColorMode_RBV')
_nonpvs = ('_prefix', '_pvs', '_delim')
def __init__(self, prefix):
Device.__init__(self, prefix, delim='', mutable=False,
attrs=self.attrs)
def ensure_value(self, attr, value, wait=False):
"""ensures that an attribute with an associated _RBV value is
set to the specified value
"""
rbv_attr = "%s_RBV" % attr
if rbv_attr not in self._pvs:
return self._pvs[attr].put(value, wait=wait)
if self._pvs[rbv_attr].get(as_string=True) != value:
self._pvs[attr].put(value, wait=wait)
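if __name__ == "__main__":
    # Editor's illustrative sketch, not part of the original module. The PV
    # prefix below is a placeholder (an areaDetector-simulator style prefix),
    # not a prefix taken from the source.
    img = AD_ImagePlugin("13SIM1:image1:")
    img.ensure_value("ColorMode", "Mono", wait=True)  # writes only if ColorMode_RBV differs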
| 111
| 0
| 27
|
6327f2a9bb703904fc001768cbf7a0f87cd749ea
| 4,196
|
py
|
Python
|
script/tazerUtil/serverUtil.py
|
jstrube/tazer
|
18a6d66ad2c20376609f1af7940e43b2031e4fd0
|
[
"BSD-3-Clause"
] | null | null | null |
script/tazerUtil/serverUtil.py
|
jstrube/tazer
|
18a6d66ad2c20376609f1af7940e43b2031e4fd0
|
[
"BSD-3-Clause"
] | null | null | null |
script/tazerUtil/serverUtil.py
|
jstrube/tazer
|
18a6d66ad2c20376609f1af7940e43b2031e4fd0
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess as sp
from . import util
# This is a Tazer Server class. It is here to help wrap several functions that
# exist across the Tazer source. The Commands listed in the arguments are the
# equivalent commands (and arguments) as if run from the build tree.
# The path is the path to the build directory!!!
# This functions runs a Tazer server locally
# Command:
# pathToBuild/src/server/server port
# Args:
# (optional) port = Port to listen on. Default is 6023 (Config.h: serverPort)
# This funtions pings a Tazer server.
# Command:
# pathToBuild/test/PingServer serverIpAddr port attempts sleepTime
# Args:
# (optional) serverIpAddr = Ip address of the server. Default is 127.0.0.1
# (optional) port = Port server is listening on. Default is 6023 (Config.h: serverPort)
# (optional) attempts = Number of times to attempt a ping. Default is 10
# (optional) sleepTime = Time in seconds to sleep between attempts. Default is 10
# This funtion closes a Tazer server. Can close a local or remote server.
# Command:
# pathToBuild/test/CloseServer serverIpAddr port
# Args:
# serverIpAddr = Ip address of the server.
# (optional) port = Port server is listening on. Default is 6023 (Config.h: serverPort)
# Will block until the process that launched server finishes
# Returns true if the process that launched server is still running
# Kills the processes that is running the server
# Get the PID of the child process
| 41.137255
| 99
| 0.637274
|
import subprocess as sp
from . import util
# This is a Tazer Server class. It is here to help wrap several functions that
# exist across the Tazer source. The Commands listed in the arguments are the
# equivalent commands (and arguments) as if run from the build tree.
class TazerServer:
# The path is the path to the build directory!!!
def __init__(self, path, serverIpAddr=None, port=None, outputFileName=None):
self.serverProc = None
self.path = path
self.serverIpAddr = serverIpAddr
self.port = port
self.printResult = True
self.outputFileName = outputFileName
self.outFile = util.openOutputFile(outputFileName)
self.childPid = None  # set by run() when the server process is launched
# This functions runs a Tazer server locally
# Command:
# pathToBuild/src/server/server port
# Args:
# (optional) port = Port to listen on. Default is 6023 (Config.h: serverPort)
def run(self, port=None):
port = port if port != None else self.port
serverPath = self.path + "/src/server/server"
args = [serverPath]
if port:
args.append(port)
util.printCommand(args)
self.serverProc = sp.Popen(args, stdout=self.outFile, stderr=self.outFile)
self.childPid = self.serverProc.pid
# This funtions pings a Tazer server.
# Command:
# pathToBuild/test/PingServer serverIpAddr port attempts sleepTime
# Args:
# (optional) serverIpAddr = Ip address of the server. Default is 127.0.0.1
# (optional) port = Port server is listening on. Default is 6023 (Config.h: serverPort)
# (optional) attempts = Number of times to attempt a ping. Default is 10
# (optional) sleepTime = Time in seconds to sleep between attempts. Default is 10
def ping(self, serverIpAddr=None, port=None, attempts=None, sleepTime=None):
serverIpAddr = serverIpAddr if serverIpAddr is not None else self.serverIpAddr
port = port if port != None else self.port
pingPath = self.path + "/test/PingServer"
args = [pingPath]
if serverIpAddr:
args.append(serverIpAddr)
if port:
args.append(port)
if attempts:
args.append(attempts)
if sleepTime:
args.append(sleepTime)
util.printCommand(args)
process = sp.Popen(args, stdout=self.outFile, stderr=self.outFile, universal_newlines=True)
process.wait()
if self.printResult and self.outputFileName == None:
print(process.stdout.read())
# This funtion closes a Tazer server. Can close a local or remote server.
# Command:
# pathToBuild/test/CloseServer serverIpAddr port
# Args:
# serverIpAddr = Ip address of the server.
# (optional) port = Port server is listening on. Default is 6023 (Config.h: serverPort)
def close(self, serverIpAddr=None, port=None):
serverIpAddr = serverIpAddr if serverIpAddr is not None else self.serverIpAddr
port = port if port != None else self.port
closePath = self.path + "/test/CloseServer"
args = [closePath]
if serverIpAddr == None:
serverIpAddr = "127.0.0.1"
args.append(serverIpAddr)
if port:
args.append(port)
util.printCommand(args)
process = sp.Popen(args, stdout=self.outFile, stderr=self.outFile, universal_newlines=True)
process.wait()
if self.printResult and self.outputFileName == None:
print(process.stdout.read())
# Will block until the process that launched server finishes
def wait(self):
if self.serverProc != None:
self.serverProc.wait()
# Returns true if the process that launched server is still running
def poll(self):
if self.serverProc != None:
return None == self.serverProc.poll()
return False
# Kills the processes that is running the server
def kill(self):
if self.serverProc != None:
self.serverProc.kill()
# Get the PID of the child process
def pid(self):
if self.serverProc != None:
print("PID: ", self.childPid)
| 2,419
| -3
| 230
|
903d6526666ce50546bae687fd6815485193c917
| 915
|
py
|
Python
|
altstreamfield/utils.py
|
didorothy/wagtailaltstreamfield
|
00b6be4420e031036f1d2d6c0122969df7fb3900
|
[
"BSD-2-Clause"
] | null | null | null |
altstreamfield/utils.py
|
didorothy/wagtailaltstreamfield
|
00b6be4420e031036f1d2d6c0122969df7fb3900
|
[
"BSD-2-Clause"
] | 20
|
2019-11-12T16:49:32.000Z
|
2021-03-09T23:04:20.000Z
|
altstreamfield/utils.py
|
didorothy/wagtailaltstreamfield
|
00b6be4420e031036f1d2d6c0122969df7fb3900
|
[
"BSD-2-Clause"
] | null | null | null |
from django.forms.widgets import Media
def get_class_media(base, instance):
'''Convenience function to be used when overriding the `media` property.
This function maintains the tasks of the media property set up by
`MediaDefiningClass` but allows you to extend the normal behavior.
Use:
class MyClass(Block):
def media(self):
media = get_class_media(super().media(), self)
# ... do extra stuff here ...
return media
'''
definition = getattr(instance, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
return Media(definition)
else:
return base
| 31.551724
| 76
| 0.571585
|
from django.forms.widgets import Media
def get_class_media(base, instance):
'''Convenience function to be used when overriding the `media` property.
This function maintains the tasks of the media property set up by
`MediaDefiningClass` but allows you to extend the normal behavior.
Use:
class MyClass(Block):
def media(self):
media = get_class_media(super().media(), self)
# ... do extra stuff here ...
return media
'''
definition = getattr(instance, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
return Media(definition)
else:
return base
| 0
| 0
| 0
|
b553b1daf5dcad704f72c521e15d5d3492a81839
| 1,158
|
py
|
Python
|
tests/functional_tests/pool/open_pool_ledger_with_valid_data_test.py
|
wYaobiz/indy-test-suite
|
7b4a3f9bb73e5830fea17a158dc0fc96ab29ac32
|
[
"Apache-2.0"
] | 1
|
2021-07-26T14:19:07.000Z
|
2021-07-26T14:19:07.000Z
|
tests/functional_tests/pool/open_pool_ledger_with_valid_data_test.py
|
wYaobiz/indy-test-suite
|
7b4a3f9bb73e5830fea17a158dc0fc96ab29ac32
|
[
"Apache-2.0"
] | null | null | null |
tests/functional_tests/pool/open_pool_ledger_with_valid_data_test.py
|
wYaobiz/indy-test-suite
|
7b4a3f9bb73e5830fea17a158dc0fc96ab29ac32
|
[
"Apache-2.0"
] | null | null | null |
from indy import pool
from utilities import utils
from utilities import common, constant
from test_scripts.functional_tests.pool.pool_test_base import PoolTestBase
import pytest
| 39.931034
| 75
| 0.662349
|
from indy import pool
from utilities import utils
from utilities import common, constant
from test_scripts.functional_tests.pool.pool_test_base import PoolTestBase
import pytest
class TestOpenPoolLedgerConfig(PoolTestBase):
@pytest.mark.asyncio
async def test(self):
# 1. Create pool ledger configuration.
self.steps.add_step("Create pool ledger config")
await utils.perform(self.steps, common.create_pool_ledger_config,
self.pool_name, constant.pool_genesis_txn_file)
# 2. Open pool ledger.
self.steps.add_step("Open pool ledger")
self.pool_handle = await \
utils.perform(self.steps, pool.open_pool_ledger,
self.pool_name, None, ignore_exception=True)
# 3. Verify that returned pool_handle is a positive integer.
self.steps.add_step("Verify that returned pool_"
"handle is a positive integer")
utils.check(self.steps, error_message="Cannot open pool ledger",
condition=lambda: isinstance(self.pool_handle, int) and
self.pool_handle >= 0)
| 879
| 76
| 23
|
d9fe82a160d5b6f40eb7f848610280b2b907fd7b
| 1,213
|
py
|
Python
|
examples/reference/models/select_server.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 15,193
|
2015-01-01T05:11:45.000Z
|
2022-03-31T19:30:20.000Z
|
examples/reference/models/select_server.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 9,554
|
2015-01-01T03:16:54.000Z
|
2022-03-31T22:59:39.000Z
|
examples/reference/models/select_server.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 4,829
|
2015-01-02T03:35:32.000Z
|
2022-03-30T16:40:26.000Z
|
## Bokeh server for Select
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, Select
from bokeh.plotting import figure
x=[3,4,6,12,10,1,5,6,3,8]
y=[7,1,3,4,1,6,10,4,10,3]
label=['Red', 'Orange', 'Red', 'Orange','Red', 'Orange','Red', 'Orange','Red', 'Orange',]
df=pd.DataFrame({'x':x,'y':y,'label':label})
source = ColumnDataSource(data=dict(x=df.x, y=df.y,label=df.label))
plot_figure = figure(title='Select',height=450, width=600,
tools="save,reset", toolbar_location="below")
plot_figure.scatter('x', 'y', source=source, size=10,color='label')
select = Select(title="Filter plot by color:", value="All", options=["All", "Red", "Orange"])
select.on_change('value',select_click)
layout=row(select, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Select Bokeh Server"
| 29.585366
| 93
| 0.695796
|
## Bokeh server for Select
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, Select
from bokeh.plotting import figure
x=[3,4,6,12,10,1,5,6,3,8]
y=[7,1,3,4,1,6,10,4,10,3]
label=['Red', 'Orange', 'Red', 'Orange','Red', 'Orange','Red', 'Orange','Red', 'Orange',]
df=pd.DataFrame({'x':x,'y':y,'label':label})
source = ColumnDataSource(data=dict(x=df.x, y=df.y,label=df.label))
plot_figure = figure(title='Select',height=450, width=600,
tools="save,reset", toolbar_location="below")
plot_figure.scatter('x', 'y', source=source, size=10,color='label')
select = Select(title="Filter plot by color:", value="All", options=["All", "Red", "Orange"])
def select_click(attr,old,new):
active_select=select.value ##Getting radio button value
# filter the dataframe with value in select
if active_select!='All':
selected_df=df[df['label']==active_select]
else:
selected_df=df.copy()
source.data=dict(x=selected_df.x, y=selected_df.y,label=selected_df.label)
select.on_change('value',select_click)
layout=row(select, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Select Bokeh Server"
| 319
| 0
| 23
|
da1d50a4f230ab93ab71b745cfeb7b321c903875
| 1,055
|
py
|
Python
|
src/sample/corpus_extraction_sklearn.py
|
humanist96/kmorph
|
0fc9ccdf77847de9602ca0c7f428e9dbb813b984
|
[
"Apache-2.0"
] | null | null | null |
src/sample/corpus_extraction_sklearn.py
|
humanist96/kmorph
|
0fc9ccdf77847de9602ca0c7f428e9dbb813b984
|
[
"Apache-2.0"
] | null | null | null |
src/sample/corpus_extraction_sklearn.py
|
humanist96/kmorph
|
0fc9ccdf77847de9602ca0c7f428e9dbb813b984
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import CountVectorizer
from natto import MeCab
_morpheme_type = ['NNG', 'NNP']
_escape_pattern = ['\n']
_nm = MeCab()
corpus = generate_corpus2('news.txt')
print('corpus : ', corpus)
_cv = CountVectorizer()
word_matrix = _cv.fit_transform(corpus)
index = 0
for word_vector in word_matrix.toarray():
print('[', corpus[index], '] \n: ', word_vector)
index += 1
| 23.977273
| 59
| 0.627488
|
# -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import CountVectorizer
from natto import MeCab
_morpheme_type = ['NNG', 'NNP']
_escape_pattern = ['\n']
_nm = MeCab()
def filter_by_type(text):
terms = []
for term_info in str(_nm.parse(text)).split('\n'):
_term_info = term_info.split('\t')
if len(_term_info) < 2:
continue
surface = _term_info[0]
analysis = _term_info[1].split(',')
if analysis[0] in _morpheme_type:
terms.append(surface)
return terms
def generate_corpus2(data_path):
_corpus = []
fp = open(data_path, 'r')
for line in fp.readlines():
if line not in _escape_pattern:
terms = filter_by_type(line)
_corpus.append(' '.join(terms))
return _corpus
corpus = generate_corpus2('news.txt')
print('corpus : ', corpus)
_cv = CountVectorizer()
word_matrix = _cv.fit_transform(corpus)
index = 0
for word_vector in word_matrix.toarray():
print('[', corpus[index], '] \n: ', word_vector)
index += 1
| 573
| 0
| 46
|
03eae6ce95f068bc2d9cb82135f48b30fa2fa332
| 2,894
|
py
|
Python
|
UIBox/item.py
|
yemenPython/TheBossBaby
|
86e447b3fa360646a179e62c70b1750da5427541
|
[
"MIT"
] | 1
|
2021-12-16T16:31:29.000Z
|
2021-12-16T16:31:29.000Z
|
UIBox/item.py
|
yemenPython/TheBossBaby
|
86e447b3fa360646a179e62c70b1750da5427541
|
[
"MIT"
] | null | null | null |
UIBox/item.py
|
yemenPython/TheBossBaby
|
86e447b3fa360646a179e62c70b1750da5427541
|
[
"MIT"
] | 1
|
2021-12-16T16:31:35.000Z
|
2021-12-16T16:31:35.000Z
|
#!/usr/bin/python3
from PyQt5 import QtCore, QtGui, QtWidgets
| 36.632911
| 104
| 0.653766
|
#!/usr/bin/python3
from PyQt5 import QtCore, QtGui, QtWidgets
class UIBUi_Item(QtWidgets.QWidget):
def __init__(self):
super(UIBUi_Item, self).__init__()
self.setObjectName("Form")
self.setMouseTracking(True)
self.gridLayout_2 = QtWidgets.QGridLayout(self)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setContentsMargins(0, 0, -2, 0)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setVerticalSpacing(0)
self.title = QtWidgets.QLabel(self)
font = QtGui.QFont()
font.setPointSize(11)
self.title.setFont(font)
self.title.setText("")
self.title.setObjectName("title")
self.gridLayout.addWidget(self.title, 0, 1, 1, 1)
self.subtitle = QtWidgets.QLabel(self)
font = QtGui.QFont()
font.setPointSize(7)
self.subtitle.setFont(font)
self.subtitle.setText("")
self.subtitle.setObjectName("subtitle")
self.gridLayout.addWidget(self.subtitle, 1, 1, 1, 1)
self.subtitle.setLayoutDirection(QtCore.Qt.LeftToRight)
self.subtitle.setAlignment(QtCore.Qt.AlignLeft)
self.image = QtWidgets.QLabel(self)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.image.sizePolicy().hasHeightForWidth())
self.image.setSizePolicy(sizePolicy)
self.image.setText("")
self.image.setObjectName("image")
self.gridLayout.addWidget(self.image, 0, 0, 2, 1)
self.hotkey = QtWidgets.QLabel(self)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.hotkey.sizePolicy().hasHeightForWidth())
self.hotkey.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.hotkey.setFont(font)
self.hotkey.setText("")
self.hotkey.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.hotkey.setObjectName("hotkey")
self.gridLayout.addWidget(self.hotkey, 0, 2, 2, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
self.retranslateUi()
QtCore.QMetaObject.connectSlotsByName(self)
self.setStyleSheet("""
#subtitle {
padding-left: 3px;
font-size: 11px;
color: #929a90;
}
#hotkey {
padding-left: 5px;
}
#title {padding-left: 2px}
""")
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.setWindowTitle(_translate("self", "self"))
| 2,740
| 15
| 76
|
64abab9832222fe0a8a6331f331b74645977e71e
| 5,313
|
py
|
Python
|
sentenceSpliter.py
|
neerbek/taboo-mon
|
3dc74fd38b112531db8b5f696ed92b894cc0a3b3
|
[
"MIT"
] | 1
|
2021-05-10T00:27:10.000Z
|
2021-05-10T00:27:10.000Z
|
sentenceSpliter.py
|
neerbek/taboo-mon
|
3dc74fd38b112531db8b5f696ed92b894cc0a3b3
|
[
"MIT"
] | null | null | null |
sentenceSpliter.py
|
neerbek/taboo-mon
|
3dc74fd38b112531db8b5f696ed92b894cc0a3b3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on April 12, 2018
@author: neerbek
"""
import os
import getopt
import sys
import io
import monUtil
import monsantoData
import textClean
import server_rnn_helper
from similarity import load_trees
from importlib import reload
reload(textClean)
# from backends import PDFMinerBackend
datadir = "../data_monsanto/2018-04-12-workdir"
filename = "monsantoDataEntries.json"
startindex = 0
endindex = -1
outfile = "indexedsentences.txt"
# parse input
argv = sys.argv[1:] # first arg is filename
# argv = "-s 0 -e -1 -o jantmp_trees.txt".split()
try:
opts, args = getopt.getopt(argv, "hd:l:s:e:o:", ["help", "datadir=", "labelfile=", "start=", "end=", "outfile="])
except getopt.GetoptError:
usage(exitCode=2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage(exitCode=0)
elif opt in ("-d", "--datadir"):
datadir = arg
elif opt in ("-l", "--labelfile"):
filename = arg
elif opt in ("-s", "--start"):
startindex = int(arg)
elif opt in ("-e", "--end"):
endindex = int(arg)
elif opt in ("-o", "--outfile"):
outfile = arg
labelMap = {}
for i, label in enumerate(monsantoData.labelList):
labelMap[label] = i
docs = monUtil.loadJSONList(filename, monUtil.MonsantoDocument.dictToMonsantoDocument)
for d in docs:
d.uri = os.path.join(datadir, d.uri[:-4] + ".txt")
d.label = labelMap[d.label]
# want to address
# non-ascii
# sentences split over several lines (with newlines)
# monIds in text
# non-informative lines (maybe later)
# save with docId (monId?) and with lineNumber and sentenceNumber
# I guess to save in file next to datafile with extension .sentences (might cause problems on windows)
# watch out for very long or very short sentences
cleanDocs = []
for i, doc in enumerate(docs):
doc = docs[i]
doc.uri = doc.uri.replace("\ ", " ") # don't escape for io.open
doc.uri = doc.uri.replace("\$", "$") # don't escape for io.open
with io.open(doc.uri, 'r', encoding='utf8') as f:
lines = f.readlines()
cleaner = textClean.TextCleaner()
res = []
for j, line in enumerate(lines):
before = line
line = cleaner.cleanLine(line)
if len(line) != 0:
res.append(line)
cleanDocs.append(res)
if len(res) == 0:
print("INFO: received empty doc: ", i, doc.uri)
# if i % 10 == 0:
# print("INFO: done processing doc", i)
indexedSentenceList = []
for i, text in enumerate(cleanDocs):
sentences = server_rnn_helper.get_indexed_sentences("\n".join(text))
for sentence in sentences:
sentence.sentence = monUtil.removeNewline(sentence.sentence) # remove \n introduced above, not handled by server_rnn_helper.get_nltk_trees below
sentence.sentence = monUtil.removeApostrof(sentence.sentence) # remove "'" not handled by server_rnn_helper.get_nltk_trees below
sentence.sentence = monUtil.removeMultiCommas(sentence.sentence) # remove multiple commas
indexedSentenceList.append(sentences)
# i = 0
# for indexedSentence in indexedSentenceList[i]:
# print(indexedSentence)
# count = 0
# for sentences in indexedSentenceList:
# count += len(sentences)
# print(count)
# counts = []
# for sentences in indexedSentenceList:
# for sentence in sentences:
# counts.append(len(sentence.sentence))
# len(counts)
# import numpy
# data = numpy.array(counts)
# print(max(counts))
# bins = numpy.array([5, 20, 75, 125, 300, 500, 1000])
# classes = numpy.digitize(data, bins)
# unique, counts = numpy.unique(classes, return_counts=True)
# print(dict(zip(unique, counts)))
# # {0: 1175, 1: 1122, 2: 2428, 3: 2062, 4: 3165, 5: 573, 6: 195, 7: 54}
# count = 0
# val = 1
# for c in counts:
# if c == val:
# count += 1
# print("{}:".format(val), count)
treeList = []
if endindex == -1:
endindex = len(indexedSentenceList)
endindex = min(endindex, len(indexedSentenceList))
for i in range(startindex, endindex):
indexSentences = indexedSentenceList[i]
print("Working on doc {}/{} ({}-{})".format(i, len(indexedSentenceList), startindex, endindex))
trees = server_rnn_helper.get_nltk_trees(i, indexedSentenceList[i])
treeList.append(trees)
if len(trees) == 0:
print("WARN: trees were empty", i, docs[i].uri)
trees = []
for i in range(startindex, endindex):
doc = docs[i]
indexSentences = indexedSentenceList[i]
for indexSentence in indexSentences:
tree = indexSentence.tree
if tree is None:
continue # empty tree
n = load_trees.Node()
n.syntax = "{}".format(indexSentence.beginIndex)
n.word = doc.monsantoId
n.word = n.word.replace(" ", "%20")
tree.parent = n
n.left = tree
nRight = n.add_child()
nRight.syntax = "{}".format(doc.label)
nRight.word = doc.uri[len(datadir) + 1:-4]
nRight.word = nRight.word.replace(" ", "%20")
trees.append(n)
load_trees.put_trees(outfile, trees)
# TODO: add timestamp here so we can track when the process terminated
print("done. saved {} trees to {}".format(len(trees), outfile))
| 31.070175
| 153
| 0.652174
|
# -*- coding: utf-8 -*-
"""
Created on April 12, 2018
@author: neerbek
"""
import os
import getopt
import sys
import io
import monUtil
import monsantoData
import textClean
import server_rnn_helper
from similarity import load_trees
from importlib import reload
reload(textClean)
# from backends import PDFMinerBackend
datadir = "../data_monsanto/2018-04-12-workdir"
filename = "monsantoDataEntries.json"
startindex = 0
endindex = -1
outfile = "indexedsentences.txt"
def usage(exitCode=0):
print('sentenceSpliter.py [-o <outputfile>] [-d <datadir>] [-l <labelfile>] [-s <startindex>] [-e <endindex>] [-h]')
sys.exit(exitCode)
# parse input
argv = sys.argv[1:] # first arg is filename
# argv = "-s 0 -e -1 -o jantmp_trees.txt".split()
try:
opts, args = getopt.getopt(argv, "hd:l:s:e:o:", ["help", "datadir=", "labelfile=", "start=", "end=", "outfile="])
except getopt.GetoptError:
usage(exitCode=2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage(exitCode=0)
elif opt in ("-d", "--datadir"):
datadir = arg
elif opt in ("-l", "--labelfile"):
filename = arg
elif opt in ("-s", "--start"):
startindex = int(arg)
elif opt in ("-e", "--end"):
endindex = int(arg)
elif opt in ("-o", "--outfile"):
outfile = arg
labelMap = {}
for i, label in enumerate(monsantoData.labelList):
labelMap[label] = i
docs = monUtil.loadJSONList(filename, monUtil.MonsantoDocument.dictToMonsantoDocument)
for d in docs:
d.uri = os.path.join(datadir, d.uri[:-4] + ".txt")
d.label = labelMap[d.label]
# want to address
# non-ascii
# sentences split over several lines (with newlines)
# monIds in text
# non-informative lines (maybe later)
# save with docId (monId?) and with lineNumber and sentenceNumber
# I guess to save in file next to datafile with extension .sentences (might cause problems on windows)
# watch out for very long or very short sentences
cleanDocs = []
for i, doc in enumerate(docs):
doc = docs[i]
doc.uri = doc.uri.replace("\ ", " ") # don't escape for io.open
doc.uri = doc.uri.replace("\$", "$") # don't escape for io.open
with io.open(doc.uri, 'r', encoding='utf8') as f:
lines = f.readlines()
cleaner = textClean.TextCleaner()
res = []
for j, line in enumerate(lines):
before = line
line = cleaner.cleanLine(line)
if len(line) != 0:
res.append(line)
cleanDocs.append(res)
if len(res) == 0:
print("INFO: received empty doc: ", i, doc.uri)
# if i % 10 == 0:
# print("INFO: done processing doc", i)
indexedSentenceList = []
for i, text in enumerate(cleanDocs):
sentences = server_rnn_helper.get_indexed_sentences("\n".join(text))
for sentence in sentences:
sentence.sentence = monUtil.removeNewline(sentence.sentence) # remove \n introduced above, not handled by server_rnn_helper.get_nltk_trees below
sentence.sentence = monUtil.removeApostrof(sentence.sentence) # remove "'" not handled by server_rnn_helper.get_nltk_trees below
sentence.sentence = monUtil.removeMultiCommas(sentence.sentence) # remove multiple commas
indexedSentenceList.append(sentences)
# i = 0
# for indexedSentence in indexedSentenceList[i]:
# print(indexedSentence)
# count = 0
# for sentences in indexedSentenceList:
# count += len(sentences)
# print(count)
# counts = []
# for sentences in indexedSentenceList:
# for sentence in sentences:
# counts.append(len(sentence.sentence))
# len(counts)
# import numpy
# data = numpy.array(counts)
# print(max(counts))
# bins = numpy.array([5, 20, 75, 125, 300, 500, 1000])
# classes = numpy.digitize(data, bins)
# unique, counts = numpy.unique(classes, return_counts=True)
# print(dict(zip(unique, counts)))
# # {0: 1175, 1: 1122, 2: 2428, 3: 2062, 4: 3165, 5: 573, 6: 195, 7: 54}
# count = 0
# val = 1
# for c in counts:
# if c == val:
# count += 1
# print("{}:".format(val), count)
treeList = []
if endindex == -1:
endindex = len(indexedSentenceList)
endindex = min(endindex, len(indexedSentenceList))
for i in range(startindex, endindex):
indexSentences = indexedSentenceList[i]
print("Working on doc {}/{} ({}-{})".format(i, len(indexedSentenceList), startindex, endindex))
trees = server_rnn_helper.get_nltk_trees(i, indexedSentenceList[i])
treeList.append(trees)
if len(trees) == 0:
print("WARN: trees were empty", i, docs[i].uri)
trees = []
for i in range(startindex, endindex):
doc = docs[i]
indexSentences = indexedSentenceList[i]
for indexSentence in indexSentences:
tree = indexSentence.tree
if tree is None:
continue # empty tree
n = load_trees.Node()
n.syntax = "{}".format(indexSentence.beginIndex)
n.word = doc.monsantoId
n.word = n.word.replace(" ", "%20")
tree.parent = n
n.left = tree
nRight = n.add_child()
nRight.syntax = "{}".format(doc.label)
nRight.word = doc.uri[len(datadir) + 1:-4]
nRight.word = nRight.word.replace(" ", "%20")
trees.append(n)
load_trees.put_trees(outfile, trees)
# TODO: add timestamp here so we can track when the process terminated
print("done. saved {} trees to {}".format(len(trees), outfile))
| 143
| 0
| 23
|
c11bc4ddc779cc37ccba80a70677f0703b4a7cf3
| 7,182
|
py
|
Python
|
scoring/fields.py
|
stkrizh/otus
|
6bff5cf62661bb7bab6eac6cb563a63a52a56423
|
[
"MIT"
] | 1
|
2021-07-25T11:11:55.000Z
|
2021-07-25T11:11:55.000Z
|
scoring/fields.py
|
stkrizh/otus
|
6bff5cf62661bb7bab6eac6cb563a63a52a56423
|
[
"MIT"
] | 3
|
2020-03-24T17:33:29.000Z
|
2021-08-23T20:20:31.000Z
|
scoring/fields.py
|
stkrizh/otus
|
6bff5cf62661bb7bab6eac6cb563a63a52a56423
|
[
"MIT"
] | null | null | null |
import datetime as dt
import re
class Field(object):
"""Base class for fields validation.
Attributes
----------
allowed_type : Optional[type or Tuple[type]]
Allowed field's type.
Parameters
----------
label: Optional[unicode]
Name of the field.
required : bool
Raise a `ValidationError` if the field value is `None`.
False by default.
nullable : bool
Set this to `True` to consider nullable values as valid ones.
True by default.
"""
allowed_type = None
choices = None
@staticmethod
def is_nullable(value):
"""Check nullability of the value.
Parameters
----------
value : Any
Actual field value.
Returns
-------
bool
`True` if `value` may be considered as a nullable,
`False` otherwise.
"""
return not bool(value)
def clean(self, value):
"""Validate the given value and return its "cleaned" value.
Raise ValidationError for any errors.
Parameters
----------
value : Any
Actual field value.
Raises
------
ValidationError
If validation does not succeed.
Returns
-------
result : ValidationResult
"""
if value is None:
if self.required:
err = u"Field `{}` is required."
raise ValidationError(err.format(self.label))
return value
if isinstance(self.allowed_type, (type, tuple)):
if not isinstance(value, self.allowed_type):
err = u"Field `{}` must be an instance of `{}` type / types."
err = err.format(self.label, self.allowed_type)
raise ValidationError(err)
if self.is_nullable(value):
if not self.nullable:
err = u"Field `{}` may not be nullable."
raise ValidationError(err.format(self.label))
return value
if self.choices:
if value not in self.choices:
err = u"Invalid value for field `{}`. Choices are: `{}`."
choices = ", ".join(str(item) for item in self.choices)
raise ValidationError(err.format(self.label, choices))
return value
return self.validate(value)
def validate(self, value):
"""Additional validation of non-empty value.
"""
return value
class CharField(Field):
"""Represents a string.
Parameters
----------
max_len: int
Max length of the string.
+ from Field
"""
allowed_type = unicode, str
class RegexField(CharField):
""" Represents a string that match specified pattern.
Parameters
----------
    pattern: Optional[unicode]
Regular expression pattern.
+ from CharField
"""
pattern = r".*"
error_message = u"Field `{}` doesn't match `{}` pattern."
class ArgumentsField(Field):
"""Represents a dictionary.
"""
allowed_type = dict
class EmailField(RegexField):
"""Represents an email address.
"""
pattern = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"
error_message = u"Field `{}` is not a valid email address."
class PhoneField(RegexField):
"""Represents a phone number.
"""
allowed_type = unicode, str, int
pattern = r"^7\d{10}$"
error_message = u"Field `{}` is not a valid phone number."
class DateField(Field):
"""Represents a date in `DD.MM. YYYY` format.
"""
allowed_type = unicode, str
class BirthDayField(DateField):
"""Represents a date of birth in `DD.MM. YYYY` format.
"""
class GenderField(Field):
"""Represents a gender.
"""
UNKNOWN = 0
MALE = 1
FEMALE = 2
GENDERS = {UNKNOWN: u"unknown", MALE: u"male", FEMALE: u"female"}
allowed_type = unicode, str, int
choices = GENDERS
class ClientIDsField(Field):
"""Represents a non-empty list of integers.
"""
allowed_type = list
| 25.378092
| 79
| 0.574353
|
import datetime as dt
import re
class ValidationError(Exception):
pass
class Field(object):
"""Base class for fields validation.
Attributes
----------
allowed_type : Optional[type or Tuple[type]]
Allowed field's type.
Parameters
----------
label: Optional[unicode]
Name of the field.
required : bool
Raise a `ValidationError` if the field value is `None`.
False by default.
nullable : bool
Set this to `True` to consider nullable values as valid ones.
True by default.
"""
allowed_type = None
choices = None
def __init__(
self, label=None, required=True, nullable=False, choices=None
):
self.label = label
self.required = required
self.nullable = nullable
self.choices = choices if choices is not None else self.choices
@staticmethod
def is_nullable(value):
"""Check nullability of the value.
Parameters
----------
value : Any
Actual field value.
Returns
-------
bool
`True` if `value` may be considered as a nullable,
`False` otherwise.
"""
return not bool(value)
def clean(self, value):
"""Validate the given value and return its "cleaned" value.
Raise ValidationError for any errors.
Parameters
----------
value : Any
Actual field value.
Raises
------
ValidationError
If validation does not succeed.
Returns
-------
result : ValidationResult
"""
if value is None:
if self.required:
err = u"Field `{}` is required."
raise ValidationError(err.format(self.label))
return value
if isinstance(self.allowed_type, (type, tuple)):
if not isinstance(value, self.allowed_type):
err = u"Field `{}` must be an instance of `{}` type / types."
err = err.format(self.label, self.allowed_type)
raise ValidationError(err)
if self.is_nullable(value):
if not self.nullable:
err = u"Field `{}` may not be nullable."
raise ValidationError(err.format(self.label))
return value
if self.choices:
if value not in self.choices:
err = u"Invalid value for field `{}`. Choices are: `{}`."
choices = ", ".join(str(item) for item in self.choices)
raise ValidationError(err.format(self.label, choices))
return value
return self.validate(value)
def validate(self, value):
"""Additional validation of non-empty value.
"""
return value
def __get__(self, instance, owner=None):
if instance is None:
return self
if self.label is None:
raise ValueError(
u"Label of an instance of `Field` class may not be None."
)
return instance.__dict__[self.label]
def __set__(self, instance, value):
if self.label is None:
raise ValueError(
u"Label of an instance of `Field` class may not be None."
)
cleaned = self.clean(value)
instance.__dict__[self.label] = cleaned
class CharField(Field):
"""Represents a string.
Parameters
----------
max_len: int
Max length of the string.
+ from Field
"""
allowed_type = unicode, str
def __init__(self, max_len=128, **kwargs):
super(CharField, self).__init__(**kwargs)
self.max_len = max_len
def validate(self, value):
if len(value) > self.max_len:
err = u"Field `{}` must contain less than {} characters."
raise ValidationError(err.format(self.label, self.max_len))
return value
class RegexField(CharField):
""" Represents a string that match specified pattern.
Parameters
----------
    pattern: Optional[unicode]
Regular expression pattern.
+ from CharField
"""
pattern = r".*"
error_message = u"Field `{}` doesn't match `{}` pattern."
def __init__(self, pattern=None, **kwargs):
super(RegexField, self).__init__(**kwargs)
self.pattern = self.pattern if pattern is None else pattern
self.compiled_pattern = re.compile(self.pattern)
def validate(self, value):
value = super(RegexField, self).validate(value)
if not self.compiled_pattern.match(value):
raise ValidationError(
self.error_message.format(self.label, self.pattern)
)
return value
class ArgumentsField(Field):
"""Represents a dictionary.
"""
allowed_type = dict
class EmailField(RegexField):
"""Represents an email address.
"""
pattern = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"
error_message = u"Field `{}` is not a valid email address."
class PhoneField(RegexField):
"""Represents a phone number.
"""
allowed_type = unicode, str, int
pattern = r"^7\d{10}$"
error_message = u"Field `{}` is not a valid phone number."
@staticmethod
def is_nullable(value):
return not bool(str(value))
def validate(self, value):
value = str(value)
return super(PhoneField, self).validate(value)
class DateField(Field):
"""Represents a date in `DD.MM. YYYY` format.
"""
allowed_type = unicode, str
def validate(self, value):
try:
date = dt.datetime.strptime(value, "%d.%m.%Y")
except ValueError:
err = u"Field `{}` doesn't match date format `DD.MM.YYYY`."
raise ValidationError(err.format(self.label))
return date
class BirthDayField(DateField):
"""Represents a date of birth in `DD.MM. YYYY` format.
"""
def validate(self, value):
date = super(BirthDayField, self).validate(value)
now = dt.datetime.now()
if date < now - dt.timedelta(days=(365.25 * 70)):
err = u"Field `{}` is not a valid birthday."
raise ValidationError(err.format(self.label))
if date > now:
err = u"Field `{}` is not a valid birthday."
raise ValidationError(err.format(self.label))
return date
class GenderField(Field):
"""Represents a gender.
"""
UNKNOWN = 0
MALE = 1
FEMALE = 2
GENDERS = {UNKNOWN: u"unknown", MALE: u"male", FEMALE: u"female"}
allowed_type = unicode, str, int
choices = GENDERS
@staticmethod
def is_nullable(value):
return not bool(unicode(value))
class ClientIDsField(Field):
"""Represents a non-empty list of integers.
"""
allowed_type = list
def validate(self, value):
if all(isinstance(item, int) and item >= 0 for item in value):
unique = list(set(value))
if unique:
return unique
err = u"Field `{}` must be a non-empty list with non-negative integers"
raise ValidationError(err.format(self.label))
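
# --- Editor's illustrative sketch; not part of the original scoring/fields.py. ---
# Minimal usage example for the descriptor-based fields above. The request class,
# labels and values are hypothetical; they only show that assignment triggers
# clean()/validate() and that invalid values raise ValidationError.
if __name__ == "__main__":
    class OnlineScoreRequest(object):
        phone = PhoneField(label="phone", required=False, nullable=True)
        email = EmailField(label="email", required=False, nullable=True)
        gender = GenderField(label="gender", required=False, nullable=True)

    request = OnlineScoreRequest()
    request.phone = u"79175002040"       # matches the ^7\d{10}$ pattern
    request.email = u"user@example.com"  # matches the email pattern
    request.gender = GenderField.MALE    # must be one of the GENDERS keys
    try:
        request.phone = u"12345"         # wrong prefix/length -> ValidationError
    except ValidationError as exc:
        print(exc)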
| 2,662
| 21
| 372
|
b2dc8eb7563fbeae7c3260ea4983e67be1ebdf31
| 974
|
py
|
Python
|
Ex028.py
|
andrade-lcs/ex_curso_em_video_python
|
f2d029efe7a20cdf0fcb5b602f9992e27d37c263
|
[
"MIT"
] | null | null | null |
Ex028.py
|
andrade-lcs/ex_curso_em_video_python
|
f2d029efe7a20cdf0fcb5b602f9992e27d37c263
|
[
"MIT"
] | null | null | null |
Ex028.py
|
andrade-lcs/ex_curso_em_video_python
|
f2d029efe7a20cdf0fcb5b602f9992e27d37c263
|
[
"MIT"
] | null | null | null |
#t=int(input('Quantos anos tem seu carro?'))
#if t <=3:
# print('Carro novo')
#else:
# print('Carro velho')
#print('Carro novo' if t<=3 else 'Carro velho')
#print('fim')
# print('Que nome lindo')
#print('Bom dia, {}'.format(n))
#print('fim')
#_____________________________
import random
from time import sleep
c = 0
while c != 1:
    print('O computador escolherá um número entre 1 e 6')
a = int(random.randint(1,6))
b = int(input('Agora vc deve escolher um número entre 1 e 6: '))
print('-='*20)
sleep(2)
if a==b:
print('Você escolheu o número que o computador escolheu')
print('Você ganhou o jogo!')
else:
print('você escolheu o número errado')
print('O número escolhido pelo computador foi {:.0f}, e o seu {:.0f}'.format(a,b))
print('O computador ganhou o jogo')
print('-='*20)
sleep(2)
c = int(input('Para continuar digite 0 e para sair digite 1: '))
print(' ')
print('Fim do jogo')
| 30.4375
| 90
| 0.616016
|
#t=int(input('Quantos anos tem seu carro?'))
#if t <=3:
# print('Carro novo')
#else:
# print('Carro velho')
#print('Carro novo' if t<=3 else 'Carro velho')
#print('fim')
# print('Que nome lindo')
#print('Bom dia, {}'.format(n))
#print('fim')
#_____________________________
import random
from time import sleep
c = 0
while c != 1:
    print('O computador escolherá um número entre 1 e 6')
a = int(random.randint(1,6))
b = int(input('Agora vc deve escolher um número entre 1 e 6: '))
print('-='*20)
sleep(2)
if a==b:
print('Você escolheu o número que o computador escolheu')
print('Você ganhou o jogo!')
else:
print('você escolheu o número errado')
print('O número escolhido pelo computador foi {:.0f}, e o seu {:.0f}'.format(a,b))
print('O computador ganhou o jogo')
print('-='*20)
sleep(2)
c = int(input('Para continuar digite 0 e para sair digite 1: '))
print(' ')
print('Fim do jogo')
| 0
| 0
| 0
|
eb225baa26daf67586d587e4ced2d1f3464104dc
| 5,232
|
py
|
Python
|
daluke/plot/plot_finetune_ner.py
|
peleiden/daluke
|
d2c85ba6b80021b2959b369381c447d18b058576
|
[
"MIT"
] | 10
|
2021-07-06T08:31:45.000Z
|
2021-12-17T09:13:33.000Z
|
daluke/plot/plot_finetune_ner.py
|
peleiden/daLUKE
|
d2c85ba6b80021b2959b369381c447d18b058576
|
[
"MIT"
] | 70
|
2021-03-26T13:30:39.000Z
|
2021-06-10T15:06:36.000Z
|
daluke/plot/plot_finetune_ner.py
|
peleiden/daluke
|
d2c85ba6b80021b2959b369381c447d18b058576
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import os
import click
from pelutils.logger import log
from pelutils.ds.plot import figsize_std, tab_colours, update_rc_params, rc_params
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
from daluke.ner.training import TrainResults
from daluke.plot import running_avg
update_rc_params(rc_params)
@click.command()
@click.argument("location")
if __name__ == "__main__":
with log.log_errors:
make_finetuning_plots()
| 37.106383
| 183
| 0.661888
|
from __future__ import annotations
import os
import click
from pelutils.logger import log
from pelutils.ds.plot import figsize_std, tab_colours, update_rc_params, rc_params
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
from daluke.ner.training import TrainResults
from daluke.plot import running_avg
update_rc_params(rc_params)
def loss_plot(location: str):
res = TrainResults.load()
batches_per_epoch = len(res.losses) // (res.epoch+1)
_, ax1 = plt.subplots(figsize=figsize_std)
res.losses
# Loss axis
x = np.arange(len(res.losses)) + 1
ax1.semilogy(x/batches_per_epoch, res.losses, color="gray", alpha=0.3)
x, y = running_avg(x, res.losses, neighbors=10)
ax1.semilogy(x/batches_per_epoch, y, color=tab_colours[0], label="Loss (Rolling Avg.)")
ax1.set_xlabel("Epoch")
ax1.set_ylabel("Cross Entropy Loss")
h, l = ax1.get_legend_handles_labels()
# Accuracy axis
if res.running_dev_evaluations:
ax2 = ax1.twinx()
x = range(len(res.running_train_statistics)+1)
ax2.plot(
x, 100*np.array([0]+[e.statistics["micro avg"]["f1-score"] for e in res.running_dev_evaluations]),
color=tab_colours[1], ms=15, label="Dev. Set F1", marker=".", lw=3,
)
ax2.plot(
x, 100*np.array([0]+[s["micro avg"]["f1-score"] for s in res.running_train_statistics]),
color=tab_colours[2], ms=15, label="Training Set F1", marker=".", lw=3,
)
ax2.set_ylim([0, 110])
ax2.set_ylabel("Micro Avg. F1 [%]")
h2, l2 = ax2.get_legend_handles_labels()
h += h2
l += l2
ax1.legend(h, l, loc="lower right", framealpha=1, edgecolor=(0, 0, 0, 1))
ax1.set_title("NER Fine-tuning of DaLUKE")
plt.grid()
plt.tight_layout()
plt.savefig(os.path.join(location, "finetune-plots", "loss.png"))
plt.close()
def running_f1_detail_plot(location: str):
res = TrainResults.load()
if not res.running_train_statistics:
return
_, ax = plt.subplots(figsize=figsize_std)
x = range(len(res.running_train_statistics)+1)
cols = iter(tab_colours)
train_stats = [(0,0,0), *[(s["micro avg"]["f1-score"], s["micro avg"]["precision"], s["micro avg"]["recall"]) for s in res.running_train_statistics]]
dev_stats = [(0,0,0), *[(e.statistics["micro avg"]["f1-score"], e.statistics["micro avg"]["precision"], e.statistics["micro avg"]["recall"]) for e in res.running_dev_evaluations]]
for stats, name in zip((train_stats, dev_stats), ("Training Set", "Dev. Set")):
ax.plot(x, [100*f1_score for f1_score, _, _ in stats], color=next(cols), linewidth=3, ms=15, marker=".", label=f"{name}")
ax.set_xlabel("Epoch")
ax.set_ylabel("Micro Avg. F1 [%]")
ax.set_xlim(left=0)
ax.set_ylim([0, 110])
plt.title("Running Performance of Fine-tuning")
ax.legend()
plt.grid()
plt.tight_layout()
plt.savefig(os.path.join(location, "finetune-plots", "f1s.png"))
plt.close()
def _do_prediction_distribution_plot(location: str, true_type_distribution: dict[str, int], pred_distributions: list[dict[str, int]], dataset: str):
types = sorted(list(set(true_type_distribution.keys()) - {"O"}))
type_sequences = {t: list() for t in types}
for dist in pred_distributions:
for t in types:
type_sequences[t].append(dist.get(t, 0))
_, ax = plt.subplots(figsize=figsize_std)
x = np.arange(1, len(pred_distributions)+1)
for i, t in enumerate(types):
ax.plot(
x,
type_sequences[t],
label=f"{t} predictions",
color=tab_colours[i],
linewidth=2,
marker=".",
markersize=20,
)
ax.axhline(y=true_type_distribution[t], color=tab_colours[i], linestyle="--", alpha=.8, lw=2)
h, l = ax.get_legend_handles_labels()
h += [Line2D([0], [0], color="black", linestyle="--")]
l += ["True annotation counts"]
ax.legend(h, l)
ax.set_xlim(left=0)
ax.set_ylim(bottom=0)
ax.set_xlabel("Epoch")
ax.set_ylabel("# Spans Predicted")
ax.set_title(f"Entity Predictions on {dataset.capitalize()}. Set During Fine-tuning")
plt.grid()
plt.tight_layout()
plt.savefig(os.path.join(location, "finetune-plots", f"{dataset}-dists.png"))
plt.close()
def prediction_distribution_plots(location: str):
res = TrainResults.load()
_do_prediction_distribution_plot(location, res.dev_true_type_distribution, res.dev_pred_distributions, "dev")
_do_prediction_distribution_plot(location, res.train_true_type_distribution, res.train_pred_distributions, "train")
@click.command()
@click.argument("location")
def make_finetuning_plots(location: str):
log.configure(os.path.join(location, "finetune-plots", "finetune-plot.log"), "Training plot of NER fine-tune")
TrainResults.subfolder = location
log("Loss and accuracy plot")
loss_plot(location)
log("Prediction distribution plots")
prediction_distribution_plots(location)
log("Detailed plot of running F1's")
running_f1_detail_plot(location)
if __name__ == "__main__":
with log.log_errors:
make_finetuning_plots()
| 4,613
| 0
| 114
|
06fdf5dfdb4933696507f332998511229cae25be
| 833
|
py
|
Python
|
VSCode_work/chapter10/chapter10_10_10.py
|
yangyahu-1994/Python-Crash-Course
|
6f8ef7fe8466d88931a0d3cc423ba5d966663b9d
|
[
"MIT"
] | 12
|
2020-10-22T14:03:27.000Z
|
2022-03-28T08:14:22.000Z
|
VSCode_work/chapter10/chapter10_10_10.py
|
yangyahu-1994/Python-Crash-Course
|
6f8ef7fe8466d88931a0d3cc423ba5d966663b9d
|
[
"MIT"
] | null | null | null |
VSCode_work/chapter10/chapter10_10_10.py
|
yangyahu-1994/Python-Crash-Course
|
6f8ef7fe8466d88931a0d3cc423ba5d966663b9d
|
[
"MIT"
] | 9
|
2020-12-22T10:22:12.000Z
|
2022-03-28T08:14:53.000Z
|
# 定义函数
def count_numbers(filename):
"""获取指定的文件,并计算单词'the'在每个文件中出现的次数"""
try:
with open(filename) as file_object:
contents = file_object.read()
except FileNotFoundError:
print(f"Sorry, the file {filename} does not exits.")
else:
# 计算出现的次数
numbers = contents.lower().count('the')
print(f"The word 'the' appears {numbers} times in file {filename}.")
# 获取文件
filenames = ['/home/yyh/Documents/Python_Crash_Course/Python-Crash-Course/VSCode_work/chapter10/data/63558-0.txt',
'/home/yyh/Documents/Python_Crash_Course/Python-Crash-Course/VSCode_work/chapter10/data/63559-0.txt',
'/home/yyh/Documents/Python_Crash_Course/Python-Crash-Course/VSCode_work/chapter10/data/63560-0.txt']
for filename in filenames:
count_numbers(filename)
| 41.65
| 115
| 0.683073
|
# 定义函数
def count_numbers(filename):
"""获取指定的文件,并计算单词'the'在每个文件中出现的次数"""
try:
with open(filename) as file_object:
contents = file_object.read()
except FileNotFoundError:
print(f"Sorry, the file {filename} does not exits.")
else:
# 计算出现的次数
numbers = contents.lower().count('the')
print(f"The word 'the' appears {numbers} times in file {filename}.")
# 获取文件
filenames = ['/home/yyh/Documents/Python_Crash_Course/Python-Crash-Course/VSCode_work/chapter10/data/63558-0.txt',
'/home/yyh/Documents/Python_Crash_Course/Python-Crash-Course/VSCode_work/chapter10/data/63559-0.txt',
'/home/yyh/Documents/Python_Crash_Course/Python-Crash-Course/VSCode_work/chapter10/data/63560-0.txt']
for filename in filenames:
count_numbers(filename)
| 0
| 0
| 0
|
01db368e89b0bf8334c4f381333f9897fa93f294
| 793
|
py
|
Python
|
apps/article/tests/article/test_article_list.py
|
magocod/django_repository
|
660664ba2321499e92c3c5c23719756db2569e90
|
[
"MIT"
] | 1
|
2019-10-01T01:39:29.000Z
|
2019-10-01T01:39:29.000Z
|
apps/article/tests/article/test_article_list.py
|
magocod/django_repository
|
660664ba2321499e92c3c5c23719756db2569e90
|
[
"MIT"
] | 7
|
2019-12-04T21:40:40.000Z
|
2020-06-26T21:49:51.000Z
|
apps/article/tests/article/test_article_list.py
|
magocod/django_repository
|
660664ba2321499e92c3c5c23719756db2569e90
|
[
"MIT"
] | 1
|
2020-04-08T02:46:31.000Z
|
2020-04-08T02:46:31.000Z
|
# standard library
import json
# local Django
from apps.article.models import Article
from apps.article.serializers.article import ArticleHeavySerializer
from apps.tests.fixtures import RepositoryTestCase
class ListArticleTest(RepositoryTestCase):
"""
...
"""
serializer = ArticleHeavySerializer
| 29.37037
| 71
| 0.727617
|
# standard library
import json
# local Django
from apps.article.models import Article
from apps.article.serializers.article import ArticleHeavySerializer
from apps.tests.fixtures import RepositoryTestCase
class ListArticleTest(RepositoryTestCase):
"""
...
"""
serializer = ArticleHeavySerializer
def test_get_all(self):
response = self.admin_client.get("/api/articles/")
response_data = json.loads(response.content)
serializer = self.serializer(Article.objects.all(), many=True,)
self.assertEqual(response.status_code, 200)
self.assertEqual(serializer.data, response_data)
def test_get_all_authenticated(self):
response = self.public_client.get("/api/articles/")
self.assertEqual(response.status_code, 401)
| 423
| 0
| 54
|
93787bb0da83949daeb1c9f0fd9a21dd757d69bd
| 275
|
py
|
Python
|
ai/games/random_ai_game.py
|
JonKruger/checkers
|
8cc1390fcb69c1af15ac740fb3321aea7357f5d0
|
[
"MIT"
] | null | null | null |
ai/games/random_ai_game.py
|
JonKruger/checkers
|
8cc1390fcb69c1af15ac740fb3321aea7357f5d0
|
[
"MIT"
] | null | null | null |
ai/games/random_ai_game.py
|
JonKruger/checkers
|
8cc1390fcb69c1af15ac740fb3321aea7357f5d0
|
[
"MIT"
] | null | null | null |
from ai.players.random_ai_player import RandomAIPlayer
from ai.games.ai_game import AIGame
from checkers.game import Game
import random
| 30.555556
| 70
| 0.781818
|
from ai.players.random_ai_player import RandomAIPlayer
from ai.games.ai_game import AIGame
from checkers.game import Game
import random
class RandomAIGame(AIGame):
def __init__(self, verbose=False):
super().__init__(RandomAIPlayer(), RandomAIPlayer(), verbose)
| 83
| 6
| 50
|
e3fac1b3c3b5a1717c6d26e6cb738d1ed4d5e6c2
| 8,281
|
py
|
Python
|
syne_tune/optimizer/schedulers/transfer_learning/zero_shot.py
|
awslabs/syne-tune
|
1dd8e157477b86db01047a9a7821780ea04389bc
|
[
"ECL-2.0",
"Apache-2.0"
] | 97
|
2021-11-18T17:14:30.000Z
|
2022-03-29T00:33:12.000Z
|
syne_tune/optimizer/schedulers/transfer_learning/zero_shot.py
|
awslabs/syne-tune
|
1dd8e157477b86db01047a9a7821780ea04389bc
|
[
"ECL-2.0",
"Apache-2.0"
] | 54
|
2021-11-18T17:14:12.000Z
|
2022-03-22T08:11:48.000Z
|
syne_tune/optimizer/schedulers/transfer_learning/zero_shot.py
|
awslabs/syne-tune
|
1dd8e157477b86db01047a9a7821780ea04389bc
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2021-11-29T11:47:32.000Z
|
2022-02-24T15:28:11.000Z
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Dict, Optional
import numpy as np
import pandas as pd
import xgboost
from syne_tune.blackbox_repository.blackbox_surrogate import BlackboxSurrogate
from syne_tune.config_space import Domain
from syne_tune.optimizer.schedulers.searchers.searcher import BaseSearcher
from syne_tune.optimizer.schedulers.transfer_learning import (
TransferLearningTaskEvaluations,
TransferLearningMixin,
)
logger = logging.getLogger(__name__)
| 44.762162
| 120
| 0.662601
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Dict, Optional
import numpy as np
import pandas as pd
import xgboost
from syne_tune.blackbox_repository.blackbox_surrogate import BlackboxSurrogate
from syne_tune.config_space import Domain
from syne_tune.optimizer.schedulers.searchers.searcher import BaseSearcher
from syne_tune.optimizer.schedulers.transfer_learning import (
TransferLearningTaskEvaluations,
TransferLearningMixin,
)
logger = logging.getLogger(__name__)
class ZeroShotTransfer(TransferLearningMixin, BaseSearcher):
def __init__(
self,
config_space: Dict,
transfer_learning_evaluations: Dict[str, TransferLearningTaskEvaluations],
metric: str,
mode: str = "min",
sort_transfer_learning_evaluations: bool = True,
use_surrogates: bool = False,
random_seed: Optional[int] = None,
) -> None:
"""
A zero-shot transfer hyperparameter optimization method which jointly selects configurations that minimize the
average rank obtained on historic metadata (transfer_learning_evaluations).
Reference: Sequential Model-Free Hyperparameter Tuning.
Martin Wistuba, Nicolas Schilling, Lars Schmidt-Thieme.
IEEE International Conference on Data Mining (ICDM) 2015.
:param config_space: Configuration space for trial evaluation function.
:param transfer_learning_evaluations: Dictionary from task name to offline evaluations.
:param metric: Objective name to optimize, must be present in transfer learning evaluations.
:param mode: Whether to minimize (min) or maximize (max)
:param sort_transfer_learning_evaluations: Use False if the hyperparameters for each task in
            transfer_learning_evaluations are already in the same order. If set to True, hyperparameters are sorted.
:param use_surrogates: If the same configuration is not evaluated on all tasks, set this to true. This will
generate a set of configurations and will impute their performance using surrogate models.
        :param random_seed: Used for randomly sampling candidates. Only used if use_surrogates is True.
"""
super().__init__(
config_space=config_space,
transfer_learning_evaluations=transfer_learning_evaluations,
metric=metric,
metric_names=[metric],
)
self._mode = mode
self._random_state = np.random.RandomState(random_seed)
if use_surrogates and len(transfer_learning_evaluations) <= 1:
use_surrogates = False
sort_transfer_learning_evaluations = False
if use_surrogates:
sort_transfer_learning_evaluations = False
transfer_learning_evaluations = (
self._create_surrogate_transfer_learning_evaluations(
config_space, transfer_learning_evaluations, metric
)
)
warning_message = "This searcher assumes that each hyperparameter configuration occurs in all tasks. "
scores = list()
hyperparameters = None
for task_name, task_data in transfer_learning_evaluations.items():
assert (
hyperparameters is None
or task_data.hyperparameters.shape == hyperparameters.shape
), warning_message
hyperparameters = task_data.hyperparameters
if sort_transfer_learning_evaluations:
hyperparameters = task_data.hyperparameters.sort_values(
list(task_data.hyperparameters.columns)
)
idx = hyperparameters.index.values
avg_scores = task_data.objective_values(metric).mean(axis=1)
if self._mode == "max":
avg_scores = avg_scores.max(axis=1)[idx]
else:
avg_scores = avg_scores.min(axis=1)[idx]
scores.append(avg_scores)
if not use_surrogates:
if len(transfer_learning_evaluations) > 1:
logger.warning(
warning_message
+ "If this is not the case, this searcher fails without a warning."
)
if not sort_transfer_learning_evaluations:
hyperparameters = hyperparameters.copy()
hyperparameters.reset_index(drop=True, inplace=True)
self._hyperparameters = hyperparameters
sign = 1 if self._mode == "min" else -1
self._scores = sign * pd.DataFrame(scores)
self._ranks = self._update_ranks()
def _create_surrogate_transfer_learning_evaluations(
self,
config_space: Dict,
transfer_learning_evaluations: Dict[str, TransferLearningTaskEvaluations],
metric: str,
) -> Dict[str, TransferLearningTaskEvaluations]:
"""
Creates transfer_learning_evaluations where each configuration is evaluated on each task using surrogate models.
"""
surrogate_transfer_learning_evaluations = dict()
for task_name, task_data in transfer_learning_evaluations.items():
estimator = BlackboxSurrogate.make_model_pipeline(
configuration_space=config_space,
fidelity_space={},
model=xgboost.XGBRegressor(),
)
X_train = task_data.hyperparameters
y_train = task_data.objective_values(metric).mean(axis=1)
if self._mode == "max":
y_train = y_train.max(axis=1)
else:
y_train = y_train.min(axis=1)
estimator.fit(X_train, y_train)
num_candidates = 10000 if len(config_space) >= 6 else 5 ** len(config_space)
hyperparameters_new = pd.DataFrame(
[
self._sample_random_config(config_space)
for _ in range(num_candidates)
]
)
objectives_evaluations_new = estimator.predict(hyperparameters_new).reshape(
-1, 1, 1, 1
)
surrogate_transfer_learning_evaluations[
task_name
] = TransferLearningTaskEvaluations(
configuration_space=config_space,
hyperparameters=hyperparameters_new,
objectives_names=[metric],
objectives_evaluations=objectives_evaluations_new,
)
return surrogate_transfer_learning_evaluations
def get_config(self, **kwargs) -> Optional[Dict]:
if self._ranks.shape[1] == 0:
return None
# Select greedy-best configuration considering all others
best_idx = self._ranks.mean(axis=0).idxmin()
# Update ranks for choosing each configuration considering the previously chosen ones
self._ranks.clip(upper=self._ranks[best_idx], axis=0, inplace=True)
# Drop the chosen configuration as a future candidate
self._scores.drop(columns=best_idx, inplace=True)
best_config = self._hyperparameters.loc[best_idx]
self._hyperparameters.drop(index=best_idx, inplace=True)
if self._ranks.std(axis=1).sum() == 0:
self._ranks = self._update_ranks()
return best_config.to_dict()
def _sample_random_config(self, config_space: Dict) -> Dict:
return {
k: v.sample(random_state=self._random_state) if isinstance(v, Domain) else v
for k, v in config_space.items()
}
def _update_ranks(self) -> pd.DataFrame:
return self._scores.rank(axis=1)
def _update(self, trial_id: str, config: Dict, result: Dict) -> None:
pass
def clone_from_state(self, state: Dict):
raise NotImplementedError()
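
# --- Editor's illustrative sketch; not part of the original file. ---
# Toy, self-contained illustration of the greedy average-rank selection performed
# in get_config(), written against plain pandas. The scores below are made up;
# rows are tasks, columns are candidate configurations, and lower is better.
def _greedy_rank_selection_demo():
    toy_scores = pd.DataFrame(
        [[0.3, 0.1, 0.5],   # task A
         [0.4, 0.2, 0.1]],  # task B
        columns=["c0", "c1", "c2"],
    )
    ranks = toy_scores.rank(axis=1)
    chosen = []
    for _ in range(2):
        best = ranks.mean(axis=0).idxmin()  # configuration with the lowest average rank
        chosen.append(best)
        # credit remaining candidates for tasks already covered by the chosen one
        ranks = ranks.clip(upper=ranks[best], axis=0).drop(columns=best)
    return chosen  # expected: ["c1", "c2"]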
| 1,129
| 6,085
| 23
|
5a21299c7960ef147aa799e8ad8d4e56f46263b8
| 423
|
py
|
Python
|
copymaks.py
|
LittleQBerry/sklearn-logist
|
4ed208547b9d93171a8da14d3d010ec721cd17fa
|
[
"MIT"
] | null | null | null |
copymaks.py
|
LittleQBerry/sklearn-logist
|
4ed208547b9d93171a8da14d3d010ec721cd17fa
|
[
"MIT"
] | null | null | null |
copymaks.py
|
LittleQBerry/sklearn-logist
|
4ed208547b9d93171a8da14d3d010ec721cd17fa
|
[
"MIT"
] | null | null | null |
import os
from shutil import copyfile
from os import listdir
save_dir =r'J:/game/seg_classification/data/'
imgs_dir =r'J:/game/seg_classification/_ouput_dir_/'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
for files in listdir(imgs_dir):
if files[-5] =='0':
source_file=os.path.join(imgs_dir +files)
target_file =os.path.join(save_dir +files)
copyfile(source_file,target_file)
| 22.263158
| 52
| 0.718676
|
import os
from shutil import copyfile
from os import listdir
save_dir =r'J:/game/seg_classification/data/'
imgs_dir =r'J:/game/seg_classification/_ouput_dir_/'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
for files in listdir(imgs_dir):
if files[-5] =='0':
source_file=os.path.join(imgs_dir +files)
target_file =os.path.join(save_dir +files)
copyfile(source_file,target_file)
| 0
| 0
| 0
|
c313d884667195d72422dcbdf27ccb9ddfa77912
| 3,927
|
py
|
Python
|
haiku/_src/random_test.py
|
rushic24/dm-haiku
|
8ee1a2125587831783ae7ae1e74baacec23ae56d
|
[
"Apache-2.0"
] | null | null | null |
haiku/_src/random_test.py
|
rushic24/dm-haiku
|
8ee1a2125587831783ae7ae1e74baacec23ae56d
|
[
"Apache-2.0"
] | null | null | null |
haiku/_src/random_test.py
|
rushic24/dm-haiku
|
8ee1a2125587831783ae7ae1e74baacec23ae56d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.random."""
import functools
from absl.testing import absltest
from haiku._src import base
from haiku._src import random
from haiku._src import transform
import jax
from jax import prng
import jax.numpy as jnp
import numpy as np
if __name__ == "__main__":
absltest.main()
| 32.188525
| 80
| 0.66794
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.random."""
import functools
from absl.testing import absltest
from haiku._src import base
from haiku._src import random
from haiku._src import transform
import jax
from jax import prng
import jax.numpy as jnp
import numpy as np
class RandomTest(absltest.TestCase):
def test_optimize_rng_splitting(self):
def f():
k1 = base.next_rng_key()
k2 = base.next_rng_key()
return k1, k2
key = jax.random.PRNGKey(42)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=1e-5)
# With optimize_rng_use the keys returned should be equal to split(n).
f_opt = transform.transform(random.optimize_rng_use(f))
jax.tree_multimap(assert_allclose,
f_opt.apply({}, key),
tuple(jax.random.split(key, 3))[1:])
# Without optimize_rng_use the keys should be equivalent to splitting in a
# loop.
f = transform.transform(f)
jax.tree_multimap(assert_allclose,
f.apply({}, key),
tuple(split_for_n(key, 2)))
def test_rbg_default_impl(self):
with jax.default_prng_impl("rbg"):
key = jax.random.PRNGKey(42)
self.assertEqual(key.shape, (4,))
_, apply = transform.transform(base.next_rng_key)
out_key = apply({}, key)
self.assertEqual(out_key.shape, (4,))
def test_rbg_default_impl_invalid_key_shape(self):
with jax.default_prng_impl("rbg"):
key = jax.random.PRNGKey(42)[0:2]
self.assertEqual(key.shape, (2,))
init, _ = transform.transform(base.next_rng_key)
with self.assertRaisesRegex(ValueError,
"Init must be called with an RNG"):
init(key)
class CustomRNGTest(absltest.TestCase):
def setUp(self):
super().setUp()
jax.config.update("jax_enable_custom_prng", True)
def tearDown(self):
super().tearDown()
jax.config.update("jax_enable_custom_prng", False)
def test_non_custom_key(self):
init, _ = transform.transform(base.next_rng_key)
init(jax.random.PRNGKey(42)) # does not crash
def test_custom_key(self):
count = 0
def count_splits(_, num):
nonlocal count
count += 1
return jnp.zeros((num, 13), np.uint32)
differently_shaped_prng_impl = prng.PRNGImpl(
# Testing a different key shape to make sure it's accepted by Haiku
key_shape=(13,),
seed=lambda _: jnp.zeros((13,), np.uint32),
split=count_splits,
random_bits=lambda *_, data: jnp.zeros(data, np.uint32),
fold_in=lambda key, _: key)
init, _ = transform.transform(base.next_rng_key)
key = prng.seed_with_impl(differently_shaped_prng_impl, 42)
init(key)
self.assertEqual(count, 1)
# testing if Tracers with a different key shape are accepted
jax.jit(init)(key)
self.assertEqual(count, 2)
def test_invalid_custom_key(self):
init, _ = transform.transform(base.next_rng_key)
with self.assertRaisesRegex(ValueError, "Init must be called with an RNG"):
init(jnp.ones((2,), dtype=jnp.uint32))
def split_for_n(key, n):
for _ in range(n):
key, subkey = jax.random.split(key)
yield subkey
if __name__ == "__main__":
absltest.main()
| 2,625
| 33
| 269
|
6a6b82dcc5b09f38955912cdd231dfc079e2215b
| 319
|
py
|
Python
|
csf_tz/after_sales_services/doctype/requested_payments/requested_payments_dashboard.py
|
Craftint/CSF_TZ
|
b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5
|
[
"MIT"
] | null | null | null |
csf_tz/after_sales_services/doctype/requested_payments/requested_payments_dashboard.py
|
Craftint/CSF_TZ
|
b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5
|
[
"MIT"
] | null | null | null |
csf_tz/after_sales_services/doctype/requested_payments/requested_payments_dashboard.py
|
Craftint/CSF_TZ
|
b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5
|
[
"MIT"
] | 1
|
2022-03-17T22:49:40.000Z
|
2022-03-17T22:49:40.000Z
|
from frappe import _
| 16.789474
| 50
| 0.579937
|
from frappe import _
def get_data():
return {
'fieldname': 're',
'non_standard_fieldnames': {
'Payment Entry': 'reference_name'
},
'internal_links': {
'Sales Order': ['references', 'reference_name']
},
'transactions': [
{
'label': _('Payments'),
'items': ['Payment Entry']
}
]
}
| 275
| 0
| 23
|
d7b4ca7a20b08e02e79aa7bd12cd25a873780374
| 3,180
|
py
|
Python
|
najdisi_sms/cli.py
|
brodul/najdi-si-sms
|
a73c2bfb55bcf7e2bfb4a75f50adff166873c6ed
|
[
"BSD-3-Clause"
] | 5
|
2016-01-08T21:35:21.000Z
|
2018-10-11T08:59:21.000Z
|
najdisi_sms/cli.py
|
brodul/najdi-si-sms
|
a73c2bfb55bcf7e2bfb4a75f50adff166873c6ed
|
[
"BSD-3-Clause"
] | 4
|
2016-01-09T14:30:33.000Z
|
2017-09-16T17:39:49.000Z
|
najdisi_sms/cli.py
|
brodul/najdi-si-sms
|
a73c2bfb55bcf7e2bfb4a75f50adff166873c6ed
|
[
"BSD-3-Clause"
] | 2
|
2017-09-08T21:45:25.000Z
|
2018-10-16T11:55:13.000Z
|
from argparse import ArgumentParser
import os
import sys
from six.moves.configparser import ConfigParser
from najdisi_sms import utils
from .api import SMSSender
log = utils.get_logger()
class SettingParser(object):
"""docstring for SettingParser"""
def merge_settings(self, parser_space):
"""
Merge config file and cli options
"""
if os.path.exists(parser_space.config):
ini_config = parse_ini(parser_space.config)
for attr in ['username', 'password']:
setattr(
parser_space,
attr,
getattr(parser_space, attr, None) or
ini_config.get('najdisi_sms', attr)
)
elif not self.default_config_path == parser_space.config:
            log.info('The config file you specified was not found!')
return parser_space
if __name__ == '__main__':
main()
| 27.179487
| 70
| 0.545597
|
from argparse import ArgumentParser
import os
import sys
from six.moves.configparser import ConfigParser
from najdisi_sms import utils
from .api import SMSSender
log = utils.get_logger()
class SettingParser(object):
"""docstring for SettingParser"""
def __init__(self, args=None):
self.args = args or sys.argv[1:]
home = os.path.expanduser('~')
self.default_config_path = os.path.join(
home,
'.config',
'najdisi_sms.ini'
)
self.argparser = self._create_argparser()
self.parser_space = self.argparser.parse_args(self.args)
self.namespace = self.merge_settings(self.parser_space)
self.check_password_username(self.namespace)
def _create_argparser(self):
parser = ArgumentParser()
parser.add_argument(
"rec_num",
metavar=u"reciever_NUM",
help="slovenian phone number starting with 0"
)
parser.add_argument(
"message",
metavar=u"MESSAGE",
help="SMS message (less than 160 characters)"
)
parser.add_argument(
"-c",
"--configfile",
dest="config",
help=u"Config file",
default=self.default_config_path
)
parser.add_argument(
"-u",
"--username",
dest="username",
help=u"Username"
)
parser.add_argument(
"-p",
"--password",
dest="password",
help=u"Password"
)
parser.add_argument(
"-A",
"--useragent",
dest="useragent",
help=u"HTTP User Agent",
default=("Mozilla/5.0 "
"(Windows; U; Windows NT 6.1; es-ES; rv:1.9.2.3)"
"Gecko/20100401 Firefox/3.6.3")
)
return parser
def merge_settings(self, parser_space):
"""
Merge config file and cli options
"""
def parse_ini(file_path):
config = ConfigParser()
config.read(file_path)
return config
if os.path.exists(parser_space.config):
ini_config = parse_ini(parser_space.config)
for attr in ['username', 'password']:
setattr(
parser_space,
attr,
getattr(parser_space, attr, None) or
ini_config.get('najdisi_sms', attr)
)
elif not self.default_config_path == parser_space.config:
            log.info('The config file you specified was not found!')
return parser_space
def check_password_username(self, namespace):
for attr in ['username', 'password']:
if not getattr(namespace, attr):
raise LookupError("Missing {}!".format(attr))
def main():
parser = SettingParser()
namespace = parser.namespace
sender = SMSSender(
namespace.username,
namespace.password,
namespace.useragent
)
sender.send(namespace.rec_num, namespace.message)
if __name__ == '__main__':
main()
| 2,108
| 0
| 134
|
93045327b67530baa933c410a2531ed53bd0fbbd
| 6,484
|
py
|
Python
|
utils/metrics.py
|
Hua-YS/Multi-Scene-Recognition
|
5c3db2a893221a1b49482675ac5f03532edd4f85
|
[
"MIT"
] | 5
|
2021-06-28T12:49:38.000Z
|
2021-09-07T11:21:29.000Z
|
utils/metrics.py
|
Hua-YS/Multi-Scene-Recognition
|
5c3db2a893221a1b49482675ac5f03532edd4f85
|
[
"MIT"
] | null | null | null |
utils/metrics.py
|
Hua-YS/Multi-Scene-Recognition
|
5c3db2a893221a1b49482675ac5f03532edd4f85
|
[
"MIT"
] | 2
|
2021-07-19T09:19:11.000Z
|
2021-11-07T20:25:04.000Z
|
"""
Modified from ML-GCN/util.py:
* adding calculations: OP, OR, OF1, CP, CR, CF1, EP, ER, EF1
* removing args: difficult_examples
"""
import math
import torch
from PIL import Image
from tqdm import tqdm
import numpy as np
import random
import torch.nn.functional as F
| 35.048649
| 111
| 0.549352
|
"""
Modified from ML-GCN/util.py:
* adding calculations: OP, OR, OF1, CP, CR, CF1, EP, ER, EF1
* removing args: difficult_examples
"""
import math
import torch
from PIL import Image
from tqdm import tqdm
import numpy as np
import random
import torch.nn.functional as F
class AveragePrecisionMeter(object):
def __init__(self):
super(AveragePrecisionMeter, self).__init__()
self.reset()
def reset(self):
"""Resets the meter with empty member variables"""
self.scores = torch.FloatTensor(torch.FloatStorage())
self.targets = torch.LongTensor(torch.LongStorage())
def add(self, output, target):
"""
concatenate samples of the new batch and previous batches
Args:
            output: predicted multiple labels, should be an NxK tensor, positive/negative means presence/absence
target: ground truth multiple labels, should be an NxK binary tensors, each is multi-hot
Notes:
N: the number of samples
K: the number of classes
"""
if not torch.is_tensor(output):
output = torch.from_numpy(output)
if not torch.is_tensor(target):
target = torch.from_numpy(target)
if output.dim() == 1:
output = output.view(-1, 1)
else:
assert output.dim() == 2, \
'wrong output size (should be 1D or 2D with one column \
per class)'
if target.dim() == 1:
target = target.view(-1, 1)
else:
assert target.dim() == 2, \
'wrong target size (should be 1D or 2D with one column \
per class)'
if self.scores.numel() > 0:
assert target.size(1) == self.targets.size(1), \
'dimensions for output should match previously added examples.'
# make sure storage is of sufficient size
if self.scores.storage().size() < self.scores.numel() + output.numel():
new_size = math.ceil(self.scores.storage().size() * 1.5)
self.scores.storage().resize_(int(new_size + output.numel()))
self.targets.storage().resize_(int(new_size + output.numel()))
# store scores and targets
offset = self.scores.size(0) if self.scores.dim() > 0 else 0
self.scores.resize_(offset + output.size(0), output.size(1))
self.targets.resize_(offset + target.size(0), target.size(1))
self.scores.narrow(0, offset, output.size(0)).copy_(output)
self.targets.narrow(0, offset, target.size(0)).copy_(target)
def value(self):
"""Returns the model's average precision for each class
Return:
ap (FloatTensor): 1xK tensor, with avg precision for each class k
"""
if self.scores.numel() == 0:
return 0
self.scores_nonzero = self.scores[:, self.targets.sum(axis=0)>0]
self.targets_nonzero = self.targets[:, self.targets.sum(axis=0)>0]
ap = torch.zeros(self.scores_nonzero.size(1))
rg = torch.arange(1, self.scores_nonzero.size(0)).float()
# compute average precision for each class
for k in range(self.scores_nonzero.size(1)):
# sort scores
scores = self.scores_nonzero[:, k]
targets = self.targets_nonzero[:, k]
# compute average precision
ap[k] = AveragePrecisionMeter.average_precision(scores, targets)
return ap
@staticmethod
def average_precision(output, target):
# sort examples
sorted, indices = torch.sort(output, dim=0, descending=True)
# Computes prec@i
pos_count = 0.
total_count = 0.
precision_at_i = 0.
for i in indices:
label = target[i]
if label == 1:
pos_count += 1
total_count += 1
if label == 1:
precision_at_i += pos_count / total_count
if pos_count==0:
precision_at_i = 0
else:
precision_at_i /= pos_count
return precision_at_i
def overall(self):
if self.scores.numel() == 0:
return 0
scores = self.scores.cpu().numpy()
targets = self.targets.cpu().numpy()
targets[targets == -1] = 0
return self.evaluation(scores, targets)
def overall_topk(self, k):
targets = self.targets.cpu().numpy()
targets[targets == -1] = 0
n, c = self.scores.size()
scores = np.zeros((n, c)) - 1
index = self.scores.topk(k, 1, True, True)[1].cpu().numpy()
tmp = self.scores.cpu().numpy()
for i in range(n):
for ind in index[i]:
scores[i, ind] = 1 if tmp[i, ind] >= 0 else -1
return self.evaluation(scores, targets)
def evaluation(self, scores_, targets_):
"""Returns the model's OP, OR, OF1, CP, CR, CF1, EP, ER, EF1
Return:
OP, OR, OF1, CP, CR, CF1, EP, ER, EF1: 9 Float tensors
"""
eps = 1e-10
n, n_class = scores_.shape
Nc, Np, Ng = np.zeros(n_class), np.zeros(n_class), np.zeros(n_class)
for k in range(n_class):
scores = scores_[:, k]
targets = targets_[:, k]
targets[targets == -1] = 0
Ng[k] = np.sum(targets == 1)
Np[k] = np.sum(scores >= 0)
Nc[k] = np.sum(targets * (scores >= 0))
OP = np.sum(Nc) / (np.sum(Np) + eps)
OR = np.sum(Nc) / (np.sum(Ng) + eps)
OF1 = (2 * OP * OR) / (OP + OR + eps)
CP = Nc / (Np + eps)
CR = Nc / (Ng + eps)
CF1 = (2 * CP * CR) / ( CP + CR + eps)
CP = np.mean(CP)
CR = np.mean(CR)
CF1 = np.mean(CF1)
# calculate example-based
pred = np.int8(np.round(1/(1+np.exp(-scores_))))
gt = np.int8(np.round(targets_))
TP_e = np.float32(np.sum(((pred+gt) == 2), 1))
FP_e = np.float32(np.sum(((pred-gt) == 1), 1))
FN_e = np.float32(np.sum(((pred-gt) == -1), 1))
TN_e = np.float32(np.sum(((pred+gt) == 0), 1))
        # when TP_e is 0, the eps term keeps the divisions below well defined and the scores evaluate to zero
Nc = TP_e
Np = TP_e + FP_e
Ng = TP_e + FN_e
EP = Nc / (Np + eps)
ER = Nc / (Ng + eps)
EF1 = (2 * EP * ER) / (EP + ER + eps)
EP = np.mean(EP)
ER = np.mean(ER)
EF1 = np.mean(EF1)
return OP, OR, OF1, CP, CR, CF1, EP, ER, EF1
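
# --- Editor's illustrative sanity check; not part of the original file. ---
# Made-up example (2 samples, 3 classes) showing how the meter is fed raw scores
# and multi-hot targets and then queried for per-class AP and aggregate metrics.
if __name__ == "__main__":
    meter = AveragePrecisionMeter()
    example_scores = torch.tensor([[2.0, -1.0, 0.5],
                                   [-0.5, 1.5, -2.0]])
    example_targets = torch.tensor([[1, 0, 1],
                                    [0, 1, 0]])
    meter.add(example_scores, example_targets)
    print("per-class AP:", meter.value())
    print("OP, OR, OF1, CP, CR, CF1, EP, ER, EF1:", meter.overall())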
| 1,316
| 4,872
| 24
|
c8983a64630f30f7f21c9bbdaa9a782c0bbb3db8
| 3,862
|
py
|
Python
|
compmatscipy/MyStats.py
|
YunyeongChoi/compmatscipy
|
44d0fe9f4d3806a21ee3bfcbca24b42120d91193
|
[
"MIT"
] | 5
|
2019-08-19T14:48:31.000Z
|
2022-03-24T20:08:31.000Z
|
compmatscipy/MyStats.py
|
YunyeongChoi/compmatscipy
|
44d0fe9f4d3806a21ee3bfcbca24b42120d91193
|
[
"MIT"
] | 1
|
2019-08-24T16:51:29.000Z
|
2019-08-24T16:51:29.000Z
|
compmatscipy/MyStats.py
|
YunyeongChoi/compmatscipy
|
44d0fe9f4d3806a21ee3bfcbca24b42120d91193
|
[
"MIT"
] | 5
|
2019-07-16T19:15:28.000Z
|
2021-03-29T04:49:54.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 6 12:05:46 2019
@author: chrisbartel
"""
import numpy as np
from sklearn.metrics import confusion_matrix, r2_score
| 31.145161
| 95
| 0.56318
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 6 12:05:46 2019
@author: chrisbartel
"""
import numpy as np
from sklearn.metrics import confusion_matrix, r2_score
def _make_binary_labels(data, thresh):
return [1 if v <= thresh else 0 for v in data]
class MyStats(object):
def __init__(self, actual, pred,
percentiles=[1, 10, 25, 50, 75, 90, 99],
stability_thresholds=[0, 0.025, 0.5, 0.1, 0.2]):
if len(actual) != len(pred):
raise ValueError
self.actual = actual
self.pred = pred
self.percentiles = percentiles
self.stability_thresholds = stability_thresholds
@property
def errors(self):
a, p = self.actual, self.pred
return [a[i] - p[i] for i in range(len(a))]
@property
def abs_errors(self):
errors = self.errors
return [abs(e) for e in errors]
@property
def sq_errors(self):
errors = self.errors
return [e**2 for e in errors]
@property
def mean_error(self):
return np.mean(self.errors)
@property
def mean_abs_error(self):
return np.mean(self.abs_errors)
@property
def root_mean_sq_error(self):
return np.sqrt(np.mean(self.sq_errors))
@property
def median_error(self):
return np.median(self.errors)
@property
def median_abs_error(self):
return np.median(self.abs_errors)
@property
def root_median_sq_error(self):
return np.sqrt(np.median(self.sq_errors))
@property
def r2(self):
return r2_score(self.actual, self.pred)
@property
def per_errors(self):
percentiles = self.percentiles
errors = self.errors
        return {p : np.percentile(errors, p) for p in percentiles}
@property
def per_abs_errors(self):
percentiles = self.percentiles
errors = self.abs_errors
        return {p : np.percentile(errors, p) for p in percentiles}
@property
def per_sq_errors(self):
percentiles = self.percentiles
errors = self.sq_errors
        return {p : np.sqrt(np.percentile(errors, p)) for p in percentiles}
@property
def regression_stats(self):
return {'abs' : {'mean' : self.mean_abs_error,
'median' : self.median_abs_error,
'per' : self.per_abs_errors},
'raw' : {'mean' : self.mean_error,
'median' : self.median_error,
'per' : self.per_errors},
'sq' : {'mean' : self.root_mean_sq_error,
'median' : self.root_median_sq_error,
'per' : self.per_sq_errors},
'r2' : self.r2}
def confusion(self, thresh):
actual = _make_binary_labels(self.actual, thresh)
pred = _make_binary_labels(self.pred, thresh)
labels = ['tn', 'fp', 'fn', 'tp']
return dict(zip(labels, confusion_matrix(actual, pred).ravel()))
def classification_scores(self, thresh):
        confusion = self.confusion(thresh)
        tn, fp, fn, tp = [confusion[stat] for stat in ['tn', 'fp', 'fn', 'tp']]
prec = tp/(tp+fp)
rec = tp/(tp+fn)
f1 = 2*(prec*rec)/(prec+rec)
acc = (tp+tn)/(tp+tn+fp+fn)
fpr = fp/(fp+tn)
return {'precision' : prec,
'recall' : rec,
'f1' : f1,
'accuracy' : acc,
'fpr' : fpr}
@property
def classification_stats(self):
threshs = self.stability_thresholds
        return {thresh : {'raw' : self.confusion(thresh),
'scores' : self.classification_scores(thresh)} for thresh in threshs}
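
# --- Editor's illustrative usage sketch; not part of the original file. ---
# Made-up actual/predicted values (e.g. decomposition energies in eV/atom) showing
# the regression summary and the confusion counts at a 0.1 stability threshold.
if __name__ == "__main__":
    actual = [0.00, 0.10, -0.05, 0.30, 0.02]
    pred = [0.01, 0.12, -0.02, 0.25, 0.08]
    stats = MyStats(actual, pred, percentiles=[25, 50, 75], stability_thresholds=[0.0, 0.1])
    print(stats.regression_stats)
    print(stats.confusion(0.1))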
| 2,860
| 770
| 46
|
471a892d7e1469bc55d6a5fb681247474418f271
| 2,531
|
py
|
Python
|
ceph_deploy/hosts/suse/install.py
|
zidarsk8/ceph-deploy
|
e286d7d0cf6e161708909c91ea7f99aacf08c7c4
|
[
"MIT"
] | 1
|
2018-01-03T03:13:24.000Z
|
2018-01-03T03:13:24.000Z
|
ceph_deploy/hosts/suse/install.py
|
zidarsk8/ceph-deploy
|
e286d7d0cf6e161708909c91ea7f99aacf08c7c4
|
[
"MIT"
] | null | null | null |
ceph_deploy/hosts/suse/install.py
|
zidarsk8/ceph-deploy
|
e286d7d0cf6e161708909c91ea7f99aacf08c7c4
|
[
"MIT"
] | null | null | null |
from ceph_deploy.util import templates
from ceph_deploy.lib.remoto import process
| 25.31
| 112
| 0.453971
|
from ceph_deploy.util import templates
from ceph_deploy.lib.remoto import process
def install(distro, version_kind, version, adjust_repos):
release = distro.release
machine = distro.machine_type
if version_kind in ['stable', 'testing']:
key = 'release'
else:
key = 'autobuild'
    # use a separate name for the repo distro string so the connection object is not shadowed
    if distro.codename == 'Mantis':
        distro_name = 'opensuse12'
    else:
        distro_name = 'sles-11sp2'
if adjust_repos:
process.run(
distro.conn,
[
'rpm',
'--import',
"https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc".format(key=key)
]
)
if version_kind == 'stable':
url = 'http://ceph.com/rpm-{version}/{distro}/'.format(
version=version,
                distro=distro_name,
)
elif version_kind == 'testing':
            url = 'http://ceph.com/rpm-testing/{distro}'.format(distro=distro_name)
elif version_kind == 'dev':
url = 'http://gitbuilder.ceph.com/ceph-rpm-{distro}{release}-{machine}-basic/ref/{version}/'.format(
                distro=distro_name,
release=release.split(".", 1)[0],
machine=machine,
version=version,
)
process.run(
distro.conn,
[
'rpm',
'-Uvh',
'--replacepkgs',
'--force',
'--quiet',
'{url}noarch/ceph-release-1-0.noarch.rpm'.format(
url=url,
),
]
)
process.run(
distro.conn,
[
'zypper',
'--non-interactive',
'--quiet',
'install',
'ceph',
],
)
def mirror_install(distro, repo_url, gpg_url, adjust_repos):
repo_url = repo_url.strip('/') # Remove trailing slashes
if adjust_repos:
process.run(
distro.conn,
[
'rpm',
'--import',
gpg_url,
]
)
ceph_repo_content = templates.ceph_repo.format(
repo_url=repo_url,
gpg_url=gpg_url
)
distro.conn.remote_module.write_yum_repo(ceph_repo_content)
process.run(
distro.conn,
[
'zypper',
'--non-interactive',
'--quiet',
'install',
'ceph',
],
)
| 2,401
| 0
| 46
|
74891ac87248010c5d11d2a67dbd35f68b342069
| 29
|
py
|
Python
|
examples/class_trivial.py
|
doboy/Underscore
|
d98273db3144cda79191d2c90f45d81b6d700b1f
|
[
"MIT"
] | 7
|
2016-09-23T00:44:05.000Z
|
2021-10-04T21:19:12.000Z
|
examples/class_trivial.py
|
jameswu1991/Underscore
|
d98273db3144cda79191d2c90f45d81b6d700b1f
|
[
"MIT"
] | 1
|
2016-09-23T00:45:05.000Z
|
2019-02-16T19:05:37.000Z
|
examples/class_trivial.py
|
jameswu1991/Underscore
|
d98273db3144cda79191d2c90f45d81b6d700b1f
|
[
"MIT"
] | 3
|
2016-09-23T01:13:15.000Z
|
2018-07-20T21:22:17.000Z
|
Bar()
| 5.8
| 12
| 0.517241
|
class Bar():
pass
Bar()
| 0
| 0
| 22
|
983347675b3f89295d6d1c7b7c4125a61b2ca029
| 771
|
py
|
Python
|
project_euler/problem_13/sol1.py
|
ming-nju/Python_learn
|
a0d31fe79310ed636047b1595da04a824394f3bc
|
[
"MIT"
] | 2
|
2019-07-19T07:45:01.000Z
|
2019-07-19T07:45:09.000Z
|
project_euler/problem_13/sol1.py
|
ming-nju/Python_learn
|
a0d31fe79310ed636047b1595da04a824394f3bc
|
[
"MIT"
] | null | null | null |
project_euler/problem_13/sol1.py
|
ming-nju/Python_learn
|
a0d31fe79310ed636047b1595da04a824394f3bc
|
[
"MIT"
] | 3
|
2019-04-29T02:36:37.000Z
|
2019-10-05T12:17:59.000Z
|
"""
Problem Statement:
Work out the first ten digits of the sum of the following one-hundred 50-digit
numbers.
"""
from __future__ import print_function
import os
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
def solution(array):
"""Returns the first ten digits of the sum of the array elements.
>>> sum = 0
>>> array = []
>>> with open(os.path.dirname(__file__) + "/num.txt","r") as f:
... for line in f:
... array.append(int(line))
...
>>> solution(array)
'5537376230'
"""
return str(sum(array))[:10]
if __name__ == "__main__":
n = int(input().strip())
array = []
for i in range(n):
array.append(int(input().strip()))
print(solution(array))
| 20.837838
| 78
| 0.594034
|
"""
Problem Statement:
Work out the first ten digits of the sum of the following one-hundred 50-digit
numbers.
"""
from __future__ import print_function
import os
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
def solution(array):
"""Returns the first ten digits of the sum of the array elements.
>>> sum = 0
>>> array = []
>>> with open(os.path.dirname(__file__) + "/num.txt","r") as f:
... for line in f:
... array.append(int(line))
...
>>> solution(array)
'5537376230'
"""
return str(sum(array))[:10]
if __name__ == "__main__":
n = int(input().strip())
array = []
for i in range(n):
array.append(int(input().strip()))
print(solution(array))
| 0
| 0
| 0
|
06be20eddfdbb8f127ad0de90c3d9e742d293f5a
| 2,275
|
py
|
Python
|
extensions/music.py
|
ggoncalopereira/JBB.py
|
e66419466cc6d35e134cceb8c8ad48102556658b
|
[
"MIT"
] | null | null | null |
extensions/music.py
|
ggoncalopereira/JBB.py
|
e66419466cc6d35e134cceb8c8ad48102556658b
|
[
"MIT"
] | null | null | null |
extensions/music.py
|
ggoncalopereira/JBB.py
|
e66419466cc6d35e134cceb8c8ad48102556658b
|
[
"MIT"
] | 1
|
2020-12-10T23:08:52.000Z
|
2020-12-10T23:08:52.000Z
|
import discord
from discord.ext import commands
import json
import subprocess
class Music(commands.Cog):
"""Play all great classics"""
@commands.command(name='play',
description="play a given music",
brief="play a given music")
@commands.command(name='stop',
description="stop music and leave voice channel",
brief="stop music")
| 39.224138
| 119
| 0.565714
|
import discord
from discord.ext import commands
import json
import subprocess
class Music(commands.Cog):
"""Play all great classics"""
def __init__(self, bot):
self.bot = bot
@commands.command(name='play',
description="play a given music",
brief="play a given music")
async def play(self, ctx, music):
#play a mp3 file
#check if user in voice channel
if ctx.message.author.voice_channel:
music = music.lower()
#check if requested music exists
if music in self.bot.musicMap:
#if bot is not connected to voice channel connect
if self.bot.voice_client == None:
voice = await self.bot.join_voice_channel(ctx.message.author.voice_channel)
self.bot.voice_client = voice
#if bot is connected and is playing dont play
if self.bot.player_client != None and self.bot.player_client.is_playing():
await ctx.send("Already Playing")
else:
#create player and play file
player = self.bot.voice_client.create_ffmpeg_player(self.bot.MUSIC_PATH + self.bot.musicMap[music])
self.bot.player_client = player
player.start()
else:
await ctx.send("Invalid Music")
else:
await ctx.send("You're not in a voice channel")
@commands.command(name='stop',
description="stop music and leave voice channel",
brief="stop music")
async def stop(self, ctx):
appInfo = await self.bot.application_info()
#check if user in voice channel
if ctx.message.author.voice_channel:
#cheack if bot in voice channel
if self.bot.voice_client:
#disconnect
await self.bot.voice_client.disconnect()
self.bot.voice_client = None
self.bot.player_client = None
else:
await ctx.send(appInfo.name + " not in a voice channel")
else:
await ctx.send("You're not in a voice channel")
def setup(bot):
bot.add_cog(Music(bot))
| 1,738
| 0
| 101
|
1af70b5c74ac9c6f87ca8ab4ea3b051aa0ade4f9
| 2,313
|
py
|
Python
|
dmscripts/bulk_upload_documents.py
|
alphagov-mirror/digitalmarketplace-scripts
|
8a7ef9b2b5f5fffea6e012bd676b095a27d35101
|
[
"MIT"
] | 1
|
2020-06-23T01:55:31.000Z
|
2020-06-23T01:55:31.000Z
|
dmscripts/bulk_upload_documents.py
|
alphagov-mirror/digitalmarketplace-scripts
|
8a7ef9b2b5f5fffea6e012bd676b095a27d35101
|
[
"MIT"
] | 267
|
2015-10-12T12:43:52.000Z
|
2021-08-19T10:38:55.000Z
|
dmscripts/bulk_upload_documents.py
|
alphagov-mirror/digitalmarketplace-scripts
|
8a7ef9b2b5f5fffea6e012bd676b095a27d35101
|
[
"MIT"
] | 7
|
2015-11-11T16:47:41.000Z
|
2021-04-10T18:03:04.000Z
|
import re
import csv
from dmutils.documents import get_document_path, generate_download_filename
| 36.714286
| 119
| 0.707739
|
import re
import csv
from dmutils.documents import get_document_path, generate_download_filename
def upload_file(bucket, dry_run, file_path, framework_slug, bucket_category, supplier_name_dict=None):
# Retrieve the supplier ID from the filepath
supplier_id = get_supplier_id_from_framework_file_path(file_path)
# Construct the document name
document_name = get_document_name_from_file_path(file_path)
# Don't upload signed agreement files
if 'signed-framework-agreement' in document_name:
raise ValueError(
f"'{document_name}'. Signed and countersigned agreement documents should not be uploaded "
f"using this script as they require the document URL to be stored in the database."
)
# Construct the upload path
upload_path = get_document_path(framework_slug, supplier_id, bucket_category, document_name)
# Get the download_filename if TSV supplied
if supplier_name_dict is None:
download_filename = None
else:
supplier_name = supplier_name_dict[supplier_id]
download_filename = generate_download_filename(supplier_id, document_name, supplier_name)
# Do the actual upload
if not dry_run:
with open(file_path, 'rb') as source_file:
bucket.save(upload_path, source_file, acl='bucket-owner-full-control', download_filename=download_filename)
print(supplier_id)
else:
print("[Dry-run] UPLOAD: '{}' to '{}'".format(file_path, upload_path))
def get_supplier_id_from_framework_file_path(path):
match = re.search(r'(?:/|-)(\d{5,6})[-_]', path)
if not match:
raise ValueError("Could not find supplier ID in path {}".format(path))
return match.group(1)
def get_document_name_from_file_path(path):
match = re.search(r'/\d{5,6}-(.*)', path)
if not match:
raise ValueError("Could not find valid document name in path {}".format(path))
return match.group(1)
def get_supplier_name_dict_from_tsv(tsv_path):
if not tsv_path or not tsv_path.endswith('.tsv'):
return None
suppliers_name_dict = {}
with open(tsv_path, 'r') as tsvfile:
tsv_reader = csv.reader(tsvfile, delimiter='\t')
for row in tsv_reader:
suppliers_name_dict[row[0]] = row[1]
return suppliers_name_dict
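# Editor's note: a minimal usage sketch of the two path helpers above, using a
# hypothetical file path (not taken from the original script or its data).
if __name__ == '__main__':
    example_path = 'frameworks/g-cloud-11/documents/123456-result-letter.pdf'
    print(get_supplier_id_from_framework_file_path(example_path))  # -> 123456
    print(get_document_name_from_file_path(example_path))          # -> result-letter.pdf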
| 2,119
| 0
| 92
|
042b8109e2f671dcb6a5528205ba2a23432c656d
| 1,200
|
py
|
Python
|
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Dynamic_Stability/Full_Linearized_Equations/Supporting_Functions/cm_alphadot.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Dynamic_Stability/Full_Linearized_Equations/Supporting_Functions/cm_alphadot.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Dynamic_Stability/Full_Linearized_Equations/Supporting_Functions/cm_alphadot.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
## @ingroup Methods-Flight_Dynamics-Dynamic_Stability-Full_Linearized_Equations-Supporting_Functions
# cm_alphadot.py
#
# Created: Jun 2014, A. Wendorff
# Modified: Jan 2016, E. Botero
# ----------------------------------------------------------------------
# Method
# ----------------------------------------------------------------------
## @ingroup Methods-Flight_Dynamics-Dynamic_Stability-Full_Linearized_Equations-Supporting_Functions
def cm_alphadot(cm_i, ep_alpha, l_t, mac):
""" This calculates the pitching moment coefficient with respect to the
rate of change of the angle of attack of the aircraft
Assumptions:
None
Source:
J.H. Blakelock, "Automatic Control of Aircraft and Missiles"
Wiley & Sons, Inc. New York, 1991, (pg 23)
Inputs:
cm_i [dimensionless]
ep_alpha [dimensionless]
l_t [meters]
mac [meters]
Outputs:
cm_alphadot [dimensionless]
Properties Used:
N/A
"""
# Generating Stability derivative
cm_alphadot = 2. * cm_i * ep_alpha * l_t / mac
return cm_alphadot
| 30.769231
| 100
| 0.55
|
## @ingroup Methods-Flight_Dynamics-Dynamic_Stability-Full_Linearized_Equations-Supporting_Functions
# cm_alphadot.py
#
# Created: Jun 2014, A. Wendorff
# Modified: Jan 2016, E. Botero
# ----------------------------------------------------------------------
# Method
# ----------------------------------------------------------------------
## @ingroup Methods-Flight_Dynamics-Dynamic_Stability-Full_Linearized_Equations-Supporting_Functions
def cm_alphadot(cm_i, ep_alpha, l_t, mac):
""" This calculates the pitching moment coefficient with respect to the
rate of change of the angle of attack of the aircraft
Assumptions:
None
Source:
J.H. Blakelock, "Automatic Control of Aircraft and Missiles"
Wiley & Sons, Inc. New York, 1991, (pg 23)
Inputs:
cm_i [dimensionless]
ep_alpha [dimensionless]
l_t [meters]
mac [meters]
Outputs:
cm_alphadot [dimensionless]
Properties Used:
N/A
"""
# Generating Stability derivative
cm_alphadot = 2. * cm_i * ep_alpha * l_t / mac
return cm_alphadot
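# Editor's note: a minimal sanity-check sketch with hypothetical (non-physical)
# inputs, purely to illustrate the formula above; not part of SUAVE.
if __name__ == '__main__':
    # 2 * (-1.2) * 0.3 * 15.0 / 4.0 = -2.7
    print(cm_alphadot(cm_i=-1.2, ep_alpha=0.3, l_t=15.0, mac=4.0))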
| 0
| 0
| 0
|
d171c4fc232b86e1fa3960aaaa0c13f95e31ee4a
| 1,697
|
py
|
Python
|
test/application/test_request_assertion_model.py
|
Ashaba/API-Monitor
|
533eb6698fcb5decb48f746784af6894844b3c69
|
[
"MIT"
] | null | null | null |
test/application/test_request_assertion_model.py
|
Ashaba/API-Monitor
|
533eb6698fcb5decb48f746784af6894844b3c69
|
[
"MIT"
] | 22
|
2018-02-06T19:53:11.000Z
|
2021-04-30T20:35:01.000Z
|
test/application/test_request_assertion_model.py
|
Ashaba/API-Monitor
|
533eb6698fcb5decb48f746784af6894844b3c69
|
[
"MIT"
] | null | null | null |
from test.base import BaseTestCase
from application.models import RequestAssertion
| 41.390244
| 95
| 0.831467
|
from test.base import BaseTestCase
from application.models import RequestAssertion
class TestRequestAssertionModel(BaseTestCase):
def setUp(self):
super(TestRequestAssertionModel, self).setUp()
self.test_request_assertion = RequestAssertion(
request_id="1",
assertion_type="Status Code",
comparison="equal",
value=200
)
def test_create_request_assertion(self):
old_request_assertions = RequestAssertion.fetch_all()
self.assertEqual(len(old_request_assertions), 0)
self.test_request_assertion.save()
current_request_assertions = RequestAssertion.fetch_all()
self.assertEqual(len(current_request_assertions), len(old_request_assertions) + 1)
def test_update_request_assertion(self):
self.test_request_assertion.save()
request_assertion = RequestAssertion.get(self.test_request_assertion.id)
old_request_assertion_comparison = request_assertion.comparison
request_assertion.comparison = 'less than'
request_assertion.save()
new_request_assertion_comparison = RequestAssertion.get(self.test_request_assertion.id)
self.assertEqual(new_request_assertion_comparison.comparison, 'less than')
self.assertNotEqual(old_request_assertion_comparison, new_request_assertion_comparison)
def test_delete_request_assertion(self):
self.test_request_assertion.save()
old_request_assertions = RequestAssertion.fetch_all()
particular_request_assertion = RequestAssertion.find_first(id=self.test_request_assertion.id)
particular_request_assertion.delete()
current_request_assertions = RequestAssertion.fetch_all()
self.assertNotEqual(len(old_request_assertions), len(current_request_assertions))
self.assertEqual(len(current_request_assertions), 0)
| 1,467
| 25
| 121
|
59a020e72f267e4924a4e2b3801e624ac28cb564
| 539
|
py
|
Python
|
regex/count_smiley_faces.py
|
ahmedelq/PythonicAlgorithms
|
ce10dbb6e1fd0ea5c922a932b0f920236aa411bf
|
[
"MIT"
] | null | null | null |
regex/count_smiley_faces.py
|
ahmedelq/PythonicAlgorithms
|
ce10dbb6e1fd0ea5c922a932b0f920236aa411bf
|
[
"MIT"
] | null | null | null |
regex/count_smiley_faces.py
|
ahmedelq/PythonicAlgorithms
|
ce10dbb6e1fd0ea5c922a932b0f920236aa411bf
|
[
"MIT"
] | null | null | null |
#Author: ahmelq - github.com/ahmedelq/
#License: MIT
#This is a solution of https://www.codewars.com/kata/583203e6eb35d7980400002a
if __name__ == "__main__":
print(
countSmileys([':)', ';(', ';}', ':-D']), # should return 2
countSmileys([';D', ':-(', ':-)', ';~)']), # should return 3
countSmileys([';]', ':[', ';*', ':$', ';-D']) # should return 1
)
| 26.95
| 77
| 0.489796
|
#Author: ahmelq - github.com/ahmedelq/
#License: MIT
#This is a solution of https://www.codewars.com/kata/583203e6eb35d7980400002a
def countSmileys(arr):
import re
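    # Editor's note on the pattern below: a valid smiley has eyes ':' or ';', an
    # optional nose '-' or '~', and a mouth ')' or 'D' -- hence [:;][-~]?[\)D].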
return len(
re.compile(r'[:;][-~]?[\)D]')
.findall(' '.join(arr))
)
if __name__ == "__main__":
print(
countSmileys([':)', ';(', ';}', ':-D']), # should return 2
countSmileys([';D', ':-(', ':-)', ';~)']), # should return 3
countSmileys([';]', ':[', ';*', ':$', ';-D']) # should return 1
)
| 111
| 0
| 23
|
10d104b9b64fc13f624cb8a75615a586496f8dc6
| 6,984
|
py
|
Python
|
plastron/commands/export.py
|
dsteelma-umd/plastron
|
d0e344c65ee2dfeba4fd78df3f73a1ae5f42b1d0
|
[
"Apache-2.0"
] | null | null | null |
plastron/commands/export.py
|
dsteelma-umd/plastron
|
d0e344c65ee2dfeba4fd78df3f73a1ae5f42b1d0
|
[
"Apache-2.0"
] | null | null | null |
plastron/commands/export.py
|
dsteelma-umd/plastron
|
d0e344c65ee2dfeba4fd78df3f73a1ae5f42b1d0
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import os
from argparse import Namespace
from datetime import datetime
from tempfile import NamedTemporaryFile
from time import sleep
from plastron import pcdm
from plastron.stomp import Message
from plastron.exceptions import FailureException, DataReadException, RESTAPIException
from plastron.namespaces import get_manager
from plastron.serializers import SERIALIZER_CLASSES
from plastron.util import LocalFile
logger = logging.getLogger(__name__)
nsm = get_manager()
| 36.375
| 113
| 0.507016
|
import json
import logging
import os
from argparse import Namespace
from datetime import datetime
from tempfile import NamedTemporaryFile
from time import sleep
from plastron import pcdm
from plastron.stomp import Message
from plastron.exceptions import FailureException, DataReadException, RESTAPIException
from plastron.namespaces import get_manager
from plastron.serializers import SERIALIZER_CLASSES
from plastron.util import LocalFile
logger = logging.getLogger(__name__)
nsm = get_manager()
def configure_cli(subparsers):
parser = subparsers.add_parser(
name='export',
description='Export resources from the repository'
)
parser.add_argument(
'-o', '--output-file',
help='File to write export package to',
action='store',
)
parser.add_argument(
'-f', '--format',
help='Export job format',
action='store',
choices=SERIALIZER_CLASSES.keys(),
required=True
)
parser.add_argument(
'--uri-template',
help='Public URI template',
action='store'
)
parser.add_argument(
'uris',
nargs='*',
help='URIs of repository objects to export'
)
parser.set_defaults(cmd_name='export')
class Command:
def __init__(self):
self.result = None
def __call__(self, *args, **kwargs):
for result in self.execute(*args, **kwargs):
pass
def execute(self, fcrepo, args):
start_time = datetime.now().timestamp()
count = 0
errors = 0
total = len(args.uris)
try:
serializer_class = SERIALIZER_CLASSES[args.format]
except KeyError:
logger.error(f'Unknown format: {args.format}')
raise FailureException()
logger.debug(f'Exporting to file {args.output_file}')
with serializer_class(args.output_file, public_uri_template=args.uri_template) as serializer:
for uri in args.uris:
r = fcrepo.head(uri)
if r.status_code == 200:
# do export
if 'describedby' in r.links:
# the resource is a binary, get the RDF description URI
rdf_uri = r.links['describedby']['url']
else:
rdf_uri = uri
logger.info(f'Exporting item {count + 1}/{total}: {uri}')
graph = fcrepo.get_graph(rdf_uri)
try:
serializer.write(graph)
count += 1
except DataReadException as e:
# log the failure, but continue to attempt to export the rest of the URIs
logger.error(f'Export of {uri} failed: {e}')
errors += 1
sleep(1)
else:
# log the failure, but continue to attempt to export the rest of the URIs
logger.error(f'Unable to retrieve {uri}')
errors += 1
# update the status
now = datetime.now().timestamp()
yield {
'time': {
'started': start_time,
'now': now,
'elapsed': now - start_time
},
'count': {
'total': total,
'exported': count,
'errors': errors
}
}
logger.info(f'Exported {count} of {total} items')
self.result = {
'content_type': serializer.content_type,
'file_extension': serializer.file_extension,
'count': {
'total': total,
'exported': count,
'errors': errors
}
}
def process_message(listener, message):
# define the processor for this message
def process():
if message.job_id is None:
logger.error('Expecting a PlastronJobId header')
else:
uris = message.body.split('\n')
export_format = message.args.get('format', 'text/turtle')
logger.info(f'Received message to initiate export job {message.job_id} containing {len(uris)} items')
logger.info(f'Requested export format is {export_format}')
try:
command = Command()
with NamedTemporaryFile() as export_fh:
logger.debug(f'Export temporary file name is {export_fh.name}')
args = Namespace(
uris=uris,
output_file=export_fh.name,
format=export_format,
uri_template=listener.public_uri_template
)
for status in command.execute(listener.repository, args):
listener.broker.connection.send(
'/topic/plastron.jobs.status',
headers={
'PlastronJobId': message.job_id
},
body=json.dumps(status)
)
job_name = message.args.get('name', message.job_id)
filename = job_name + command.result['file_extension']
file = pcdm.File(LocalFile(
export_fh.name,
mimetype=command.result['content_type'],
filename=filename
))
with listener.repository.at_path('/exports'):
file.create_object(repository=listener.repository)
command.result['download_uri'] = file.uri
logger.info(f'Uploaded export file to {file.uri}')
logger.debug(f'Export temporary file size is {os.path.getsize(export_fh.name)}')
logger.info(f'Export job {message.job_id} complete')
return Message(
headers={
'PlastronJobId': message.job_id,
'PlastronJobStatus': 'Done',
'persistent': 'true'
},
body=json.dumps(command.result)
)
except (FailureException, RESTAPIException) as e:
logger.error(f"Export job {message.job_id} failed: {e}")
return Message(
headers={
'PlastronJobId': message.job_id,
'PlastronJobStatus': 'Failed',
'PlastronJobError': str(e),
'persistent': 'true'
}
)
# process message
listener.executor.submit(process).add_done_callback(listener.get_response_handler(message.id))
| 6,339
| -7
| 149
|
2d0af0d7fb0d233d7cf933b498df9ce8898f7a13
| 8,312
|
py
|
Python
|
helpers/ResNet.py
|
kchare/advex_notbugs_features
|
0ec0578a1aba2bdb86854676c005488091b64123
|
[
"MIT"
] | 2
|
2022-02-08T11:51:12.000Z
|
2022-02-23T00:30:07.000Z
|
helpers/ResNet.py
|
kchare/advex_notbugs_features
|
0ec0578a1aba2bdb86854676c005488091b64123
|
[
"MIT"
] | null | null | null |
helpers/ResNet.py
|
kchare/advex_notbugs_features
|
0ec0578a1aba2bdb86854676c005488091b64123
|
[
"MIT"
] | 2
|
2021-12-21T20:31:28.000Z
|
2022-01-21T17:06:34.000Z
|
<<<<<<< HEAD
'''Implements ResNet9,..56 dynamically for CIFAR-10
Description of implementation can be found here: https://arxiv.org/pdf/1512.03385.pdf'''
import tensorflow as tf
class ResNetBlock(tf.keras.layers.Layer):
'''See official RStudio/Keras documentation here:
https://github.com/rstudio/keras/blob/main/vignettes/examples/cifar10_resnet.py
for implementation of residual block layers
Implements residual block described for CIFAR 10 in
He et al. (2016): https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf
'''
class ResNet56(tf.keras.Model):
def summary(self):
"""See hack here: https://stackoverflow.com/questions/55235212/model-summary-cant-print-output-shape-while-using-subclass-model
overrides default 'multiple' output shape for debugging, something that is still an open issue on GitHub for TF2.7"""
x = tf.keras.layers.Input(shape=(32,32,3))
m = tf.keras.Model(inputs=x, outputs=self.call(x))
return m.summary()
mod = ResNet56(3, 16)
mod.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
=======
'''Implements ResNet9,..56 dynamically for CIFAR-10
Description of implementation can be found here: https://arxiv.org/pdf/1512.03385.pdf'''
import tensorflow as tf
class ResNetBlock(tf.keras.layers.Layer):
'''See official RStudio/Keras documentation here:
https://github.com/rstudio/keras/blob/main/vignettes/examples/cifar10_resnet.py
for implementation of residual block layers
Implements residual block described for CIFAR 10 in
He et al. (2016): https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf
'''
>>>>>>> 5c65073e7e9b9d1e712f2f35af09fbe7b3ffc696
| 45.922652
| 131
| 0.664461
|
<<<<<<< HEAD
'''Implements ResNet9,..56 dynamically for CIFAR-10
Description of implementation can be found here: https://arxiv.org/pdf/1512.03385.pdf'''
import tensorflow as tf
class ResNetBlock(tf.keras.layers.Layer):
'''See official RStudio/Keras documentation here:
https://github.com/rstudio/keras/blob/main/vignettes/examples/cifar10_resnet.py
for implementation of residual block layers
Implements residual block described for CIFAR 10 in
He et al. (2016): https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf
'''
def __init__(self, n_filters, kernel_size, stride, l2=5e-4, init_stride=False, first_layer=False):
self.n_filters = n_filters
self.first_layer = first_layer
super(ResNetBlock, self).__init__()
if init_stride:
stride1 = stride + 1
else:
stride1 = stride
self.conv_layer_1 = tf.keras.layers.Conv2D(n_filters, kernel_size, strides=stride1, padding='same',
kernel_regularizer=tf.keras.regularizers.l2(l2),
kernel_initializer='he_normal')
self.conv_layer_2 = tf.keras.layers.Conv2D(n_filters, kernel_size, strides=stride, padding='same',
kernel_regularizer=tf.keras.regularizers.l2(l2),
kernel_initializer='he_normal')
self.bn1 = tf.keras.layers.BatchNormalization()
self.act1 = tf.keras.layers.ReLU()
self.bn2 = tf.keras.layers.BatchNormalization()
self.act2 = tf.keras.layers.ReLU()
self.conv_projection = tf.keras.layers.Conv2D(n_filters, 1, strides=stride1, padding='same',
kernel_regularizer=tf.keras.regularizers.l2(l2),
kernel_initializer='he_normal')
def call(self, inputs):
x = self.conv_layer_1(inputs) # apply without activation since will batch normalize
x = self.bn1(x)
x = self.act1(x) # use ReLU activation as specified by paper
x = self.conv_layer_2(x)
x = self.bn2(x)
if self.first_layer:
inputs = self.conv_projection(inputs)
x = tf.keras.layers.Add()([x, inputs])
x = self.act2(x)
return x
class ResNet56(tf.keras.Model):
def __init__(self, block_depth, base_filters=16, l2=5e-4):
self.block_depth = block_depth
super(ResNet56, self).__init__()
self.conv_1 = tf.keras.layers.Conv2D(base_filters, 3, padding='same')
self.pre_bn = tf.keras.layers.BatchNormalization()
self.stack1 = [ResNetBlock(base_filters, 3, 1, l2=l2) for _ in range(self.block_depth-1)]
self.one_to_two = ResNetBlock(base_filters * 2, 3, 1, init_stride=True, first_layer=True, l2=l2)
self.stack2 = [ResNetBlock(base_filters * 2, 3, 1, l2=l2) for _ in range(self.block_depth - 1)]
self.two_to_three = ResNetBlock(base_filters * 4, 3, 1, init_stride=True, first_layer=True, l2=l2)
self.stack3 = [ResNetBlock(base_filters * 4, 3, 1, l2=l2) for _ in range(self.block_depth - 1)]
self.out_dense = tf.keras.layers.Dense(10, kernel_regularizer=tf.keras.regularizers.l2(l2)) #, activation='softmax')
def call(self, inputs):
x = self.conv_1(inputs)
x = self.pre_bn(x)
x = tf.keras.layers.Activation('relu')(x)
for i in range(self.block_depth-1):
x = self.stack1[i](x)
x = self.one_to_two(x)
for i in range(self.block_depth-1):
x = self.stack2[i](x)
x = self.two_to_three(x)
for i in range(self.block_depth-1):
x = self.stack3[i](x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Flatten()(x)
x = self.out_dense(x)
return x
def summary(self):
"""See hack here: https://stackoverflow.com/questions/55235212/model-summary-cant-print-output-shape-while-using-subclass-model
overrides default 'multiple' output shape for debugging, something that is still an open issue on GitHub for TF2.7"""
x = tf.keras.layers.Input(shape=(32,32,3))
m = tf.keras.Model(inputs=x, outputs=self.call(x))
return m.summary()
mod = ResNet56(3, 16)
mod.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
=======
'''Implements ResNet9,..56 dynamically for CIFAR-10
Description of implementation can be found here: https://arxiv.org/pdf/1512.03385.pdf'''
import tensorflow as tf
class ResNetBlock(tf.keras.layers.Layer):
'''See official RStudio/Keras documentation here:
https://github.com/rstudio/keras/blob/main/vignettes/examples/cifar10_resnet.py
for implementation of residual block layers
Implements residual block described for CIFAR 10 in
He et al. (2016): https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf
'''
def __init__(self, n_filters, kernel_size, stride, init_stride=False, first_layer=False):
self.n_filters = n_filters
self.first_layer = first_layer
super(ResNetBlock, self).__init__()
if init_stride:
stride1 = stride + 1
else:
stride1 = stride
self.conv_layer_1 = tf.keras.layers.Conv2D(n_filters, kernel_size, strides=stride1, padding='same',
kernel_regularizer=tf.keras.regularizers.l2(1e-4),
kernel_initializer='he_normal')
self.conv_layer_2 = tf.keras.layers.Conv2D(n_filters, kernel_size, strides=stride, padding='same',
kernel_regularizer=tf.keras.regularizers.l2(1e-4),
kernel_initializer='he_normal')
self.bn1 = tf.keras.layers.BatchNormalization()
self.act1 = tf.keras.layers.ReLU()
self.bn2 = tf.keras.layers.BatchNormalization()
self.act2 = tf.keras.layers.ReLU()
self.conv_projection = tf.keras.layers.Conv2D(n_filters, 1, strides=stride1, padding='same',
#kernel_regularizer=tf.keras.regularizers.l2(1e-3),
kernel_initializer='he_normal')
def call(self, inputs):
x = self.conv_layer_1(inputs) # apply without activation since will batch normalize
x = self.bn1(x)
x = self.act1(x) # use ReLU activation as specified by paper
x = self.conv_layer_2(x)
x = self.bn2(x)
if self.first_layer:
inputs = self.conv_projection(inputs)
x = tf.keras.layers.Add()([x, inputs])
x = self.act2(x)
return x
class ResNet56(tf.keras.Model):
def __init__(self, block_depth, base_filters=16):
self.block_depth = block_depth
super(ResNet56, self).__init__()
self.conv_1 = tf.keras.layers.Conv2D(base_filters, 3, padding='same')
self.pre_bn = tf.keras.layers.BatchNormalization()
self.stack1 = [ResNetBlock(base_filters, 3, 1) for _ in range(self.block_depth-1)]
self.one_to_two = ResNetBlock(base_filters * 2, 3, 1, init_stride=True, first_layer=True)
self.stack2 = [ResNetBlock(base_filters * 2, 3, 1) for _ in range(self.block_depth - 1)]
self.two_to_three = ResNetBlock(base_filters * 4, 3, 1, init_stride=True, first_layer=True)
self.stack3 = [ResNetBlock(base_filters * 4, 3, 1) for _ in range(self.block_depth - 1)]
self.out_dense = tf.keras.layers.Dense(10, activation='softmax')
def call(self, inputs):
x = self.conv_1(inputs)
x = self.pre_bn(x)
x = tf.keras.layers.Activation('relu')(x)
for i in range(self.block_depth-1):
x = self.stack1[i](x)
x = self.one_to_two(x)
for i in range(self.block_depth-1):
x = self.stack2[i](x)
x = self.two_to_three(x)
for i in range(self.block_depth-1):
x = self.stack3[i](x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Flatten()(x)
x = self.out_dense(x)
return x
def summary(self):
"""See hack here: https://stackoverflow.com/questions/55235212/model-summary-cant-print-output-shape-while-using-subclass-model
overrides default 'multiple' output shape for debugging, something that is still an open issue on GitHub for TF2.7"""
x = tf.keras.layers.Input(shape=(32,32,3))
m = tf.keras.Model(inputs=x, outputs=self.call(x))
return m.summary()
>>>>>>> 5c65073e7e9b9d1e712f2f35af09fbe7b3ffc696
| 5,904
| 466
| 172
|
55b78c2012898f4fe86987d78fcabcd324b3ffd9
| 558
|
py
|
Python
|
src/ram/service/__init__.py
|
bootforce-dev/ram-framework
|
b39c43cbe3b6e76db73dfd65c38da4fa578b032f
|
[
"MIT"
] | 1
|
2019-03-01T10:19:34.000Z
|
2019-03-01T10:19:34.000Z
|
src/ram/service/__init__.py
|
ram-framework/ram-framework
|
b39c43cbe3b6e76db73dfd65c38da4fa578b032f
|
[
"MIT"
] | null | null | null |
src/ram/service/__init__.py
|
ram-framework/ram-framework
|
b39c43cbe3b6e76db73dfd65c38da4fa578b032f
|
[
"MIT"
] | null | null | null |
import pkgutil
from ram.classes import Service
| 27.9
| 62
| 0.625448
|
import pkgutil
from ram.classes import Service
class __api__(object):
def __iter__(self):
for _, srvname, _ in pkgutil.iter_modules(__path__):
yield srvname
def __getitem__(self, srvname):
if not srvname:
raise ImportError("Service name cannot be empty.")
srvpath = __name__ + '.' + srvname
service = __import__(srvpath, fromlist=['__api__'])
if not isinstance(service, Service):
raise ImportError("No service interface found.")
else:
return service
| 432
| 1
| 76
|
8cf872b6a3c803c792ca79410991edaca09c12a3
| 19,959
|
py
|
Python
|
src/test_chess.py
|
liyiran/adversarial-search
|
8346cec55e9872409e0e6714cf8bce5e8d82bc8d
|
[
"Apache-2.0"
] | null | null | null |
src/test_chess.py
|
liyiran/adversarial-search
|
8346cec55e9872409e0e6714cf8bce5e8d82bc8d
|
[
"Apache-2.0"
] | null | null | null |
src/test_chess.py
|
liyiran/adversarial-search
|
8346cec55e9872409e0e6714cf8bce5e8d82bc8d
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase, skip
from hw1cs561s2018 import Chess, Configuration, alphabeta_cutoff_search, minimax_decision
| 35.201058
| 200
| 0.651486
|
from unittest import TestCase, skip
from hw1cs561s2018 import Chess, Configuration, alphabeta_cutoff_search, minimax_decision
class TestChess(TestCase):
def test_evaluation(self):
configuration2 = Configuration(path=None)
configuration2.generate_configuration_from_string(
"""Circle
ALPHABETA
2
0,S3,0,0,0,0,0,S1
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,C1,0
10,20,30,40,50,60,70,80
""")
chess2 = Chess(path=None, configuration=configuration2)
self.assertEqual(80 - 4 * 80, chess2.evaluation(s_pieces=chess2.initial_state.s_pieces, c_pieces=chess2.initial_state.c_pieces, player="C", row_values=configuration2.row_values))
############################################################
def test_action_no_action(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
2
0,S1,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,S1,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1)
self.assertEqual(('Noop'), utility1[0]) # min action
self.assertEqual(140, utility1[1]) # myopic
self.assertEqual(140, utility1[2]) # farsighted
self.assertEqual(1, utility1[3]) # total number
chess1.initial_state.to_move = configuration1.player
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, d=1024)
self.assertEqual(utility1[0], utility2[0])
self.assertEqual(utility1[1], utility2[1])
self.assertEqual(utility1[2], utility2[2])
self.assertGreaterEqual(utility1[3], utility2[3])
#############################################
def test_action_go_to_boarder(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Circle
MINIMAX
2
0,S2,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,C1
0,0,0,0,0,0,S1,0
0,0,0,0,0,S1,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1)
self.assertEqual(2 * 80 + 2 * 60, -utility1[2])
chess1.initial_state.to_move = configuration1.player
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, d=1024)
self.assertEqual(utility1[0], utility2[0])
self.assertEqual(utility1[1], utility2[1])
self.assertEqual(utility1[2], utility2[2])
self.assertGreaterEqual(utility1[3], utility2[3])
def test_tie_break(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
2
0,0,0,0,0,0,0,0
0,S1,0,0,0,0,0,0
0,0,0,0,S1,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,C1,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1)
self.assertEqual(((1, 1), (0, 0)), utility1[0])
self.assertEqual(80, utility1[2])
chess1.initial_state.to_move = configuration1.player
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, d=1024)
self.assertEqual(utility1[0], utility2[0])
# self.assertEqual(utility1[1], utility2[1])
# self.assertEqual(utility1[2], utility2[2])
# self.assertGreaterEqual(utility1[3], utility2[3])
def test_tie_break2(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Circle
MINIMAX
2
0,0,0,0,0,0,0,S1
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,C1,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1)
self.assertEqual(((6, 6), (7, 5)), utility1[0])
# self.assertEqual(2 * 80 + 2 * 60 + 80, -utility1[2])
chess1.initial_state.to_move = configuration1.player
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, d=1024)
self.assertEqual(utility1[0], utility2[0])
# self.assertEqual(utility1[1], utility2[1])
# self.assertEqual(utility1[2], utility2[2])
# self.assertGreaterEqual(utility1[3], utility2[3])
@skip("heihei")
def test_pressure_test(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Circle
MINIMAX
10
0,C1,0,C1,0,C1,0,C1
C1,0,C1,0,C1,0,C1,0
0,C1,0,C1,0,C1,0,C1
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
S1,0,S1,0,S1,0,S1,0
0,S1,0,S1,0,S1,0,S1
S1,0,S1,0,S1,0,S1,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1, depth_limit=10)
# self.assertEqual(2 * 80 + 2 * 60, -utility1[2])
# print utility1
# chess1.initial_state.to_move = configuration1.player
# utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, d=5)
# self.assertEqual(utility1[0], utility2[0])
# self.assertEqual(utility1[1], utility2[1])
# self.assertEqual(utility1[2], utility2[2])
# self.assertGreaterEqual(utility1[3], utility2[3])
#####################
def test_action_go_to_boarder1(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Circle
MINIMAX
2
0,S2,0,0,0,0,0,0
S1,0,0,0,0,0,0,0
0,0,0,0,0,0,0,C1
0,0,0,0,0,0,S1,0
0,0,0,0,0,S1,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1)
self.assertEqual(2 * 80 + 2 * 60 + 80, -utility1[2])
chess1.initial_state.to_move = configuration1.player
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, d=1024)
self.assertEqual(utility1[0], utility2[0])
self.assertEqual(utility1[1], utility2[1])
self.assertEqual(utility1[2], utility2[2])
self.assertGreaterEqual(utility1[3], utility2[3])
@skip("demonstrating skipping")
def test_action_go_to_boarder4(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
2
0,C1,0,0,0,0,0,S1
C1,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
# utility1 = minimax_decision(chess1.initial_state, chess1)
# self.assertEqual(80, -utility1[2])
chess1.initial_state.to_move = configuration1.player
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, d=1024)
self.assertEqual(-80, utility2[2])
# self.assertEqual(utility1[0], utility2[0])
# self.assertEqual(utility1[1], utility2[1])
# self.assertEqual(utility1[2], utility2[2])
# self.assertGreaterEqual(utility1[3], utility2[3])
########################################
def test_actions(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
2
0,S1,0,0,0,0,0,0
0,0,C1,0,0,0,0,0
0,0,0,S1,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
configuration2 = Configuration(path=None)
configuration2.generate_configuration_from_string(
"""Circle
ALPHABETA
2
0,S2,0,0,0,0,0,0
S1,0,0,0,0,0,0,0
0,0,0,0,0,0,0,C1
0,0,0,0,0,0,S1,0
0,0,0,0,0,S1,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
configuration3 = Configuration(path=None)
configuration3.generate_configuration_from_string(
"""Circle
ALPHABETA
2
0,S100,0,0,0,0,0,0
0,0,C1,0,0,0,0,0
0,0,0,S1,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
configuration4 = Configuration(path=None)
configuration4.generate_configuration_from_string(
"""Star
ALPHABETA
2
0,C1,0,0,0,0,0,0
0,0,C1,0,0,0,0,0
0,0,0,S1,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
chess2 = Chess(path=None, configuration=configuration2)
chess3 = Chess(path=None, configuration=configuration3)
chess4 = Chess(path=None, configuration=configuration4)
actions1 = chess1.actions(chess1.initial_state)
self.assertEqual(2, len(actions1))
actions2 = chess2.actions(chess2.initial_state)
self.assertEqual(1, len(actions2))
actions3 = chess3.actions(chess3.initial_state)
self.assertEqual(2, len(actions3))
actions4 = chess4.actions(chess4.initial_state)
self.assertEqual(1, len(actions4))
self.assertEqual(80 + 60 - 20, Chess.evaluation(c_pieces=chess1.initial_state.c_pieces, s_pieces=chess1.initial_state.s_pieces, player="S", row_values=configuration1.row_values))
self.assertEqual(80 * 2 + 70 + 50 + 40 - 30, Chess.evaluation(c_pieces=chess2.initial_state.c_pieces, s_pieces=chess2.initial_state.s_pieces, player="S", row_values=configuration2.row_values))
self.assertEqual(80 * 100 + 60 - 20, Chess.evaluation(c_pieces=chess3.initial_state.c_pieces, s_pieces=chess3.initial_state.s_pieces, player="S", row_values=configuration3.row_values))
self.assertEqual(60 - 10 - 20, Chess.evaluation(c_pieces=chess4.initial_state.c_pieces, s_pieces=chess4.initial_state.s_pieces, player="S", row_values=configuration4.row_values))
state = chess1.result(chess1.initial_state, actions1[0])
self.assertEqual('C', state.to_move)
self.assertEqual(len(chess1.initial_state.c_pieces + chess1.initial_state.s_pieces), len(state.c_pieces + state.s_pieces))
state = chess1.result(chess1.initial_state, actions1[1])
self.assertEqual('C', state.to_move)
self.assertEqual(len(chess1.initial_state.c_pieces + chess1.initial_state.s_pieces), len(state.c_pieces + state.s_pieces) + 1)
for a in actions2:
state = chess2.result(chess2.initial_state, a)
self.assertEqual('S', state.to_move)
self.assertEqual(len(chess2.initial_state.c_pieces + chess2.initial_state.s_pieces), len(state.c_pieces + state.s_pieces))
state = chess3.result(chess3.initial_state, actions3[0])
self.assertEqual('S', state.to_move)
self.assertEqual(len(chess3.initial_state.c_pieces + chess3.initial_state.s_pieces), len(state.c_pieces + state.s_pieces))
state = chess3.result(chess3.initial_state, actions3[1])
self.assertEqual('S', state.to_move)
self.assertEqual(len(chess3.initial_state.c_pieces + chess3.initial_state.s_pieces), len(state.c_pieces + state.s_pieces) + 1)
for a in actions4:
state = chess4.result(chess4.initial_state, a)
self.assertEqual('C', state.to_move)
self.assertEqual(len(chess4.initial_state.c_pieces + chess4.initial_state.s_pieces), len(state.c_pieces + state.s_pieces))
utility1 = minimax_decision(chess1.initial_state, chess1)
self.assertEqual(80 * 2, utility1[2])
chess2.initial_state.to_move = 'Circle'
utility2 = minimax_decision(chess2.initial_state, chess2)
self.assertEqual(-80 * 3 - 60 * 2, utility2[2])
utility3 = minimax_decision(chess3.initial_state, chess3)
self.assertEqual(80 - 80 * 100, utility3[2])
# utility4 = minimax_decision(chess4.initial_state, chess4)
# self.assertEqual(80 - 80 * 2, utility4)
def test_depth_limit1(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
2
0,S1,0,0,0,0,0,0
0,0,C1,0,0,0,0,0
0,0,0,S1,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual(((2, 3), (0, 1)), utility1[0])
self.assertEqual(160, utility1[1])
self.assertEqual(160, utility1[2])
self.assertEqual(5, utility1[3])
chess1.initial_state.to_move = configuration1.player
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual(utility1[0], utility2[0])
self.assertEqual(utility1[1], utility2[1])
self.assertEqual(utility1[2], utility2[2])
self.assertGreaterEqual(utility1[3], utility2[3])
def test_depth_limit2(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
9
0,S1,0,0,0,0,0,0
S1,0,0,0,0,0,0,0
0,0,0,0,0,0,0,C1
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual(((1, 0), (0, 1)), utility1[0])
self.assertEqual(130, utility1[1]) # myopic
self.assertEqual(90, utility1[2])
self.assertEqual(26, utility1[3])
chess1.initial_state.to_move = configuration1.player
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual(utility1[0], utility2[0])
self.assertEqual(utility1[1], utility2[1])
self.assertEqual(utility1[2], utility2[2])
self.assertGreaterEqual(utility1[3], utility2[3])
def test_depth_limit3(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Circle
MINIMAX
2
0,S2,0,0,0,0,0,0
S1,0,0,0,0,0,0,0
0,0,0,0,0,0,0,C1
0,0,0,0,0,0,S1,0
0,0,0,0,0,S1,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,50,60,70,80""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual('Noop', utility1[0])
self.assertEqual(-290, utility1[1]) # myopic
self.assertEqual(-300, utility1[2])
self.assertEqual(5, utility1[3])
chess1.initial_state.to_move = configuration1.player
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual(utility1[0], utility2[0])
self.assertEqual(utility1[1], utility2[1])
self.assertEqual(utility1[2], utility2[2])
self.assertGreaterEqual(utility1[3], utility2[3])
def test_depth_limit4(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
7
0,C1,0,C1,0,C1,0,C1
C1,0,C1,0,C1,0,C1,0
0,S1,0,S1,0,S1,0,S1
S1,0,S1,0,S1,0,S1,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,52,70,90,1000
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual((('Noop')), utility1[0])
self.assertEqual(368, utility1[1]) # myopic
self.assertEqual(368, utility1[2])
self.assertGreaterEqual(utility1[3], 3)
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual(utility1[0], utility2[0])
self.assertEqual(utility1[1], utility2[1])
self.assertEqual(utility1[2], utility2[2])
self.assertGreaterEqual(utility1[3], utility2[3])
def test_tie_break_star(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
7
0,0,0,0,0,0,0,0
0,S1,0,0,0,S1,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,C1
10,20,30,40,52,70,90,1000
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1, 1024)
self.assertEqual((((1, 1), (0, 0))), utility1[0])
self.assertEqual(1000 + 90 - 1000, utility1[1]) # myopic
self.assertEqual(1000, utility1[2])
self.assertEqual(utility1[3], 33)
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual(utility1[0], utility2[0])
self.assertEqual(utility1[1], utility2[1])
self.assertEqual(utility1[2], utility2[2])
self.assertGreaterEqual(utility1[3], 33)
def test_tie_break_circle(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Circle
MINIMAX
7
0,0,0,0,0,0,S1,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,C1,0,0,C1,0,0
0,0,0,0,0,0,0,0
10,20,30,40,52,70,90,1000
""")
chess1 = Chess(path=None, configuration=configuration1)
utility1 = minimax_decision(chess1.initial_state, chess1, 1024)
self.assertEqual((((6, 2), (7, 1))), utility1[0])
self.assertEqual(1000 + 90 - 1000, utility1[1]) # myopic
self.assertEqual(1000, utility1[2])
self.assertEqual(utility1[3], 33)
utility2 = alphabeta_cutoff_search(chess1.initial_state, chess1, configuration1.depth_limit)
self.assertEqual(utility1[0], utility2[0])
self.assertEqual(utility1[1], utility2[1])
self.assertEqual(utility1[2], utility2[2])
self.assertGreaterEqual(utility1[3], 33)
def test_translate(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
7
0,C1,0,C1,0,C1,0,C1
C1,0,C1,0,C1,0,C1,0
0,S1,0,S1,0,S1,0,S1
S1,0,S1,0,S1,0,S1,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,52,70,90,1000
""")
chess1 = Chess(path=None, configuration=configuration1)
utility = (((1, 0), (0, 1)), 130, 90, 26)
self.assertEqual("""G1-H2
130
90
26
""", chess1.translate(utility))
def test_translate_no_action(self):
configuration1 = Configuration(path=None)
configuration1.generate_configuration_from_string(
"""Star
MINIMAX
7
0,C1,0,C1,0,C1,0,C1
C1,0,C1,0,C1,0,C1,0
0,S1,0,S1,0,S1,0,S1
S1,0,S1,0,S1,0,S1,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0
10,20,30,40,52,70,90,1000""")
chess1 = Chess(path=None, configuration=configuration1)
utility = (('Noop'), 130, 90, 26)
self.assertEqual("""pass
130
90
26
""", chess1.translate(utility))
def test_integration_test1(self):
chess = Chess(path="../res/test_case_1.txt", configuration=None)
result = chess.translate(minimax_decision(game=chess, state=chess.initial_state, depth_limit=chess.config.depth_limit))
self.assertEqual("F4-H2\n160\n160\n5\n", result)
chess.write_to_file(result)
| 19,076
| 732
| 23
|
ab8bb7e5f5c38a866610aeab2dfd90be6b13a3ea
| 44,907
|
py
|
Python
|
redun/backends/db/dataflow.py
|
cclauss/redun
|
55792921b42b430571eafc30ab21eb50eb4f64b3
|
[
"Apache-2.0"
] | null | null | null |
redun/backends/db/dataflow.py
|
cclauss/redun
|
55792921b42b430571eafc30ab21eb50eb4f64b3
|
[
"Apache-2.0"
] | null | null | null |
redun/backends/db/dataflow.py
|
cclauss/redun
|
55792921b42b430571eafc30ab21eb50eb4f64b3
|
[
"Apache-2.0"
] | null | null | null |
"""
Dataflow visualization.
An upstream dataflow visualization explains the derivation of a value. Take
for example this dataflow visualization of the derivation of a VCF file
from a bioinformatic analysis:
```
value = File(path=sample4.vcf, hash=********)
value <-- <********> call_variants(bam, ref_genome)
bam = <********> File(path=sample4.bam, hash=********)
ref_genome = <********> File(path=ref_genome, hash=********)
bam <-- argument of <********> call_variants_all(bams, ref_genome)
<-- <********> align_reads_all(fastqs, ref_genome)
<-- <********> align_reads(fastq, ref_genome)
fastq = <********> File(path=sample4.fastq, hash=********)
ref_genome = <********> File(path=ref_genome, hash=********)
fastq <-- argument of <********> align_reads_all(fastqs, ref_genome)
<-- argument of <********> main(fastqs, ref_genome)
<-- origin
ref_genome <-- argument of <********> align_reads_all(fastqs, ref_genome)
<-- argument of <********> main(fastqs, ref_genome)
<-- origin
```
Hash values are indicated by * above. For reference, here is what the
workflow might have been:
```
@task()
def align_reads(fastq: File, ref_genome: File) -> File:
reads = cast(str, fastq.read())
ref = cast(str, ref_genome.read())
bam = File(fastq.path.replace("fastq", "bam"))
bam.write("align({}, {})".format(reads, ref))
return bam
@task()
def call_variants(bam: File, ref_genome: File) -> File:
align = cast(str, bam.read())
ref = cast(str, ref_genome.read())
vcf = File(bam.path.replace("bam", "vcf"))
vcf.write("calls({}, {})".format(align, ref))
return vcf
@task()
def align_reads_all(fastqs: List[File], ref_genome: File):
bams = [align_reads(fastq, ref_genome) for fastq in fastqs]
return bams
@task()
def call_variants_all(bams: List[File], ref_genome: File):
vcfs = [call_variants(bam, ref_genome) for bam in bams]
return vcfs
@task()
def main(fastqs: List[File], ref_genome: File):
bams = align_reads_all(fastqs, ref_genome)
vcfs = call_variants_all(bams, ref_genome)
return vcfs
```
A dataflow visualization consists of a series of paragraphs called
"dataflow sections" that describe how one of the values is derived. Here
is the section for the `bam` value:
```
bam <-- argument of <********> call_variants_all(bams, ref_genome)
<-- <********> align_reads_all(fastqs, ref_genome)
<-- <********> align_reads(fastq, ref_genome_2)
fastq = <********> File(path=sample4.fastq, hash=********)
ref_genome = <********> File(path=ref_genome, hash=********)
```
A section is made of three clauses: assignment, routing, and arguments.
The assignment clause indicates which CallNode produced this value:
```
bam <-- argument of <********> call_variants_all(bams, ref_genome)
```
Routing clauses, if present, describe a series of additional CallNodes
that "route" the value by passing via arguments from parent CallNode to child
CallNode, or by results from child CallNode to parent CallNode.
```
<-- result of <********> align_reads_all(fastqs, ref_genome)
<-- <********> align_reads(fastq, ref_genome_2)
```
Argument clauses define the value for each argument in the final CallNode.
```
fastq = <********> File(path=sample4.fastq, hash=********)
ref_genome = <********> File(path=ref_genome, hash=********)
```
To build this visualization, the following strategy is used:
- Given a starting value (e.g. a VCF file in the example above), walk the
CallGraph backwards (i.e. upstream) to determine relevant nodes. These are
call DataflowNodes, which are connected by DataflowEdges.
- DataflowEdges are then grouped into sections.
- Each section is then reorganized into a DataflowSectionDOM. A DataflowDOM
is the collection of DataflowSectionDOMs. The DOM (document object model) is
an intermediate representation that can be rendered in multiple ways.
- Once a DataflowDOM is created, it can either be rendered into a textual
format, or serialized into JSON for the web frontend.
"""
import ast
import re
from collections import defaultdict
from enum import Enum
from itertools import chain
from textwrap import dedent
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
TypeVar,
Union,
cast,
)
from redun.backends.db import Argument, CallNode, RedunBackendDb, Task, Value
from redun.utils import assert_never, trim_string
T = TypeVar("T")
REDUN_INTERNAL_TASKS = {
"redun.postprocess_script",
"redun.script",
}
def iter_unique(items: Iterable[T], key: Callable[[T], Any] = lambda x: x) -> Iterator[T]:
"""
Iterate through unique items.
"""
seen: Set[T] = set()
for item in items:
item_key = key(item)
if item_key not in seen:
yield item
seen.add(item_key)
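# Example (editor's sketch, not part of redun):
#   list(iter_unique([1, 2, 1, 3]))           -> [1, 2, 3]
#   list(iter_unique(["a", "bb", "c"], len))  -> ["a", "bb"]  (unique by key)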
class ArgumentValue(NamedTuple):
"""
A DataflowNode used for tracing one subvalue in an argument.
"""
argument: Argument
value: Value
class CallNodeValue(NamedTuple):
"""
A DataflowNode used for tracing one subvalue of a CallNode result.
"""
call_node: CallNode
value: Value
# There are several kinds of DataflowNodes.
DataflowNode = Union[ArgumentValue, CallNodeValue, CallNode, Value]
class DataflowEdge(NamedTuple):
"""
An edge in a Dataflow graph.
"""
src: DataflowNode
dest: Optional[DataflowNode]
# A grouping of DataflowEdges that are displayed as one "paragraph".
DataflowSection = List[DataflowEdge]
class DataflowSectionKind(Enum):
"""
Each dataflow section describes either a task call or a data manipulation.
"""
CALL = "call"
DATA = "data"
class DataflowAssign(NamedTuple):
"""
The assignment clause in a Dataflow DOM.
"""
var_name: str
prefix: str
node_display: str
node: Optional[DataflowNode]
class DataflowRouting(NamedTuple):
"""
A routing clause in a Dataflow DOM.
"""
prefix: str
node_display: str
node: Optional[DataflowNode]
class DataflowArg(NamedTuple):
"""
An argument clause in a Dataflow DOM.
"""
var_name: str
value: Value
class DataflowSectionDOM(NamedTuple):
"""
A section in Dataflow DOM.
"""
assign: DataflowAssign
routing: List[DataflowRouting]
args: List[DataflowArg]
# The top-level Dataflow DOM.
DataflowDOM = Iterable[DataflowSectionDOM]
def get_task_args(task: Task) -> List[str]:
"""
Returns list of argument names of a Task. Raises a SyntaxError if the task source code is not
properly formatted.
"""
# Since we don't currently record the name of positional arguments,
# we have to infer them from the source code.
code = ast.parse(dedent(task.source))
if not isinstance(code.body[0], ast.FunctionDef):
raise SyntaxError("Source code is not a properly formatted function.")
# Type ignore is needed since the AST lib doesn't seem to use proper types
# everywhere.
return [arg.arg for arg in code.body[0].args.args] # type: ignore
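# Example (editor's sketch; the real redun Task is an ORM model, so a stand-in
# object with a ``source`` attribute is assumed here):
#   from types import SimpleNamespace
#   fake_task = SimpleNamespace(source="def align_reads(fastq, ref_genome):\n    pass")
#   get_task_args(fake_task)  # -> ['fastq', 'ref_genome']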
def make_var_name(var_name_base: str, name2var: Dict[str, DataflowNode], suffix: int = 2) -> str:
"""
Generate a new variable using a unique suffix (e.g. myvar_2).
"""
# Strip numerical suffix.
var_name_base = re.sub(r"_\d+$", "", var_name_base)
if var_name_base not in name2var:
# This variable name is already unique.
return var_name_base
# Search increasing suffixes until we find a unique variable name.
while True:
new_var_name = var_name_base + "_" + str(suffix)
if new_var_name not in name2var:
return new_var_name
suffix += 1
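# Example (editor's sketch, not part of redun):
#   make_var_name("bam", {})               -> "bam"
#   make_var_name("bam", {"bam": node})    -> "bam_2"
#   make_var_name("bam_7", {"bam": node})  -> "bam_2"  (numeric suffix stripped first)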
def get_default_arg_name(pos: int) -> str:
"""
Generate the default name for argument.
"""
return f"arg{pos}"
class DataflowVars:
"""
Manages variable names for nodes in a dataflow.
"""
def __getitem__(self, node: DataflowNode) -> str:
"""
Get a variable name for a DataflowNode.
"""
return self.var2name[node]
def __setitem__(self, node: DataflowNode, var_name: str) -> str:
"""
Set a new variable name for a DataflowNode.
"""
self.var2name[node] = var_name
self.name2var[var_name] = node
return var_name
def __contains__(self, node: DataflowNode) -> bool:
"""
Returns True if node has a variable name.
"""
return node in self.var2name
def get_task_args(self, task: Task) -> List[str]:
"""
Returns the parameter names of a Task.
"""
args = self.task2args.get(task)
if not args:
# Cache task arg names.
# TODO: Properly handle variadic args.
try:
args = self.task2args[task] = get_task_args(task)
except SyntaxError:
# The argument names cannot be properly parsed.
return []
return args
def new_var_name(
self, node: DataflowNode, base_var_name: Optional[str] = None
) -> Tuple[str, Optional[str]]:
"""
Get or create a new variable name for a DataflowNode.
"""
var_name = self.var2name.get(node)
if var_name:
# Node is already named.
return var_name, None
if not base_var_name:
# Autogenerate base var name from ArgumentValue.
assert isinstance(node, ArgumentValue)
argument, value = node
# Determine new variable name.
if argument.arg_key:
base_var_name = argument.arg_key
else:
arg_names = self.get_task_args(argument.call_node.task)
if not arg_names:
# Use default argument names.
base_var_name = get_default_arg_name(argument.arg_position)
else:
base_var_name = arg_names[argument.arg_position]
# Ensure variable name is unique.
var_name = make_var_name(base_var_name, self.name2var)
self[node] = var_name
return var_name, base_var_name
def walk_dataflow_value(backend: RedunBackendDb, value: Value) -> Iterator[DataflowEdge]:
"""
Iterates through the edges in the upstream dataflow graph of a Value.
"""
# Find upstream CallNodes.
# A value can be produced by many CallNodes and it can be a subvalue of
# a value produced from many CallNodes.
call_nodes = set(value.results) | {
call_node for parent in value.parents for call_node in parent.results
}
call_nodes = {call_node for call_node in call_nodes if not is_internal_task(call_node.task)}
# Determine which CallNodes are just routing CallNodes.
# A routing CallNode is an upstream CallNode that is also an ancestor
# of another upstream CallNode.
seen: Set[CallNode] = set()
routing_call_nodes = set()
for call_node in call_nodes:
for ancestor in walk_parents(call_node, seen):
if ancestor in call_nodes:
routing_call_nodes.add(ancestor)
break
# Determine originating CallNodes (upstream and non-routing).
originating_call_nodes = call_nodes - routing_call_nodes
# Prefer the most recent CallNodes.
start_time_call_nodes = [
(job.start_time, call_node)
for call_node in originating_call_nodes
for job in call_node.jobs
]
max_start_time = max((start_time for start_time, _ in start_time_call_nodes), default=None)
upstream_call_node = next(
(
call_node
for start_time, call_node in start_time_call_nodes
if start_time == max_start_time
),
None,
)
# Emit Value-CallNode edges in dataflow.
if upstream_call_node:
yield DataflowEdge(value, upstream_call_node)
else:
yield DataflowEdge(value, None)
def get_callnode_arguments(call_node: CallNode) -> List[Argument]:
"""
Returns a CallNode's arguments in sorted order.
"""
pos_args = []
kw_args = []
for arg in call_node.arguments:
if arg.arg_position is not None:
pos_args.append(arg)
else:
kw_args.append(arg)
pos_args.sort(key=lambda arg: arg.arg_position)
kw_args.sort(key=lambda arg: arg.arg_key)
return pos_args + kw_args
def walk_dataflow_callnode(backend: RedunBackendDb, call_node: CallNode) -> Iterator[DataflowEdge]:
"""
Iterates through the upstream Arguments of a CallNode.
"""
# Emit CallNode-ArgumentValue edges in dataflow.
    # Reversing the arguments causes the traversal to visit arguments in their
    # original order, which is nicer for display.
arguments = get_callnode_arguments(call_node)
if arguments:
for argument in arguments:
yield DataflowEdge(call_node, ArgumentValue(argument, argument.value))
else:
# There are no arguments, this is an origin of the dataflow.
yield DataflowEdge(call_node, None)
def is_internal_task(task: Task) -> bool:
"""
Returns True if task is an internal redun task.
We skip such tasks in the dataflow to avoid clutter.
"""
return task.fullname in REDUN_INTERNAL_TASKS
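# Illustrative sketch (not part of the original module): is_internal_task only
# inspects `task.fullname`, so a lightweight stand-in object, rather than a real
# database-backed Task, is enough to demonstrate the check.
def _example_is_internal_task() -> None:
    from types import SimpleNamespace

    fake_task = cast(Task, SimpleNamespace(fullname="redun.script"))
    user_task = cast(Task, SimpleNamespace(fullname="workflow.align_reads"))
    assert is_internal_task(fake_task)
    assert not is_internal_task(user_task)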
def walk_dataflow_callnode_value(
backend: RedunBackendDb, call_node_value: CallNodeValue
) -> Iterator[DataflowEdge]:
"""
Iterates through the upstream dataflow edges of a CallNodeValue.
The edges either go deeper to the child CallNodes or stay with this CallNode.
"""
call_node, value = call_node_value
    def is_subvalue(query_value: Value, target_value: Value) -> bool:
        value_hashes = chain(
            [target_value.value_hash], (child.value_hash for child in target_value.children)
        )
        return query_value.value_hash in value_hashes

    # Prefer the flow from children of call_node over call_node.
child_matches = [
child_node
for child_node in call_node.children
if not is_internal_task(child_node.task) and is_subvalue(value, child_node.value)
]
if len(child_matches) == 1:
# There is one obvious child CallNode that produced this value.
# Follow the dataflow through this child CallNode.
[child_node] = child_matches
yield DataflowEdge(call_node_value, CallNodeValue(child_node, value))
else:
# Otherwise, we follow the dataflow for the whole CallNode.
yield DataflowEdge(call_node_value, call_node)
def walk_dataflow_argument_value(
backend: RedunBackendDb, argument_value: ArgumentValue
) -> Iterator[DataflowEdge]:
"""
Iterates through the upstream dataflow edges of an ArgumentValue.
Edge types:
- ArgumentValue <-- CallNodeValue:
- Value came from the result of a CallNode.
- ArgumentValue <-- ArgumentValue
- Value came from argument of parent CallNode, i.e. argument-to-argument routing.
- ArgumentValue <-- Origin
- Value came directly from user or task.
"""
argument, value = argument_value
is_terminal = True
# Determine the most recent common parent CallNode of all the upstream CallNodes.
call_node_parents = [set(call_node.parents) for call_node in argument.upstream]
call_node_parents.append(set(argument.call_node.parents))
context_call_node = next(
iter(
sorted(
set.intersection(*call_node_parents),
key=lambda call_node: call_node.timestamp,
reverse=True,
)
),
None,
)
# Process upstream CallNodes in a consistent order (call_order).
if context_call_node:
# Use reverse order during emission to get correct order during display.
upstream_order = sorted(
[
(call_node, edge.call_order)
for call_node in argument.upstream
for edge in call_node.parent_edges
if edge.parent_node == context_call_node
],
key=lambda pair: pair[1],
reverse=True,
)
upstream = [call_node for call_node, _ in upstream_order]
else:
# Fallback if we can't determine context call node.
upstream = argument.upstream
# Emit upstream sibling CallNodes.
for call_node in iter_unique(upstream):
# Order subvalues for consistency.
subvalues = sorted(call_node.value.children, key=lambda child: child.value_hash)
result_values = iter_unique(chain([call_node.value], subvalues))
match = False
for result_value in result_values:
if value.value_hash == result_value.value_hash:
is_terminal = False
match = True
yield DataflowEdge(
argument_value,
CallNodeValue(call_node, result_value),
)
if not match:
# Default to emitting the whole result of the CallNode.
is_terminal = False
yield DataflowEdge(
argument_value,
CallNodeValue(call_node, call_node.value),
)
# Emit upstream argument from parent CallNode.
# Prefer most recent parent.
parent_call_nodes = sorted(
argument.call_node.parents, key=lambda parent: parent.timestamp, reverse=True
)
for parent_call_node in parent_call_nodes[:1]:
for parent_argument in parent_call_node.arguments:
parent_values = chain([parent_argument.value], parent_argument.value.children)
for parent_value in parent_values:
if value.value_hash == parent_value.value_hash:
is_terminal = False
yield DataflowEdge(
argument_value,
ArgumentValue(parent_argument, value),
)
# Emit terminal origin value.
if is_terminal:
yield DataflowEdge(argument_value, None)
def walk_dataflow_node(backend: RedunBackendDb, node: DataflowNode) -> Iterator[DataflowEdge]:
"""
    Iterates through the upstream dataflow edges of any DataflowNode.
"""
if isinstance(node, Value):
return walk_dataflow_value(backend, node)
elif isinstance(node, CallNode):
return walk_dataflow_callnode(backend, node)
elif isinstance(node, ArgumentValue):
return walk_dataflow_argument_value(backend, node)
elif isinstance(node, CallNodeValue):
return walk_dataflow_callnode_value(backend, node)
else:
assert_never(node)
def walk_dataflow(backend: RedunBackendDb, init_node: DataflowNode) -> Iterator[DataflowEdge]:
"""
Iterate through all the upstream dataflow edges of a 'node' in the CallGraph.
A 'node' can be a Value, CallNode, CallNodeValue, or an ArgumentValue.
"""
# Perform depth-first traversal.
queue: List[DataflowNode] = [init_node]
seen: Set[DataflowNode] = set()
while queue:
node: DataflowNode = queue.pop()
child_edges = list(walk_dataflow_node(backend, node))
yield from child_edges
# Reverse edges before pushing on to stack to maintain sibling order.
for edge in reversed(child_edges):
node2 = edge.dest
if node2 is None:
# Terminal node of type 'Origin'.
continue
# Determine whether dest node is unique and should be added to queue.
if node2 not in seen:
queue.append(node2)
seen.add(node2)
def get_section_edge_type(edge: DataflowEdge) -> str:
"""
Classifies a DataflowEdge.
"""
src, dest = edge
if (
isinstance(src, ArgumentValue)
and isinstance(dest, CallNodeValue)
and src.value.value_hash != dest.value.value_hash
):
return "data"
elif isinstance(src, CallNode) and isinstance(dest, ArgumentValue):
return "call_arg"
elif isinstance(src, (Value, CallNodeValue)) and isinstance(dest, CallNode):
return "call_result"
elif isinstance(src, CallNodeValue) and isinstance(dest, CallNodeValue):
return "call_result_routing"
elif isinstance(src, ArgumentValue) and isinstance(dest, CallNodeValue):
return "call_arg_result_routing"
elif isinstance(src, ArgumentValue) and isinstance(dest, ArgumentValue):
return "call_arg_routing"
elif isinstance(src, CallNode) and dest is None:
return "call_origin"
elif isinstance(src, ArgumentValue) and dest is None:
return "call_arg_origin"
elif isinstance(src, Value) and dest is None:
return "call_value_origin"
else:
raise AssertionError(f"Unknown edge type {edge}")
def toposort_edges(edges: Iterable[DataflowEdge]) -> Iterator[DataflowEdge]:
"""
Topologically sort DataflowEdges in depth-first order.
"""
# Compute indegree.
indegrees: Dict[DataflowNode, int] = defaultdict(int)
src2edge: Dict[DataflowNode, List[DataflowEdge]] = defaultdict(
cast(Callable[[], List[DataflowEdge]], list)
)
for edge in edges:
src2edge[edge.src].append(edge)
# Ensure every node is present in indegrees, including roots.
indegrees[edge.src]
if edge.dest:
indegrees[edge.dest] += 1
# Initialize queue with roots.
queue: List[DataflowNode] = [node for node, degree in indegrees.items() if degree == 0]
while queue:
node = queue.pop()
yield from src2edge[node]
# Reverse edges before pushing on stack in order to maintain sibling order.
for edge in reversed(src2edge[node]):
if edge.dest:
indegrees[edge.dest] -= 1
if indegrees[edge.dest] == 0:
# All parents have been visited, we can enqueue dest.
queue.append(edge.dest)
def rewrite_call_node_merges(edges: List[DataflowEdge]) -> List[DataflowEdge]:
"""
Rewrites dataflow graphs to enforce one CallNodeValue per CallNode.
    This function identifies CallNodes that have multiple parent CallNodeValues
like this:
ArgumentValue --> CallNodeValue --\
V
CallNode (merge_node)
^
ArgumentValue --> CallNodeValue --/
    and rewrites them to unify the CallNodeValues like this:
ArgumentValue --\
V
CallNodeValue --> CallNode
^
ArgumentValue --/
"""
# Build graph dict.
src2edges: Dict[Optional[DataflowNode], List[DataflowEdge]] = defaultdict(list)
dest2edges = defaultdict(list)
nodes = []
for edge in edges:
nodes.append(edge.src)
src2edges[edge.src].append(edge)
if edge.dest:
dest2edges[edge.dest].append(edge)
# Find merge nodes.
merge_nodes = [
node for node, edges in dest2edges.items() if isinstance(node, CallNode) and len(edges) > 1
]
# Rewrite CallNode merge nodes.
for merge_node in merge_nodes:
# Find relevant edges and nodes.
cv_cn_edges = dest2edges[merge_node]
call_node_values = [edge.src for edge in cv_cn_edges]
upstream_edges = [
edge for call_node_value in call_node_values for edge in dest2edges[call_node_value]
]
upstream_nodes = list(iter_unique(edge.src for edge in upstream_edges))
old_edges = set(cv_cn_edges) | set(upstream_edges)
# Create new unified CallNodeValue from call_node_values.
unified_node = CallNodeValue(call_node=merge_node, value=merge_node.value)
# Create new edges.
new_edges = [
DataflowEdge(
src=upstream_node,
dest=unified_node,
)
for upstream_node in upstream_nodes
] + [DataflowEdge(src=unified_node, dest=merge_node)]
# Remove old edges from edges.
edges2 = [edge for edge in edges if edge not in old_edges]
# To keep edges in traversal order, insert new edges right before
# the first appearance of the merge_node.
insert_index = min(i for i, edge in enumerate(edges2) if edge.src == merge_node)
edges = edges2[:insert_index] + new_edges + edges2[insert_index:]
return edges
def iter_subsections(section: DataflowSection) -> Iterator[DataflowSection]:
"""
    Determines if a section should be broken down into smaller sections.
In real life dataflows, there are some cases where the dataflow merges
such that the structure is a DAG, not just a tree. These merges represent a
value that was passed to two or more different tasks and then their outputs
eventually combine again, either into a single Value like a list or as arguments
into a common task. For example, `value` is a merge node in the upstream
dataflow of `result`.
value = task0()
output1 = task1(a=value)
output2 = task2(b=value)
result = task3(c=output1, d=output2)
The upstream dataflow graph of `result` is:
Value(result)
|
V
CallNode(task3) ---------------\
| |
V V
ArgumentValue(task3, key=a) ArgumentValue(task3, key=b)
| |
V V
CallNode(task1) CallNode(task2)
| |
V V
ArgumentValue(task1, key=c) ArgumentValue(task2, key=d)
| |
V |
CallNodeValue(task0, value) <--/
|
V
CallNode(task0)
|
V
Origin
    The function `iter_dataflow_sections()` will break this graph right after
    every `CallNode --> ArgumentValue` edge, resulting in three sections.
One of those sections will have a "merge node", `CallNode(task0)`:
ArgumentValue(task1, key=c) ArgumentValue(task2, key=d)
| |
V |
CallNodeValue(task0, value) <--/
|
V
CallNode(task0)
|
V
Origin
This function will further break this section into subsections like this:
Subsection 1:
ArgumentValue(task1, key=c)
|
V
CallNodeValue(task0, value)
Subsection 2:
ArgumentValue(task2, key=d)
|
V
CallNodeValue(task0, value)
Subsection 3:
CallNodeValue(task0, value)
|
V
CallNode(task0)
|
V
Origin
Ultimately these three subsections get rendered as:
c <-- c_2
d <-- c_2
c_2 <-- task()
<-- origin
"""
# Build graph dict.
src2edges: Dict[Optional[DataflowNode], List[DataflowEdge]] = defaultdict(list)
dest2edges = defaultdict(list)
nodes = []
for edge in section:
nodes.append(edge.src)
src2edges[edge.src].append(edge)
if edge.dest:
dest2edges[edge.dest].append(edge)
    # Find roots. Maintain order of appearance in section.
roots = [node for node in iter_unique(nodes) if len(dest2edges[node]) == 0]
# Find merge nodes.
merge_nodes = [node for node, edges in dest2edges.items() if len(edges) > 1]
if not merge_nodes:
# No merge nodes. Keep section as is.
yield section
return
# Determine subsections.
subsection: DataflowSection = []
node: Optional[DataflowNode]
for node in roots + merge_nodes:
while True:
next_edges = src2edges[node]
if len(next_edges) != 1:
# We have hit the end of the section.
                # There are either no more edges, or we are hitting the arguments.
subsection.extend(next_edges)
yield subsection
subsection = []
break
# Path should always be linear.
[edge] = next_edges
subsection.append(edge)
if edge.dest not in merge_nodes:
# Follow edge.
node = edge.dest
else:
# We have hit the merge node, stop.
yield subsection
subsection = []
break
def iter_dataflow_sections(
dataflow_edges: Iterable[DataflowEdge],
) -> Iterator[Tuple[DataflowSectionKind, DataflowSection]]:
"""
Yields dataflow sections from an iterable of dataflow edges.
A dataflow section is a group of edges representing one 'paragraph' in a
dataflow display.
value <-- <1234abcd> call_node(arg1, arg2)
<-- result of <2345abcd> call_node2(arg3, arg4)
arg3 = <3456abcd> 'hello_world'
arg4 = <4567abcd> File('foo.txt')
"""
node2call_section: Dict[DataflowNode, List[Tuple[int, DataflowEdge]]] = {}
node2data_section: Dict[DataflowNode, List[Tuple[int, DataflowEdge]]] = defaultdict(
cast(Callable[[], List[Tuple[int, DataflowEdge]]], list)
)
new_vars: Set[ArgumentValue] = set()
section: List[Tuple[int, DataflowEdge]]
edges = toposort_edges(dataflow_edges)
edge_list = rewrite_call_node_merges(list(edges))
# Group dataflow edges into display sections.
# Retain the appearance order of each edge so we can sort sections later.
for i, edge in enumerate(edge_list):
edge_type = get_section_edge_type(edge)
if edge_type == "data":
# Edge is a data section edge.
node2data_section[edge.src].append((i, edge))
continue
if edge_type == "call_arg":
# New variable.
assert isinstance(edge.dest, ArgumentValue)
new_vars.add(edge.dest)
if edge.src in new_vars:
# src is a new variable so we start a new section.
section = [(i, edge)]
else:
# Get or create section associated with src and add this edge.
if edge.src not in node2call_section:
section = node2call_section[edge.src] = []
else:
section = node2call_section[edge.src]
section.append((i, edge))
# Get section associated with dest and union with src section.
if edge.dest:
if edge.dest not in node2call_section:
# dest_section same as src.
node2call_section[edge.dest] = section
else:
# Union dest_section with section.
                # This extend might introduce duplicates; iter_unique cleans them up later.
dest_section = node2call_section[edge.dest]
dest_section.extend(section)
node2call_section[edge.src] = dest_section
# Get unique sections.
call_sections = iter_unique(node2call_section.values(), key=id)
data_sections = node2data_section.values()
def get_order_section(
int_edges: List[Tuple[int, DataflowEdge]]
) -> Tuple[int, List[DataflowEdge]]:
"""
Returns a tuple of section appearance order and the section.
The appearance order of a section is the maximum order of its edges.
We also clean up any duplicate edges that may have been added to the section.
"""
ints = []
section = []
for i, edge in int_edges:
ints.append(i)
section.append(edge)
return (max(ints), list(iter_unique(section)))
# Label each section with its type ("call" or "data") and determine section order.
sections = [
(DataflowSectionKind.CALL,) + get_order_section(int_edges) for int_edges in call_sections
]
sections.extend(
(DataflowSectionKind.DATA,) + get_order_section(int_edges) for int_edges in data_sections
)
# Sort sections.
sections = sorted(sections, key=lambda row: row[1])
# Yield sections.
for kind, _, section2 in sections:
if kind == DataflowSectionKind.CALL:
# If there is path merging, then we will emit multiple sections.
for subsection in iter_subsections(section2):
yield (kind, subsection)
else:
yield (kind, section2)
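# Sketch (not part of the original module): given an iterable of dataflow edges,
# for example from walk_dataflow, keep only the "call" sections and drop the
# data sections.
def _example_call_sections(edges: Iterable[DataflowEdge]) -> List[DataflowSection]:
    return [
        section
        for kind, section in iter_dataflow_sections(edges)
        if kind == DataflowSectionKind.CALL
    ]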
def make_section_dom(
section: DataflowSection,
dataflow_vars: DataflowVars,
new_varname: str = "value",
) -> DataflowSectionDOM:
"""
Returns DOM for a dataflow section.
"""
# Determine assign information from first edge in section.
src, assign_node = section[0]
if isinstance(src, Value):
# Start new variable.
assign_var_name = dataflow_vars[src] = new_varname
elif isinstance(src, (ArgumentValue, CallNodeValue)):
# Value should be named already.
assign_var_name = dataflow_vars[src]
else:
assert not isinstance(src, CallNode)
assert_never(src)
routing_defs: List[Optional[DataflowNode]] = []
arg_defs: List[Tuple[str, Value]] = []
renames: Dict[str, str] = {}
# Compute whether this section ends with a variable.
is_var2var = isinstance(assign_node, (ArgumentValue, CallNodeValue))
last_routing_node = None
# Process remaining edges.
for src, dest in section[1:]:
if isinstance(src, CallNode) and isinstance(dest, ArgumentValue):
# Argument definition edge.
var_name, base_var_name = dataflow_vars.new_var_name(dest)
if base_var_name:
renames[base_var_name] = var_name
arg_defs.append((var_name, dest.value))
else:
# Routing edge.
last_routing_node = dest
if isinstance(dest, CallNode):
is_var2var = False
# Skip unnecessary routing edge, ignore.
if not (isinstance(src, CallNodeValue) and isinstance(dest, CallNode)):
routing_defs.append(dest)
# The last routing def or assign (if there are no routing defs) needs
# to rename its variables to be unique.
last_line = len(routing_defs)
# Create assign clause.
if is_var2var and not routing_defs:
# Assignment is a var2var.
assert assign_node
# Name the last node after the first node, if it doesn't have a name yet.
base_name = dataflow_vars[section[0].src]
var_name, _ = dataflow_vars.new_var_name(assign_node, base_name)
dom_assign = DataflowAssign(assign_var_name, "", var_name, assign_node)
else:
prefix, node_display = display_node(assign_node, renames if last_line == 0 else {})
dom_assign = DataflowAssign(assign_var_name, prefix, node_display, assign_node)
# Create routing clauses.
dom_routing: List[DataflowRouting] = []
for i, dest in enumerate(routing_defs, 1):
prefix, node_display = display_node(dest, renames if i == last_line else {})
dom_routing.append(DataflowRouting(prefix, node_display, dest))
# If last routing node is a Value, as opposed to a CallNode, then this was a
# merge node and we should end with a new variable.
if is_var2var and isinstance(last_routing_node, (ArgumentValue, CallNodeValue)):
# This is a merge node, give it a variable name.
assert not arg_defs
# Name the last node after the first node, if it doesn't have a name yet.
base_name = dataflow_vars[section[0].src]
var_name, _ = dataflow_vars.new_var_name(last_routing_node, base_name)
dom_routing.append(DataflowRouting("", var_name, last_routing_node))
# Create argument definitions.
dom_args: List[DataflowArg] = [DataflowArg(var_name, var) for var_name, var in arg_defs]
return DataflowSectionDOM(dom_assign, dom_routing, dom_args)
def make_data_section_dom(
section: DataflowSection,
dataflow_vars: DataflowVars,
new_varname: str = "value",
) -> DataflowSectionDOM:
"""
Returns a DOM for a data section.
A data section describes how one value was constructed from several subvalues.
For example this dataflow section:
parent_value <-- derives from
subvalue1 = File(path='file1')
subvalue2 = File(path='file2')
corresponds to this kind of code:
subvalue1 = task1()
subvalue2 = task2()
parent_value = [subvalue1, subvalue2]
task3(parent_value)
"""
downstream_nodes: Set[DataflowNode] = set()
upstream_nodes: List[CallNodeValue] = []
for edge in section:
assert isinstance(edge.dest, CallNodeValue)
downstream_nodes.add(edge.src)
upstream_nodes.append(edge.dest)
# Determine assign clause.
# We only support one downstream node currently.
[downstream_node] = downstream_nodes
assign_var_name = dataflow_vars[downstream_node]
# Determine arg clauses.
args = []
for upstream_node in iter_unique(upstream_nodes):
call_node, value = upstream_node
# Ensure variable name is unique.
base_var_name = upstream_node.call_node.task.name + "_result"
new_var_name, _ = dataflow_vars.new_var_name(upstream_node, base_var_name)
args.append(DataflowArg(new_var_name, value))
return DataflowSectionDOM(
assign=DataflowAssign(
var_name=assign_var_name, prefix="", node_display="derives from", node=None
),
routing=[],
args=args,
)
def make_dataflow_dom(
dataflow_edges: Iterable[DataflowEdge], new_varname: str = "value"
) -> Iterable[DataflowSectionDOM]:
"""
Yields dataflow section DOMs from an iterable of dataflow edges.
It also performs variable renaming to give every value a unique variable name.
"""
dataflow_vars = DataflowVars()
for kind, section in iter_dataflow_sections(dataflow_edges):
if kind == DataflowSectionKind.CALL:
yield make_section_dom(section, dataflow_vars, new_varname=new_varname)
elif kind == DataflowSectionKind.DATA:
yield make_data_section_dom(section, dataflow_vars, new_varname=new_varname)
else:
raise NotImplementedError(f"Unknown kind '{kind}'.")
def get_dataflow_call_node(node: Optional[DataflowNode]) -> Optional[CallNode]:
"""
Returns the CallNode for a DataflowNode.
"""
if isinstance(node, CallNode):
return node
elif isinstance(node, ArgumentValue):
return node.argument.call_node
elif isinstance(node, CallNodeValue):
return node.call_node
elif node is None:
return None
else:
assert_never(node)
def get_node_hash(node: Optional[DataflowNode]) -> Optional[str]:
"""
    Returns the hash for a DataflowNode, if any.
"""
if isinstance(node, Value):
return node.value_hash
call_node = get_dataflow_call_node(node)
if call_node:
return call_node.call_hash
return None
def display_node(node: Optional[DataflowNode], renames: Dict[str, str]) -> Tuple[str, str]:
"""
Formats a dataflow node to a string.
"""
if isinstance(node, CallNode):
return ("", display_call_node(node, renames))
elif isinstance(node, ArgumentValue):
return (
"argument of",
display_call_node(node.argument.call_node, renames),
)
elif isinstance(node, CallNodeValue):
return ("", display_call_node(node.call_node, renames))
elif node is None:
return ("", "origin")
else:
assert_never(node)
def display_call_node(call_node: CallNode, renames: Dict[str, str]) -> str:
"""
Formats a CallNode to a string.
"""
try:
arg_names = get_task_args(call_node.task)
except SyntaxError:
arg_names = [get_default_arg_name(i) for i in range(len(call_node.arguments))]
args = [renames.get(arg, arg) for arg in arg_names]
return "{task_name}({args})".format(task_name=call_node.task.name, args=", ".join(args))
def display_value(value: Value) -> str:
"""
Format a Value to a string.
"""
return trim_string(repr(value.value_parsed))
def display_hash(node: Optional[DataflowNode]) -> str:
"""
Formats hash for a DataflowNode.
"""
node_hash = get_node_hash(node)
if node_hash:
return "<{}> ".format(node_hash[:8])
else:
return ""
def display_section(dom: DataflowSectionDOM) -> Iterator[str]:
"""
Yields lines for displaying a dataflow section DOM.
"""
# Display assign line.
assign = dom.assign
yield "{var_name} <-- {prefix}{value_hash}{value}".format(
var_name=assign.var_name,
prefix=assign.prefix + " " if assign.prefix else "",
value_hash=display_hash(assign.node),
value=assign.node_display,
)
indent = len(assign.var_name) + 1
# Display routing.
for routing_def in dom.routing:
yield "{indent}<-- {prefix}{node_hash}{node}".format(
indent=" " * indent,
prefix=routing_def.prefix + " " if routing_def.prefix else "",
node_hash=display_hash(routing_def.node),
node=routing_def.node_display,
)
# Display argument definitions.
if dom.args:
max_var_len = max(len(arg.var_name) for arg in dom.args)
for arg in dom.args:
yield " {var_name}{padding} = <{value_hash}> {var}".format(
var_name=arg.var_name,
padding=" " * (max_var_len - len(arg.var_name)),
value_hash=arg.value.value_hash[:8],
var=display_value(arg.value),
)
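# Minimal sketch (not part of the original module): a section DOM whose value has
# no upstream CallNode renders as a single origin line.
def _example_display_origin_section() -> List[str]:
    dom = DataflowSectionDOM(
        assign=DataflowAssign(var_name="value", prefix="", node_display="origin", node=None),
        routing=[],
        args=[],
    )
    return list(display_section(dom))  # ["value <-- origin"]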
def display_dataflow(dom: DataflowDOM) -> Iterator[str]:
"""
    Yields lines for displaying a dataflow DOM.
"""
for dom_section in dom:
yield from display_section(dom_section)
yield ""
def serialize_section(dom: DataflowSectionDOM) -> dict:
"""
Serialize a DataflowSectionDOM to JSON.
"""
# Serialize assignment.
assign_hash: Optional[str]
if isinstance(dom.assign.node, Value):
assign_hash = dom.assign.node.value_hash
assign_job_id = None
assign_execution_id = None
elif dom.assign.node:
call_node = get_dataflow_call_node(dom.assign.node)
assert call_node
assign_hash = call_node.call_hash
# Get latest job of call_node.
assign_job = sorted(call_node.jobs, key=lambda job: job.start_time, reverse=True)[0]
assign_job_id = assign_job.id
assign_execution_id = assign_job.execution.id
else:
assign_hash = None
assign_job_id = None
assign_execution_id = None
# Serialize routing.
routing = []
for routing_def in dom.routing:
call_node = get_dataflow_call_node(routing_def.node)
if call_node:
call_hash: Optional[str] = call_node.call_hash
# Get latest job of call_node.
job = sorted(call_node.jobs, key=lambda job: job.start_time, reverse=True)[0]
job_id: Optional[str] = job.id
execution_id: Optional[str] = job.execution.id
else:
call_hash = None
job_id = None
execution_id = None
routing.append(
{
"prefix": routing_def.prefix,
"hash": call_hash,
"display": routing_def.node_display,
"job_id": job_id,
"execution_id": execution_id,
}
)
# Serialize arguments.
args = [
{
"var_name": arg.var_name,
"hash": arg.value.value_hash,
"value_display": display_value(arg.value),
}
for arg in dom.args
]
return {
"assign": {
"var_name": dom.assign.var_name,
"prefix": dom.assign.prefix,
"display": dom.assign.node_display,
"hash": assign_hash,
"job_id": assign_job_id,
"execution_id": assign_execution_id,
},
"routing": routing,
"args": args,
}
def serialize_dataflow(dom: DataflowDOM) -> Iterator[dict]:
"""
Serialize DataflowDOM to JSON.
"""
for dom_section in dom:
yield serialize_section(dom_section)
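# Sketch (not part of the original module): the same pipeline as the textual
# rendering above, but serialized to JSON-compatible dicts for a web frontend.
def _example_dataflow_json(backend: RedunBackendDb, value: Value) -> List[dict]:
    dom = make_dataflow_dom(walk_dataflow(backend, value))
    return list(serialize_dataflow(dom))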
"""
Dataflow visualization.
An upstream dataflow visualization explains the derivation of a value. Take
for example this dataflow visualization of the derivation of a VCF file
from a bioinformatic analysis:
```
value = File(path=sample4.vcf, hash=********)
value <-- <********> call_variants(bam, ref_genome)
bam = <********> File(path=sample4.bam, hash=********)
ref_genome = <********> File(path=ref_genome, hash=********)
bam <-- argument of <********> call_variants_all(bams, ref_genome)
<-- <********> align_reads_all(fastqs, ref_genome)
<-- <********> align_reads(fastq, ref_genome)
fastq = <********> File(path=sample4.fastq, hash=********)
ref_genome = <********> File(path=ref_genome, hash=********)
fastq <-- argument of <********> align_reads_all(fastqs, ref_genome)
<-- argument of <********> main(fastqs, ref_genome)
<-- origin
ref_genome <-- argument of <********> align_reads_all(fastqs, ref_genome)
<-- argument of <********> main(fastqs, ref_genome)
<-- origin
```
Hash values are indicated by * above. For reference, here is what the
workflow might have been:
```
@task()
def align_reads(fastq: File, ref_genome: File) -> File:
reads = cast(str, fastq.read())
ref = cast(str, ref_genome.read())
bam = File(fastq.path.replace("fastq", "bam"))
bam.write("align({}, {})".format(reads, ref))
return bam
@task()
def call_variants(bam: File, ref_genome: File) -> File:
align = cast(str, bam.read())
ref = cast(str, ref_genome.read())
vcf = File(bam.path.replace("bam", "vcf"))
vcf.write("calls({}, {})".format(align, ref))
return vcf
@task()
def align_reads_all(fastqs: List[File], ref_genome: File):
bams = [align_reads(fastq, ref_genome) for fastq in fastqs]
return bams
@task()
def call_variants_all(bams: List[File], ref_genome: File):
vcfs = [call_variants(bam, ref_genome) for bam in bams]
return vcfs
@task()
def main(fastqs: List[File], ref_genome: File):
bams = align_reads_all(fastqs, ref_genome)
vcfs = call_variants_all(bams, ref_genome)
return vcfs
```
A dataflow visualization consists of a series of paragraphs called
"dataflow sections" that describe how one of the values is derived. Here
is the section for the `bam` value:
```
bam <-- argument of <********> call_variants_all(bams, ref_genome)
<-- <********> align_reads_all(fastqs, ref_genome)
<-- <********> align_reads(fastq, ref_genome_2)
fastq = <********> File(path=sample4.fastq, hash=********)
ref_genome = <********> File(path=ref_genome, hash=********)
```
A section is made of three clauses: assignment, routing, and arguments.
The assignment clause indicates which CallNode produced this value:
```
bam <-- argument of <********> call_variants_all(bams, ref_genome)
```
Routing clauses, if present, describe a series of additional CallNodes
that "route" the value by passing via arguments from parent CallNode to child
CallNode, or by results from child CallNode to parent CallNode.
```
<-- result of <********> align_reads_all(fastqs, ref_genome)
<-- <********> align_reads(fastq, ref_genome_2)
```
Argument clauses define the value for each argument in the final CallNode.
```
fastq = <********> File(path=sample4.fastq, hash=********)
ref_genome = <********> File(path=ref_genome, hash=********)
```
To build this visualization, the following strategy is used:
- Given a starting value (e.g. a VCF file in the example above), walk the
CallGraph backwards (i.e. upstream) to determine relevant nodes. These are
  called DataflowNodes, which are connected by DataflowEdges.
- DataflowEdges are then grouped into sections.
- Each section is then reorganized into a DataflowSectionDOM. A DataflowDOM
  is the collection of DataflowSectionDOMs. The DOM (document object model) is
an intermediate representation that can be rendered in multiple ways.
- Once a DataflowDOM is created, it can either be rendered into a textual
format, or serialized into JSON for the web frontend.
"""
import ast
import re
from collections import defaultdict
from enum import Enum
from itertools import chain
from textwrap import dedent
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
TypeVar,
Union,
cast,
)
from redun.backends.db import Argument, CallNode, RedunBackendDb, Task, Value
from redun.utils import assert_never, trim_string
T = TypeVar("T")
REDUN_INTERNAL_TASKS = {
"redun.postprocess_script",
"redun.script",
}
def iter_unique(items: Iterable[T], key: Callable[[T], Any] = lambda x: x) -> Iterator[T]:
"""
Iterate through unique items.
"""
seen: Set[T] = set()
for item in items:
item_key = key(item)
if item_key not in seen:
yield item
seen.add(item_key)
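# Illustrative sketch (not part of the original module): iter_unique deduplicates
# while preserving first-seen order, optionally keyed on a derived value.
def _example_iter_unique() -> None:
    assert list(iter_unique([3, 1, 3, 2, 1])) == [3, 1, 2]
    words = ["apple", "avocado", "banana", "blueberry"]
    assert list(iter_unique(words, key=lambda word: word[0])) == ["apple", "banana"]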
class ArgumentValue(NamedTuple):
"""
A DataflowNode used for tracing one subvalue in an argument.
"""
argument: Argument
value: Value
class CallNodeValue(NamedTuple):
"""
A DataflowNode used for tracing one subvalue of a CallNode result.
"""
call_node: CallNode
value: Value
# There are several kinds of DataflowNodes.
DataflowNode = Union[ArgumentValue, CallNodeValue, CallNode, Value]
class DataflowEdge(NamedTuple):
"""
An edge in a Dataflow graph.
"""
src: DataflowNode
dest: Optional[DataflowNode]
# A grouping of DataflowEdges that are displayed as one "paragraph".
DataflowSection = List[DataflowEdge]
class DataflowSectionKind(Enum):
"""
Each dataflow section describes either a task call or a data manipulation.
"""
CALL = "call"
DATA = "data"
class DataflowAssign(NamedTuple):
"""
The assignment clause in a Dataflow DOM.
"""
var_name: str
prefix: str
node_display: str
node: Optional[DataflowNode]
class DataflowRouting(NamedTuple):
"""
A routing clause in a Dataflow DOM.
"""
prefix: str
node_display: str
node: Optional[DataflowNode]
class DataflowArg(NamedTuple):
"""
An argument clause in a Dataflow DOM.
"""
var_name: str
value: Value
class DataflowSectionDOM(NamedTuple):
"""
A section in Dataflow DOM.
"""
assign: DataflowAssign
routing: List[DataflowRouting]
args: List[DataflowArg]
# The top-level Dataflow DOM.
DataflowDOM = Iterable[DataflowSectionDOM]
def get_task_args(task: Task) -> List[str]:
"""
Returns list of argument names of a Task. Raises a SyntaxError if the task source code is not
properly formatted.
"""
# Since we don't currently record the name of positional arguments,
# we have to infer them from the source code.
code = ast.parse(dedent(task.source))
if not isinstance(code.body[0], ast.FunctionDef):
raise SyntaxError("Source code is not a properly formatted function.")
    # Type ignore is needed since the AST lib does not seem to use proper types
# everywhere.
return [arg.arg for arg in code.body[0].args.args] # type: ignore
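# Illustrative sketch (not part of the original module): get_task_args only reads
# `task.source`, so a stand-in object with a `source` attribute is enough to show
# how positional argument names are recovered from the task's source code.
def _example_get_task_args() -> None:
    from types import SimpleNamespace

    source = "def align_reads(fastq, ref_genome):\n    return fastq\n"
    fake_task = cast(Task, SimpleNamespace(source=source))
    assert get_task_args(fake_task) == ["fastq", "ref_genome"]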
def make_var_name(var_name_base: str, name2var: Dict[str, DataflowNode], suffix: int = 2) -> str:
"""
    Generate a new variable name using a unique suffix (e.g. myvar_2).
"""
# Strip numerical suffix.
var_name_base = re.sub(r"_\d+$", "", var_name_base)
if var_name_base not in name2var:
# This variable name is already unique.
return var_name_base
    # Search increasing suffixes until we find a unique variable name.
while True:
new_var_name = var_name_base + "_" + str(suffix)
if new_var_name not in name2var:
return new_var_name
suffix += 1
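# Illustrative sketch (not part of the original module): existing names force an
# increasing numeric suffix, and a previous suffix on the base name is ignored.
def _example_make_var_name() -> None:
    taken = cast(Dict[str, DataflowNode], {"bam": object(), "bam_2": object()})
    assert make_var_name("fastq", taken) == "fastq"
    assert make_var_name("bam", taken) == "bam_3"
    assert make_var_name("bam_2", taken) == "bam_3"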
def get_default_arg_name(pos: int) -> str:
"""
    Generate the default name for an argument.
"""
return f"arg{pos}"
class DataflowVars:
"""
Manages variable names for nodes in a dataflow.
"""
def __init__(self):
self.var2name: Dict[DataflowNode, str] = {}
self.name2var: Dict[str, DataflowNode] = {}
self.task2args: Dict[Task, List[str]] = {}
def __getitem__(self, node: DataflowNode) -> str:
"""
Get a variable name for a DataflowNode.
"""
return self.var2name[node]
def __setitem__(self, node: DataflowNode, var_name: str) -> str:
"""
Set a new variable name for a DataflowNode.
"""
self.var2name[node] = var_name
self.name2var[var_name] = node
return var_name
def __contains__(self, node: DataflowNode) -> bool:
"""
Returns True if node has a variable name.
"""
return node in self.var2name
def get_task_args(self, task: Task) -> List[str]:
"""
Returns the parameter names of a Task.
"""
args = self.task2args.get(task)
if not args:
# Cache task arg names.
# TODO: Properly handle variadic args.
try:
args = self.task2args[task] = get_task_args(task)
except SyntaxError:
# The argument names cannot be properly parsed.
return []
return args
def new_var_name(
self, node: DataflowNode, base_var_name: Optional[str] = None
) -> Tuple[str, Optional[str]]:
"""
Get or create a new variable name for a DataflowNode.
"""
var_name = self.var2name.get(node)
if var_name:
# Node is already named.
return var_name, None
if not base_var_name:
# Autogenerate base var name from ArgumentValue.
assert isinstance(node, ArgumentValue)
argument, value = node
# Determine new variable name.
if argument.arg_key:
base_var_name = argument.arg_key
else:
arg_names = self.get_task_args(argument.call_node.task)
if not arg_names:
# Use default argument names.
base_var_name = get_default_arg_name(argument.arg_position)
else:
base_var_name = arg_names[argument.arg_position]
# Ensure variable name is unique.
var_name = make_var_name(base_var_name, self.name2var)
self[node] = var_name
return var_name, base_var_name
def walk_dataflow_value(backend: RedunBackendDb, value: Value) -> Iterator[DataflowEdge]:
"""
Iterates through the edges in the upstream dataflow graph of a Value.
"""
# Find upstream CallNodes.
# A value can be produced by many CallNodes and it can be a subvalue of
# a value produced from many CallNodes.
call_nodes = set(value.results) | {
call_node for parent in value.parents for call_node in parent.results
}
call_nodes = {call_node for call_node in call_nodes if not is_internal_task(call_node.task)}
def walk_parents(node: CallNode, seen: set) -> Iterator[CallNode]:
for parent in node.parents:
if parent not in seen:
yield parent
seen.add(parent)
yield from walk_parents(parent, seen)
# Determine which CallNodes are just routing CallNodes.
# A routing CallNode is an upstream CallNode that is also an ancestor
# of another upstream CallNode.
seen: Set[CallNode] = set()
routing_call_nodes = set()
for call_node in call_nodes:
for ancestor in walk_parents(call_node, seen):
if ancestor in call_nodes:
routing_call_nodes.add(ancestor)
break
# Determine originating CallNodes (upstream and non-routing).
originating_call_nodes = call_nodes - routing_call_nodes
# Prefer the most recent CallNodes.
start_time_call_nodes = [
(job.start_time, call_node)
for call_node in originating_call_nodes
for job in call_node.jobs
]
max_start_time = max((start_time for start_time, _ in start_time_call_nodes), default=None)
upstream_call_node = next(
(
call_node
for start_time, call_node in start_time_call_nodes
if start_time == max_start_time
),
None,
)
# Emit Value-CallNode edges in dataflow.
if upstream_call_node:
yield DataflowEdge(value, upstream_call_node)
else:
yield DataflowEdge(value, None)
def get_callnode_arguments(call_node: CallNode) -> List[Argument]:
"""
Returns a CallNode's arguments in sorted order.
"""
pos_args = []
kw_args = []
for arg in call_node.arguments:
if arg.arg_position is not None:
pos_args.append(arg)
else:
kw_args.append(arg)
pos_args.sort(key=lambda arg: arg.arg_position)
kw_args.sort(key=lambda arg: arg.arg_key)
return pos_args + kw_args
def walk_dataflow_callnode(backend: RedunBackendDb, call_node: CallNode) -> Iterator[DataflowEdge]:
"""
Iterates through the upstream Arguments of a CallNode.
"""
# Emit CallNode-ArgumentValue edges in dataflow.
    # Reversing the arguments causes the traversal to visit arguments in their
    # original order, which is nicer for display.
arguments = get_callnode_arguments(call_node)
if arguments:
for argument in arguments:
yield DataflowEdge(call_node, ArgumentValue(argument, argument.value))
else:
# There are no arguments, this is an origin of the dataflow.
yield DataflowEdge(call_node, None)
def is_internal_task(task: Task) -> bool:
"""
Returns True if task is an internal redun task.
We skip such tasks in the dataflow to avoid clutter.
"""
return task.fullname in REDUN_INTERNAL_TASKS
def walk_dataflow_callnode_value(
backend: RedunBackendDb, call_node_value: CallNodeValue
) -> Iterator[DataflowEdge]:
"""
Iterates through the upstream dataflow edges of a CallNodeValue.
The edges either go deeper to the child CallNodes or stay with this CallNode.
"""
call_node, value = call_node_value
def is_subvalue(query_value: Value, target_value: Value) -> bool:
value_hashes = chain(
[target_value.value_hash], (child.value_hash for child in target_value.children)
)
return query_value.value_hash in value_hashes
# Prefer the flow from children of call_node over call_node.
child_matches = [
child_node
for child_node in call_node.children
if not is_internal_task(child_node.task) and is_subvalue(value, child_node.value)
]
if len(child_matches) == 1:
# There is one obvious child CallNode that produced this value.
# Follow the dataflow through this child CallNode.
[child_node] = child_matches
yield DataflowEdge(call_node_value, CallNodeValue(child_node, value))
else:
# Otherwise, we follow the dataflow for the whole CallNode.
yield DataflowEdge(call_node_value, call_node)
def walk_dataflow_argument_value(
backend: RedunBackendDb, argument_value: ArgumentValue
) -> Iterator[DataflowEdge]:
"""
Iterates through the upstream dataflow edges of an ArgumentValue.
Edge types:
- ArgumentValue <-- CallNodeValue:
- Value came from the result of a CallNode.
- ArgumentValue <-- ArgumentValue
- Value came from argument of parent CallNode, i.e. argument-to-argument routing.
- ArgumentValue <-- Origin
- Value came directly from user or task.
"""
argument, value = argument_value
is_terminal = True
# Determine the most recent common parent CallNode of all the upstream CallNodes.
call_node_parents = [set(call_node.parents) for call_node in argument.upstream]
call_node_parents.append(set(argument.call_node.parents))
context_call_node = next(
iter(
sorted(
set.intersection(*call_node_parents),
key=lambda call_node: call_node.timestamp,
reverse=True,
)
),
None,
)
# Process upstream CallNodes in a consistent order (call_order).
if context_call_node:
# Use reverse order during emission to get correct order during display.
upstream_order = sorted(
[
(call_node, edge.call_order)
for call_node in argument.upstream
for edge in call_node.parent_edges
if edge.parent_node == context_call_node
],
key=lambda pair: pair[1],
reverse=True,
)
upstream = [call_node for call_node, _ in upstream_order]
else:
# Fallback if we can't determine context call node.
upstream = argument.upstream
# Emit upstream sibling CallNodes.
for call_node in iter_unique(upstream):
# Order subvalues for consistency.
subvalues = sorted(call_node.value.children, key=lambda child: child.value_hash)
result_values = iter_unique(chain([call_node.value], subvalues))
match = False
for result_value in result_values:
if value.value_hash == result_value.value_hash:
is_terminal = False
match = True
yield DataflowEdge(
argument_value,
CallNodeValue(call_node, result_value),
)
if not match:
# Default to emitting the whole result of the CallNode.
is_terminal = False
yield DataflowEdge(
argument_value,
CallNodeValue(call_node, call_node.value),
)
# Emit upstream argument from parent CallNode.
# Prefer most recent parent.
parent_call_nodes = sorted(
argument.call_node.parents, key=lambda parent: parent.timestamp, reverse=True
)
for parent_call_node in parent_call_nodes[:1]:
for parent_argument in parent_call_node.arguments:
parent_values = chain([parent_argument.value], parent_argument.value.children)
for parent_value in parent_values:
if value.value_hash == parent_value.value_hash:
is_terminal = False
yield DataflowEdge(
argument_value,
ArgumentValue(parent_argument, value),
)
# Emit terminal origin value.
if is_terminal:
yield DataflowEdge(argument_value, None)
def walk_dataflow_node(backend: RedunBackendDb, node: DataflowNode) -> Iterator[DataflowEdge]:
"""
    Iterates through the upstream dataflow edges of any DataflowNode.
"""
if isinstance(node, Value):
return walk_dataflow_value(backend, node)
elif isinstance(node, CallNode):
return walk_dataflow_callnode(backend, node)
elif isinstance(node, ArgumentValue):
return walk_dataflow_argument_value(backend, node)
elif isinstance(node, CallNodeValue):
return walk_dataflow_callnode_value(backend, node)
else:
assert_never(node)
def walk_dataflow(backend: RedunBackendDb, init_node: DataflowNode) -> Iterator[DataflowEdge]:
"""
Iterate through all the upstream dataflow edges of a 'node' in the CallGraph.
A 'node' can be a Value, CallNode, CallNodeValue, or an ArgumentValue.
"""
# Perform depth-first traversal.
queue: List[DataflowNode] = [init_node]
seen: Set[DataflowNode] = set()
while queue:
node: DataflowNode = queue.pop()
child_edges = list(walk_dataflow_node(backend, node))
yield from child_edges
# Reverse edges before pushing on to stack to maintain sibling order.
for edge in reversed(child_edges):
node2 = edge.dest
if node2 is None:
# Terminal node of type 'Origin'.
continue
# Determine whether dest node is unique and should be added to queue.
if node2 not in seen:
queue.append(node2)
seen.add(node2)
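# Sketch (not part of the original module), assuming `backend` and `value` come
# from an existing redun database: materialize the upstream dataflow edges of a
# value so they can later be grouped into sections or serialized.
def _example_collect_upstream_edges(backend: RedunBackendDb, value: Value) -> List[DataflowEdge]:
    return list(walk_dataflow(backend, value))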
def get_section_edge_type(edge: DataflowEdge) -> str:
"""
Classifies a DataflowEdge.
"""
src, dest = edge
if (
isinstance(src, ArgumentValue)
and isinstance(dest, CallNodeValue)
and src.value.value_hash != dest.value.value_hash
):
return "data"
elif isinstance(src, CallNode) and isinstance(dest, ArgumentValue):
return "call_arg"
elif isinstance(src, (Value, CallNodeValue)) and isinstance(dest, CallNode):
return "call_result"
elif isinstance(src, CallNodeValue) and isinstance(dest, CallNodeValue):
return "call_result_routing"
elif isinstance(src, ArgumentValue) and isinstance(dest, CallNodeValue):
return "call_arg_result_routing"
elif isinstance(src, ArgumentValue) and isinstance(dest, ArgumentValue):
return "call_arg_routing"
elif isinstance(src, CallNode) and dest is None:
return "call_origin"
elif isinstance(src, ArgumentValue) and dest is None:
return "call_arg_origin"
elif isinstance(src, Value) and dest is None:
return "call_value_origin"
else:
raise AssertionError(f"Unknown edge type {edge}")
def toposort_edges(edges: Iterable[DataflowEdge]) -> Iterator[DataflowEdge]:
"""
Topologically sort DataflowEdges in depth-first order.
"""
# Compute indegree.
indegrees: Dict[DataflowNode, int] = defaultdict(int)
src2edge: Dict[DataflowNode, List[DataflowEdge]] = defaultdict(
cast(Callable[[], List[DataflowEdge]], list)
)
for edge in edges:
src2edge[edge.src].append(edge)
# Ensure every node is present in indegrees, including roots.
indegrees[edge.src]
if edge.dest:
indegrees[edge.dest] += 1
# Initialize queue with roots.
queue: List[DataflowNode] = [node for node, degree in indegrees.items() if degree == 0]
while queue:
node = queue.pop()
yield from src2edge[node]
# Reverse edges before pushing on stack in order to maintain sibling order.
for edge in reversed(src2edge[node]):
if edge.dest:
indegrees[edge.dest] -= 1
if indegrees[edge.dest] == 0:
# All parents have been visited, we can enqueue dest.
queue.append(edge.dest)
def rewrite_call_node_merges(edges: List[DataflowEdge]) -> List[DataflowEdge]:
"""
Rewrites dataflow graphs to enforce one CallNodeValue per CallNode.
    This function identifies CallNodes that have multiple parent CallNodeValues
like this:
ArgumentValue --> CallNodeValue --\
V
CallNode (merge_node)
^
ArgumentValue --> CallNodeValue --/
    and rewrites them to unify the CallNodeValues like this:
ArgumentValue --\
V
CallNodeValue --> CallNode
^
ArgumentValue --/
"""
# Build graph dict.
src2edges: Dict[Optional[DataflowNode], List[DataflowEdge]] = defaultdict(list)
dest2edges = defaultdict(list)
nodes = []
for edge in edges:
nodes.append(edge.src)
src2edges[edge.src].append(edge)
if edge.dest:
dest2edges[edge.dest].append(edge)
# Find merge nodes.
merge_nodes = [
node for node, edges in dest2edges.items() if isinstance(node, CallNode) and len(edges) > 1
]
# Rewrite CallNode merge nodes.
for merge_node in merge_nodes:
# Find relevant edges and nodes.
cv_cn_edges = dest2edges[merge_node]
call_node_values = [edge.src for edge in cv_cn_edges]
upstream_edges = [
edge for call_node_value in call_node_values for edge in dest2edges[call_node_value]
]
upstream_nodes = list(iter_unique(edge.src for edge in upstream_edges))
old_edges = set(cv_cn_edges) | set(upstream_edges)
# Create new unified CallNodeValue from call_node_values.
unified_node = CallNodeValue(call_node=merge_node, value=merge_node.value)
# Create new edges.
new_edges = [
DataflowEdge(
src=upstream_node,
dest=unified_node,
)
for upstream_node in upstream_nodes
] + [DataflowEdge(src=unified_node, dest=merge_node)]
# Remove old edges from edges.
edges2 = [edge for edge in edges if edge not in old_edges]
# To keep edges in traversal order, insert new edges right before
# the first appearance of the merge_node.
insert_index = min(i for i, edge in enumerate(edges2) if edge.src == merge_node)
edges = edges2[:insert_index] + new_edges + edges2[insert_index:]
return edges
def iter_subsections(section: DataflowSection) -> Iterator[DataflowSection]:
"""
    Determines if a section should be broken down into smaller sections.
In real life dataflows, there are some cases where the dataflow merges
such that the structure is a DAG, not just a tree. These merges represent a
value that was passed to two or more different tasks and then their outputs
eventually combine again, either into a single Value like a list or as arguments
into a common task. For example, `value` is a merge node in the upstream
dataflow of `result`.
value = task0()
output1 = task1(a=value)
output2 = task2(b=value)
result = task3(c=output1, d=output2)
The upstream dataflow graph of `result` is:
Value(result)
|
V
CallNode(task3) ---------------\
| |
V V
ArgumentValue(task3, key=a) ArgumentValue(task3, key=b)
| |
V V
CallNode(task1) CallNode(task2)
| |
V V
ArgumentValue(task1, key=c) ArgumentValue(task2, key=d)
| |
V |
CallNodeValue(task0, value) <--/
|
V
CallNode(task0)
|
V
Origin
    The function `iter_dataflow_sections()` will break this graph right after
    every `CallNode --> ArgumentValue` edge, resulting in three sections.
One of those sections will have a "merge node", `CallNode(task0)`:
ArgumentValue(task1, key=c) ArgumentValue(task2, key=d)
| |
V |
CallNodeValue(task0, value) <--/
|
V
CallNode(task0)
|
V
Origin
This function will further break this section into subsections like this:
Subsection 1:
ArgumentValue(task1, key=c)
|
V
CallNodeValue(task0, value)
Subsection 2:
ArgumentValue(task2, key=d)
|
V
CallNodeValue(task0, value)
Subsection 3:
CallNodeValue(task0, value)
|
V
CallNode(task0)
|
V
Origin
Ultimately these three subsections get rendered as:
c <-- c_2
d <-- c_2
c_2 <-- task()
<-- origin
"""
# Build graph dict.
src2edges: Dict[Optional[DataflowNode], List[DataflowEdge]] = defaultdict(list)
dest2edges = defaultdict(list)
nodes = []
for edge in section:
nodes.append(edge.src)
src2edges[edge.src].append(edge)
if edge.dest:
dest2edges[edge.dest].append(edge)
    # Find roots. Maintain order of appearance in section.
roots = [node for node in iter_unique(nodes) if len(dest2edges[node]) == 0]
# Find merge nodes.
merge_nodes = [node for node, edges in dest2edges.items() if len(edges) > 1]
if not merge_nodes:
# No merge nodes. Keep section as is.
yield section
return
# Determine subsections.
subsection: DataflowSection = []
node: Optional[DataflowNode]
for node in roots + merge_nodes:
while True:
next_edges = src2edges[node]
if len(next_edges) != 1:
# We have hit the end of the section.
                # There are either no more edges, or we are hitting the arguments.
subsection.extend(next_edges)
yield subsection
subsection = []
break
# Path should always be linear.
[edge] = next_edges
subsection.append(edge)
if edge.dest not in merge_nodes:
# Follow edge.
node = edge.dest
else:
# We have hit the merge node, stop.
yield subsection
subsection = []
break
def iter_dataflow_sections(
dataflow_edges: Iterable[DataflowEdge],
) -> Iterator[Tuple[DataflowSectionKind, DataflowSection]]:
"""
Yields dataflow sections from an iterable of dataflow edges.
A dataflow section is a group of edges representing one 'paragraph' in a
dataflow display.
value <-- <1234abcd> call_node(arg1, arg2)
<-- result of <2345abcd> call_node2(arg3, arg4)
arg3 = <3456abcd> 'hello_world'
arg4 = <4567abcd> File('foo.txt')
"""
node2call_section: Dict[DataflowNode, List[Tuple[int, DataflowEdge]]] = {}
node2data_section: Dict[DataflowNode, List[Tuple[int, DataflowEdge]]] = defaultdict(
cast(Callable[[], List[Tuple[int, DataflowEdge]]], list)
)
new_vars: Set[ArgumentValue] = set()
section: List[Tuple[int, DataflowEdge]]
edges = toposort_edges(dataflow_edges)
edge_list = rewrite_call_node_merges(list(edges))
# Group dataflow edges into display sections.
# Retain the appearance order of each edge so we can sort sections later.
for i, edge in enumerate(edge_list):
edge_type = get_section_edge_type(edge)
if edge_type == "data":
# Edge is a data section edge.
node2data_section[edge.src].append((i, edge))
continue
if edge_type == "call_arg":
# New variable.
assert isinstance(edge.dest, ArgumentValue)
new_vars.add(edge.dest)
if edge.src in new_vars:
# src is a new variable so we start a new section.
section = [(i, edge)]
else:
# Get or create section associated with src and add this edge.
if edge.src not in node2call_section:
section = node2call_section[edge.src] = []
else:
section = node2call_section[edge.src]
section.append((i, edge))
# Get section associated with dest and union with src section.
if edge.dest:
if edge.dest not in node2call_section:
# dest_section same as src.
node2call_section[edge.dest] = section
else:
# Union dest_section with section.
                # This extend might introduce duplicates; iter_unique cleans them up later.
dest_section = node2call_section[edge.dest]
dest_section.extend(section)
node2call_section[edge.src] = dest_section
# Get unique sections.
call_sections = iter_unique(node2call_section.values(), key=id)
data_sections = node2data_section.values()
def get_order_section(
int_edges: List[Tuple[int, DataflowEdge]]
) -> Tuple[int, List[DataflowEdge]]:
"""
Returns a tuple of section appearance order and the section.
The appearance order of a section is the maximum order of its edges.
We also clean up any duplicate edges that may have been added to the section.
"""
ints = []
section = []
for i, edge in int_edges:
ints.append(i)
section.append(edge)
return (max(ints), list(iter_unique(section)))
# Label each section with its type ("call" or "data") and determine section order.
sections = [
(DataflowSectionKind.CALL,) + get_order_section(int_edges) for int_edges in call_sections
]
sections.extend(
(DataflowSectionKind.DATA,) + get_order_section(int_edges) for int_edges in data_sections
)
# Sort sections.
sections = sorted(sections, key=lambda row: row[1])
# Yield sections.
for kind, _, section2 in sections:
if kind == DataflowSectionKind.CALL:
# If there is path merging, then we will emit multiple sections.
for subsection in iter_subsections(section2):
yield (kind, subsection)
else:
yield (kind, section2)
def make_section_dom(
section: DataflowSection,
dataflow_vars: DataflowVars,
new_varname: str = "value",
) -> DataflowSectionDOM:
"""
Returns DOM for a dataflow section.
"""
# Determine assign information from first edge in section.
src, assign_node = section[0]
if isinstance(src, Value):
# Start new variable.
assign_var_name = dataflow_vars[src] = new_varname
elif isinstance(src, (ArgumentValue, CallNodeValue)):
# Value should be named already.
assign_var_name = dataflow_vars[src]
else:
assert not isinstance(src, CallNode)
assert_never(src)
routing_defs: List[Optional[DataflowNode]] = []
arg_defs: List[Tuple[str, Value]] = []
renames: Dict[str, str] = {}
# Compute whether this section ends with a variable.
is_var2var = isinstance(assign_node, (ArgumentValue, CallNodeValue))
last_routing_node = None
# Process remaining edges.
for src, dest in section[1:]:
if isinstance(src, CallNode) and isinstance(dest, ArgumentValue):
# Argument definition edge.
var_name, base_var_name = dataflow_vars.new_var_name(dest)
if base_var_name:
renames[base_var_name] = var_name
arg_defs.append((var_name, dest.value))
else:
# Routing edge.
last_routing_node = dest
if isinstance(dest, CallNode):
is_var2var = False
# Skip unnecessary routing edge, ignore.
if not (isinstance(src, CallNodeValue) and isinstance(dest, CallNode)):
routing_defs.append(dest)
# The last routing def or assign (if there are no routing defs) needs
# to rename its variables to be unique.
last_line = len(routing_defs)
# Create assign clause.
if is_var2var and not routing_defs:
# Assignment is a var2var.
assert assign_node
# Name the last node after the first node, if it doesn't have a name yet.
base_name = dataflow_vars[section[0].src]
var_name, _ = dataflow_vars.new_var_name(assign_node, base_name)
dom_assign = DataflowAssign(assign_var_name, "", var_name, assign_node)
else:
prefix, node_display = display_node(assign_node, renames if last_line == 0 else {})
dom_assign = DataflowAssign(assign_var_name, prefix, node_display, assign_node)
# Create routing clauses.
dom_routing: List[DataflowRouting] = []
for i, dest in enumerate(routing_defs, 1):
prefix, node_display = display_node(dest, renames if i == last_line else {})
dom_routing.append(DataflowRouting(prefix, node_display, dest))
# If last routing node is a Value, as opposed to a CallNode, then this was a
# merge node and we should end with a new variable.
if is_var2var and isinstance(last_routing_node, (ArgumentValue, CallNodeValue)):
# This is a merge node, give it a variable name.
assert not arg_defs
# Name the last node after the first node, if it doesn't have a name yet.
base_name = dataflow_vars[section[0].src]
var_name, _ = dataflow_vars.new_var_name(last_routing_node, base_name)
dom_routing.append(DataflowRouting("", var_name, last_routing_node))
# Create argument definitions.
dom_args: List[DataflowArg] = [DataflowArg(var_name, var) for var_name, var in arg_defs]
return DataflowSectionDOM(dom_assign, dom_routing, dom_args)
def make_data_section_dom(
section: DataflowSection,
dataflow_vars: DataflowVars,
new_varname: str = "value",
) -> DataflowSectionDOM:
"""
Returns a DOM for a data section.
A data section describes how one value was constructed from several subvalues.
For example this dataflow section:
parent_value <-- derives from
subvalue1 = File(path='file1')
subvalue2 = File(path='file2')
corresponds to this kind of code:
subvalue1 = task1()
subvalue2 = task2()
parent_value = [subvalue1, subvalue2]
task3(parent_value)
"""
downstream_nodes: Set[DataflowNode] = set()
upstream_nodes: List[CallNodeValue] = []
for edge in section:
assert isinstance(edge.dest, CallNodeValue)
downstream_nodes.add(edge.src)
upstream_nodes.append(edge.dest)
# Determine assign clause.
# We only support one downstream node currently.
[downstream_node] = downstream_nodes
assign_var_name = dataflow_vars[downstream_node]
# Determine arg clauses.
args = []
for upstream_node in iter_unique(upstream_nodes):
call_node, value = upstream_node
# Ensure variable name is unique.
base_var_name = upstream_node.call_node.task.name + "_result"
new_var_name, _ = dataflow_vars.new_var_name(upstream_node, base_var_name)
args.append(DataflowArg(new_var_name, value))
return DataflowSectionDOM(
assign=DataflowAssign(
var_name=assign_var_name, prefix="", node_display="derives from", node=None
),
routing=[],
args=args,
)
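# Illustrative sketch (not part of the original module): for the docstring's
# example above, and assuming hypothetical Value objects value1/value2 for
# File(path='file1') / File(path='file2') produced by task1/task2, the returned
# DOM has roughly this shape:
#
#   DataflowSectionDOM(
#       assign=DataflowAssign(var_name="parent_value", prefix="",
#                             node_display="derives from", node=None),
#       routing=[],
#       args=[DataflowArg("task1_result", value1),
#             DataflowArg("task2_result", value2)],
#   )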
def make_dataflow_dom(
dataflow_edges: Iterable[DataflowEdge], new_varname: str = "value"
) -> Iterable[DataflowSectionDOM]:
"""
Yields dataflow section DOMs from an iterable of dataflow edges.
It also performs variable renaming to give every value a unique variable name.
"""
dataflow_vars = DataflowVars()
for kind, section in iter_dataflow_sections(dataflow_edges):
if kind == DataflowSectionKind.CALL:
yield make_section_dom(section, dataflow_vars, new_varname=new_varname)
elif kind == DataflowSectionKind.DATA:
yield make_data_section_dom(section, dataflow_vars, new_varname=new_varname)
else:
raise NotImplementedError(f"Unknown kind '{kind}'.")
def get_dataflow_call_node(node: Optional[DataflowNode]) -> Optional[CallNode]:
"""
Returns the CallNode for a DataflowNode.
"""
if isinstance(node, CallNode):
return node
elif isinstance(node, ArgumentValue):
return node.argument.call_node
elif isinstance(node, CallNodeValue):
return node.call_node
elif node is None:
return None
else:
assert_never(node)
def get_node_hash(node: Optional[DataflowNode]) -> Optional[str]:
"""
    Returns the hash for a DataflowNode, if available.
"""
if isinstance(node, Value):
return node.value_hash
call_node = get_dataflow_call_node(node)
if call_node:
return call_node.call_hash
return None
def display_node(node: Optional[DataflowNode], renames: Dict[str, str]) -> Tuple[str, str]:
"""
Formats a dataflow node to a string.
"""
if isinstance(node, CallNode):
return ("", display_call_node(node, renames))
elif isinstance(node, ArgumentValue):
return (
"argument of",
display_call_node(node.argument.call_node, renames),
)
elif isinstance(node, CallNodeValue):
return ("", display_call_node(node.call_node, renames))
elif node is None:
return ("", "origin")
else:
assert_never(node)
def display_call_node(call_node: CallNode, renames: Dict[str, str]) -> str:
"""
Formats a CallNode to a string.
"""
try:
arg_names = get_task_args(call_node.task)
except SyntaxError:
arg_names = [get_default_arg_name(i) for i in range(len(call_node.arguments))]
args = [renames.get(arg, arg) for arg in arg_names]
return "{task_name}({args})".format(task_name=call_node.task.name, args=", ".join(args))
def display_value(value: Value) -> str:
"""
Format a Value to a string.
"""
return trim_string(repr(value.value_parsed))
def display_hash(node: Optional[DataflowNode]) -> str:
"""
Formats hash for a DataflowNode.
"""
node_hash = get_node_hash(node)
if node_hash:
return "<{}> ".format(node_hash[:8])
else:
return ""
def display_section(dom: DataflowSectionDOM) -> Iterator[str]:
"""
Yields lines for displaying a dataflow section DOM.
"""
# Display assign line.
assign = dom.assign
yield "{var_name} <-- {prefix}{value_hash}{value}".format(
var_name=assign.var_name,
prefix=assign.prefix + " " if assign.prefix else "",
value_hash=display_hash(assign.node),
value=assign.node_display,
)
indent = len(assign.var_name) + 1
# Display routing.
for routing_def in dom.routing:
yield "{indent}<-- {prefix}{node_hash}{node}".format(
indent=" " * indent,
prefix=routing_def.prefix + " " if routing_def.prefix else "",
node_hash=display_hash(routing_def.node),
node=routing_def.node_display,
)
# Display argument definitions.
if dom.args:
max_var_len = max(len(arg.var_name) for arg in dom.args)
for arg in dom.args:
yield " {var_name}{padding} = <{value_hash}> {var}".format(
var_name=arg.var_name,
padding=" " * (max_var_len - len(arg.var_name)),
value_hash=arg.value.value_hash[:8],
var=display_value(arg.value),
)
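# Illustrative output (hypothetical hashes, tasks, and file): a rendered
# section looks roughly like:
#
#   value <-- <a1b2c3d4> process(data)
#         <-- argument of <e5f6a7b8> make_data(path)
#     data = <0f1e2d3c> File(path='input.txt')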
def display_dataflow(dom: DataflowDOM) -> Iterator[str]:
"""
    Yields lines for displaying a dataflow DOM.
"""
for dom_section in dom:
yield from display_section(dom_section)
yield ""
def serialize_section(dom: DataflowSectionDOM) -> dict:
"""
Serialize a DataflowSectionDOM to JSON.
"""
# Serialize assignment.
assign_hash: Optional[str]
if isinstance(dom.assign.node, Value):
assign_hash = dom.assign.node.value_hash
assign_job_id = None
assign_execution_id = None
elif dom.assign.node:
call_node = get_dataflow_call_node(dom.assign.node)
assert call_node
assign_hash = call_node.call_hash
# Get latest job of call_node.
assign_job = sorted(call_node.jobs, key=lambda job: job.start_time, reverse=True)[0]
assign_job_id = assign_job.id
assign_execution_id = assign_job.execution.id
else:
assign_hash = None
assign_job_id = None
assign_execution_id = None
# Serialize routing.
routing = []
for routing_def in dom.routing:
call_node = get_dataflow_call_node(routing_def.node)
if call_node:
call_hash: Optional[str] = call_node.call_hash
# Get latest job of call_node.
job = sorted(call_node.jobs, key=lambda job: job.start_time, reverse=True)[0]
job_id: Optional[str] = job.id
execution_id: Optional[str] = job.execution.id
else:
call_hash = None
job_id = None
execution_id = None
routing.append(
{
"prefix": routing_def.prefix,
"hash": call_hash,
"display": routing_def.node_display,
"job_id": job_id,
"execution_id": execution_id,
}
)
# Serialize arguments.
args = [
{
"var_name": arg.var_name,
"hash": arg.value.value_hash,
"value_display": display_value(arg.value),
}
for arg in dom.args
]
return {
"assign": {
"var_name": dom.assign.var_name,
"prefix": dom.assign.prefix,
"display": dom.assign.node_display,
"hash": assign_hash,
"job_id": assign_job_id,
"execution_id": assign_execution_id,
},
"routing": routing,
"args": args,
}
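# Illustrative JSON shape produced for one section (hypothetical values, "..."
# used as placeholders):
#
#   {
#       "assign": {"var_name": "value", "prefix": "", "display": "process(data)",
#                  "hash": "...", "job_id": "...", "execution_id": "..."},
#       "routing": [{"prefix": "argument of", "hash": "...", "display": "...",
#                    "job_id": "...", "execution_id": "..."}],
#       "args": [{"var_name": "data", "hash": "...", "value_display": "..."}],
#   }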
def serialize_dataflow(dom: DataflowDOM) -> Iterator[dict]:
"""
Serialize DataflowDOM to JSON.
"""
for dom_section in dom:
yield serialize_section(dom_section)
| 616
| 0
| 81
|
b0c91e83a9b74bb222678d78da749bca5e26d7a9
| 1,131
|
py
|
Python
|
visualization_scripts/depth_distributions.py
|
crmauceri/pytorch-deeplab-xception
|
aec2cb7b0c09c346519c6bf22c2cbf419021fdc7
|
[
"MIT"
] | 1
|
2021-12-11T08:21:19.000Z
|
2021-12-11T08:21:19.000Z
|
visualization_scripts/depth_distributions.py
|
crmauceri/rgbd_deeplab
|
aec2cb7b0c09c346519c6bf22c2cbf419021fdc7
|
[
"MIT"
] | null | null | null |
visualization_scripts/depth_distributions.py
|
crmauceri/rgbd_deeplab
|
aec2cb7b0c09c346519c6bf22c2cbf419021fdc7
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import scipy.stats
import numpy as np
from tqdm import tqdm
from PIL import Image
from deeplab3.config.defaults import get_cfg_defaults
from dataloaders.utils import sample_distribution
from dataloaders.datasets.cityscapes import CityscapesSegmentation
from dataloaders.datasets.coco import COCOSegmentation
from dataloaders.datasets.sunrgbd import RGBDSegmentation
from dataloaders.SampleLoader import SampleLoader
city_rgbd = get_cfg_defaults()
city_rgbd.merge_from_file('configs/cityscapes_rgbd.yaml')
city_rgbd.merge_from_list(['DATASET.ROOT', 'datasets/cityscapes/',
'DATASET.CITYSCAPES.DEPTH_DIR', 'completed_depth'])
sunrgbd_rgbd = get_cfg_defaults()
sunrgbd_rgbd.merge_from_file('configs/sunrgbd.yaml')
sunrgbd_rgbd.merge_from_list(['DATASET.ROOT', 'datasets/SUNRGBD/'])
sunrgbd_rgbd_dist_train = sample_distribution(RGBDSegmentation(sunrgbd_rgbd, split='train'), n=100)
sunrgbd_rgbd_dist_test = sample_distribution(RGBDSegmentation(sunrgbd_rgbd, split='test'), n=100)
city_rgbd_dist = sample_distribution(CityscapesSegmentation(city_rgbd, split='train'))
| 37.7
| 99
| 0.821397
|
import matplotlib.pyplot as plt
import scipy.stats
import numpy as np
from tqdm import tqdm
from PIL import Image
from deeplab3.config.defaults import get_cfg_defaults
from dataloaders.utils import sample_distribution
from dataloaders.datasets.cityscapes import CityscapesSegmentation
from dataloaders.datasets.coco import COCOSegmentation
from dataloaders.datasets.sunrgbd import RGBDSegmentation
from dataloaders.SampleLoader import SampleLoader
city_rgbd = get_cfg_defaults()
city_rgbd.merge_from_file('configs/cityscapes_rgbd.yaml')
city_rgbd.merge_from_list(['DATASET.ROOT', 'datasets/cityscapes/',
'DATASET.CITYSCAPES.DEPTH_DIR', 'completed_depth'])
sunrgbd_rgbd = get_cfg_defaults()
sunrgbd_rgbd.merge_from_file('configs/sunrgbd.yaml')
sunrgbd_rgbd.merge_from_list(['DATASET.ROOT', 'datasets/SUNRGBD/'])
sunrgbd_rgbd_dist_train = sample_distribution(RGBDSegmentation(sunrgbd_rgbd, split='train'), n=100)
sunrgbd_rgbd_dist_test = sample_distribution(RGBDSegmentation(sunrgbd_rgbd, split='test'), n=100)
city_rgbd_dist = sample_distribution(CityscapesSegmentation(city_rgbd, split='train'))
| 0
| 0
| 0
|
3934a4a0ed4d91e9682e7a523fecefc8304cc24f
| 7,933
|
py
|
Python
|
src/cc_catalog_airflow/dags/provider_api_scripts/test_walters_art_museum.py
|
gauravahlawat81/cccatalog
|
cabfa11c4e1d68c66390ed46649282b7d33e2c58
|
[
"MIT"
] | 65
|
2018-05-25T00:47:18.000Z
|
2021-11-30T05:58:43.000Z
|
src/cc_catalog_airflow/dags/provider_api_scripts/test_walters_art_museum.py
|
cc-archive/cccatalog
|
bc95ccc159ed7f1444d44e1db08d9a11a16c6d12
|
[
"MIT"
] | 463
|
2018-05-01T14:35:42.000Z
|
2021-06-11T20:32:50.000Z
|
src/cc_catalog_airflow/dags/provider_api_scripts/test_walters_art_museum.py
|
cc-archive/cccatalog
|
bc95ccc159ed7f1444d44e1db08d9a11a16c6d12
|
[
"MIT"
] | 81
|
2018-05-05T20:33:12.000Z
|
2021-04-28T02:23:10.000Z
|
import os
import json
import logging
import requests
from unittest.mock import patch, MagicMock
import walters_art_museum as wam
RESOURCES = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'tests/resources/waltersartmuseum'
)
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s: %(message)s',
level=logging.DEBUG
)
logger = logging.getLogger(__name__)
# _get_image_list test suite
# _build_query_param test suite
# This test fails if default constants change.
# _extract_image_list_from_json test suite
# _process_image test suite
# _get_creator_info test suite
# get_meta_data test suite
| 32.646091
| 101
| 0.708307
|
import os
import json
import logging
import requests
from unittest.mock import patch, MagicMock
import walters_art_museum as wam
RESOURCES = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'tests/resources/waltersartmuseum'
)
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s: %(message)s',
level=logging.DEBUG
)
logger = logging.getLogger(__name__)
def _get_resource_json(json_resource):
with open(os.path.join(RESOURCES, json_resource)) as file:
json_resource = json.load(file)
return json_resource
# _get_image_list test suite
def test_get_image_list_retries_with_none_response():
with patch.object(
wam.delayed_requester,
'get_response_json',
return_value=None
) as mock_get:
wam._get_image_list('some_class', retries=3)
assert mock_get.call_count == 1
def test_get_image_list_retries_with_non_ok_response():
response_json = _get_resource_json('walters_full_response_example.json')
r = requests.Response()
r.status_code = 504
r.json = MagicMock(return_value=response_json)
with patch.object(
wam.delayed_requester,
'get_response_json',
return_value=r.json()
) as mock_get:
wam._get_image_list('some_class', retries=3)
assert mock_get.call_count == 1
def test_get_image_list_with_full_response():
response_json = _get_resource_json('walters_full_response_example.json')
r = requests.Response()
r.status_code = 200
r.json = MagicMock(return_value=response_json)
with patch.object(
wam.delayed_requester,
'get_response_json',
return_value=r.json()
) as mock_get:
image_list = wam._get_image_list('Manuscripts & Rare Books', retries=3)
    # Here the image list is the same as the items list because the test example
    # has only one page and one image. With more than one page, the image list
    # would contain the images from every page's items list.
expect_image_list = _get_resource_json('items_list_example.json')
assert mock_get.call_count == 1
assert image_list == expect_image_list
# _build_query_param test suite
# This test fails if default constants change.
def test_build_query_param_default():
walters_api_key = 'notset'
actual_param_made = wam._build_query_param(
apikey=walters_api_key
)
expected_param = {
'accept': 'json',
'apikey': walters_api_key,
'pageSize': 100,
'orderBy': 'classification',
'classification': None,
'Page': 1
}
assert actual_param_made == expected_param
def test_build_query_param_given():
walters_api_key = 'notset'
class_param = 'someclass'
actual_param_made = wam._build_query_param(
class_param=class_param,
apikey=walters_api_key
)
expected_param = {
'accept': 'json',
'apikey': walters_api_key,
'pageSize': 100,
'orderBy': 'classification',
'classification': class_param,
'Page': 1
}
assert actual_param_made == expected_param
# _extract_image_list_from_json test suite
def test_extract_items_list_from_json_returns_expected_output():
json_response_inpydict_form = _get_resource_json(
'walters_full_response_example.json'
)
actual_items_list = wam._extract_items_list_from_json(
json_response_inpydict_form
)
expect_items_list = _get_resource_json('items_list_example.json')
assert actual_items_list == expect_items_list
def test_extract_items_list_from_json_returns_nones_given_false_return_stat():
test_dict = {
"ReturnStatus": False,
"ReturnCode": 404
}
assert wam._extract_items_list_from_json(test_dict) is None
def test_extract_items_list_from_json_handles_missing_Items():
test_dict = {
"ReturnStatus": True,
"ReturnCode": 200
}
assert wam._extract_items_list_from_json(test_dict) is None
def test_extract_items_list_from_json_handles_missing_imgs_in_Items():
test_dict = {
"Items": [],
"ReturnStatus": True,
"ReturnCode": 200
}
assert wam._extract_items_list_from_json(test_dict) is None
def test_extract_items_list_from_json_returns_nones_given_none_json():
assert wam._extract_items_list_from_json(None) is None
# _process_image test suite
def test_process_image_returns_expected_output_given_right_input():
image = _get_resource_json('full_image_object.json')
with patch.object(
wam.image_store,
'add_item',
return_value=100
) as mock_add_item:
total_images = wam._process_image(image)
expect_meta_data = {
"ObjectNumber": "W.569.4A",
"PublicAccessDate": "2014-04-25T13:19:25.22",
"Collection": "Manuscripts",
"Medium": "ink and pigments on thick cream-colored, gold-flecked paper",
"Classification": "Manuscripts & Rare Books",
"Description": "abc",
"CreditLine": "Acquired by Henry Walters",
}
mock_add_item.assert_called_once_with(
foreign_landing_url="http://art.thewalters.org/detail/2",
image_url="http://static.thewalters.org/images/CPS_W.569.4a_Fp_DD.jpg",
thumbnail_url="http://static.thewalters.org/images/CPS_W.569.4a_Fp_DD.jpg?width=100",
license_url="https://creativecommons.org/publicdomain/zero/1.0/",
foreign_identifier="W.569.4A",
creator="Iranian",
creator_url="https://art.thewalters.org/browse/iranian",
title="Leaf from Qur'an",
meta_data=expect_meta_data
)
assert total_images == 100
# _get_creator_info test suite
def test_get_creator_info_returns_expected_output_given_right_input():
response_json = _get_resource_json('full_image_object.json')
actual_creator, actual_creator_url = wam._get_creator_info(response_json)
expected_creator = "Iranian"
expected_creator_url = "https://art.thewalters.org/browse/iranian"
assert actual_creator == expected_creator
assert actual_creator_url == expected_creator_url
def test_get_creator_info_returns_none_given_no_creator_info():
response_json = _get_resource_json('no_creator_info.json')
actual_creator, actual_creator_url = wam._get_creator_info(response_json)
expected_creator = None
expected_creator_url = None
assert actual_creator == expected_creator
assert actual_creator_url == expected_creator_url
# get_meta_data test suite
def test_get_image_meta_data_returns_full_meta_data_given_right_input():
response_json = _get_resource_json("full_image_object.json")
actual_metadata = wam._get_image_meta_data(response_json)
expected_metadata = {
"ObjectNumber": "W.569.4A",
"PublicAccessDate": "2014-04-25T13:19:25.22",
"Collection": "Manuscripts",
"Medium": "ink and pigments on thick cream-colored, gold-flecked paper",
"Classification": "Manuscripts & Rare Books",
"Description": "abc",
"CreditLine": "Acquired by Henry Walters",
}
assert actual_metadata == expected_metadata
def test_get_image_meta_data_returns_partial_meta_data():
response_json = _get_resource_json("partial_meta_data.json")
actual_metadata = wam._get_image_meta_data(response_json)
expected_metadata = {
"ObjectNumber": "W.569.4A",
"PublicAccessDate": "2014-04-25T13:19:25.22",
"Collection": "Manuscripts",
"Medium": "ink and pigments on thick cream-colored, gold-flecked paper"
}
assert actual_metadata == expected_metadata
def test_get_image_meta_data_return_empty_dict_given_no_meta_data():
response_json = _get_resource_json("no_meta_data.json")
actual_metadata = wam._get_image_meta_data(response_json)
expected_metadata = {}
assert actual_metadata == expected_metadata
| 6,889
| 0
| 385
|
61df26516d0787ba8bcc1bd61f1621598a533158
| 89
|
py
|
Python
|
program_files/__init__.py
|
Vinxenx/SESMG
|
ca8616ee2b175f949de737890d6f5cd4533f6ee7
|
[
"MIT"
] | 13
|
2020-12-01T08:41:40.000Z
|
2021-12-01T22:07:12.000Z
|
program_files/__init__.py
|
Vinxenx/SESMG
|
ca8616ee2b175f949de737890d6f5cd4533f6ee7
|
[
"MIT"
] | 66
|
2020-08-07T10:34:12.000Z
|
2022-03-31T13:07:07.000Z
|
program_files/__init__.py
|
Vinxenx/SESMG
|
ca8616ee2b175f949de737890d6f5cd4533f6ee7
|
[
"MIT"
] | 6
|
2020-08-07T10:26:35.000Z
|
2022-01-12T09:36:54.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 14 09:35:53 2020
@author: Christian
"""
| 11.125
| 35
| 0.58427
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 14 09:35:53 2020
@author: Christian
"""
| 0
| 0
| 0
|
73716934c248a6b00656fef6bc54f715e7c1296c
| 12,778
|
py
|
Python
|
scripts/convert.py
|
kumasento/deacon
|
d9bf1adfc93d176930ddc43757eb039714c92657
|
[
"MIT"
] | 2
|
2021-04-11T11:01:34.000Z
|
2021-04-12T09:18:02.000Z
|
scripts/convert.py
|
kumasento/deacon
|
d9bf1adfc93d176930ddc43757eb039714c92657
|
[
"MIT"
] | 3
|
2021-04-10T21:06:42.000Z
|
2021-04-10T21:06:42.000Z
|
scripts/convert.py
|
kumasento/maxdeep
|
d9bf1adfc93d176930ddc43757eb039714c92657
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
""" Convert from pre-trained models. """
import argparse
import math
import os
import sys
from collections import OrderedDict
from dataclasses import dataclass
from typing import Dict
import numpy as np
import onnx
import toml
from onnx import shape_inference
from pydeacon.graph import (
DeaconGraph,
Globals,
LayerType,
Node,
Output,
OutputType,
Parallelism,
Seq,
Shape,
)
@dataclass
if __name__ == "__main__":
main()
| 36.824207
| 121
| 0.487792
|
#!/usr/bin/env python3
""" Convert from pre-trained models. """
import argparse
import math
import os
import sys
from collections import OrderedDict
from dataclasses import dataclass
from typing import Dict
import numpy as np
import onnx
import toml
from onnx import shape_inference
from pydeacon.graph import (
DeaconGraph,
Globals,
LayerType,
Node,
Output,
OutputType,
Parallelism,
Seq,
Shape,
)
def is_power_of_two(n: int) -> bool:
return (n & (n - 1) == 0) and n > 0
def next_power_of_two(x: int) -> int:
return pow(2, math.ceil(math.log(x) / math.log(2)))
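# For example: is_power_of_two(8) -> True, is_power_of_two(12) -> False,
# next_power_of_two(12) -> 16.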
@dataclass
class Tensor:
layer_name: str # from which layer generates this tensor.
index: int # which output port
class ONNXConverter:
def __init__(self, last_padded: bool = False, bit_width: int = 16):
self.last_padded = last_padded
self.bit_width = bit_width
def convert_name(self, name: str):
return name.replace("_", "").lower()
def get_shape(self, name: str, value_info: dict):
shape = value_info[name].type.tensor_type.shape
return [x.dim_value for x in shape.dim[1:]]
def parse_attrs(self, node) -> Dict:
attrs = {}
for attr in node.attribute:
if attr.name == "kernel_shape":
assert np.unique(attr.ints).size == 1
attrs["K"] = attr.ints[0]
if attr.name == "pads":
assert np.unique(attr.ints).size == 1
attrs["P"] = attr.ints[0]
if attr.name == "strides":
assert np.unique(attr.ints).size == 1
attrs["S"] = attr.ints[0]
if attr.name == "group":
attrs["G"] = attr.i
return attrs
def get_config_suffix(self) -> str:
suffix = "_onnx"
if self.last_padded:
suffix += "_last_padded"
if self.bit_width != 16:
suffix += f"_b{self.bit_width}"
return suffix
def convert(self, src_file: str, dst_file: str, init_seq: str):
model = onnx.load(src_file)
model = shape_inference.infer_shapes(model)
G = DeaconGraph(
name="_".join(os.path.basename(src_file).split(".")[:-1]).replace("-", "_")
+ self.get_config_suffix(),
globals=Globals(
a_bw=self.bit_width,
w_bw=self.bit_width,
freq=200,
coeff_on_chip=True,
use_dram=True,
num_frac_bits=8 if self.bit_width == 16 else 0,
),
)
value_info = {info.name: info for info in model.graph.value_info}
for input_tensor in model.graph.input:
value_info[input_tensor.name] = input_tensor
prev_d_node = None
tensor_map: Dict[str, Tensor] = {}
for node in model.graph.node:
d_node = None
if node.op_type not in [
"Conv",
"MaxPool",
"Relu",
"Concat",
"Dropout",
"Clip",
"Add",
"BatchNormalization",
"Cast",
]:
print(node.op_type + " not supported. Break.")
break
out_shape = self.get_shape(node.output[0], value_info)
in_shape = self.get_shape(node.input[0], value_info)
attrs = self.parse_attrs(node)
print(node.name, node.op_type, in_shape, " -> ", out_shape)
new_node = True
if node.op_type == "Conv":
assert len(node.output) == 1
d_node = Node(
name=self.convert_name(node.name),
shape=Shape(
H=out_shape[1], W=out_shape[2], F=out_shape[0], C=in_shape[0]
),
K=attrs["K"],
P=attrs["P"],
S=attrs["S"],
seq=Seq.FILTER_MAJOR,
layer_type=LayerType.STANDARD
if "G" not in attrs or attrs["G"] != in_shape[0]
else LayerType.DEPTHWISE,
)
# Fuse into depthwise separable
if node.input[0] in tensor_map:
input_name = tensor_map[node.input[0]].layer_name
if (
d_node.K == 1
and G.node_map[input_name].layer_type == LayerType.DEPTHWISE
):
p_node = G.node_map[self.convert_name(input_name)]
p_node.layer_type = LayerType.DEPTHWISE_SEPARABLE
p_node.shape.F = d_node.shape.F
d_node = p_node
new_node = False
elif node.op_type == "Add":
na = G.node_map[tensor_map[node.input[0]].layer_name]
nb = G.node_map[tensor_map[node.input[1]].layer_name]
nc = G.node_map[nb.inputs[0]]
# na --> nc --> nb
if (
nb.layer_type == LayerType.DEPTHWISE_SEPARABLE
and nc.inputs[0] == na.name
and nc.layer_type == LayerType.STANDARD
): # inverted bottleneck
nc.outputs.append(Output(output_type=OutputType.IFMAP))
nb.residual = nc.name + "_" + str(len(nc.outputs) - 1)
elif (
nb.layer_type == LayerType.STANDARD
and nc.inputs[0] == na.name
and nc.layer_type == LayerType.STANDARD
): # resnet-18 stack type
nc.outputs.append(Output(output_type=OutputType.IFMAP))
nb.residual = nc.name + "_" + str(len(nc.outputs) - 1)
elif (
nb.layer_type == LayerType.STANDARD
and nc.layer_type == LayerType.STANDARD
and nc.inputs[0].split("_")[0] == na.inputs[0].split("_")[0]
): # resnet-18 shortcut
# na is the shortcut convolution
# should erase na, add a new input to nb, from a duplicated ifmap of nc, and assign residual to that.
nc.outputs.append(Output(output_type=OutputType.IFMAP))
extra_input = f"{nc.name}_{len(nc.outputs)-1}"
nb.inputs.append(extra_input)
nb.residual = extra_input
nc.par = Parallelism(P_F=[nc.shape.F // nc.shape.C, 1], P_C=[1])
nb.par = Parallelism(P_C=[nc.shape.F // nc.shape.C, 1], P_F=[1])
# make sure the first output is taken by nc.
if "_" in nc.inputs[0]:
nc.inputs[0], na.inputs[0] = na.inputs[0], nc.inputs[0]
output_node, index = G.get_output(nc.inputs[0])
output_node.outputs[0], output_node.outputs[1] = (
output_node.outputs[1],
output_node.outputs[0],
)
output_node.output_nodes[0], output_node.output_nodes[1] = (
output_node.output_nodes[1],
output_node.output_nodes[0],
)
G.node_map = {k: v for k, v in G.node_map.items() if v != na}
for input_name in na.inputs:
output_node, index = G.get_output(input_name)
assert index == len(output_node.outputs) - 1
output_node.outputs.pop(index)
output_node.output_nodes.pop(index)
else:
print("na = ", na)
print("nc = ", nc)
print("nb = ", nb)
assert False
d_node = nb
new_node = False
elif node.op_type == "MaxPool":
d_node = Node(
name=self.convert_name(node.name),
shape=Shape(
H=out_shape[1], W=out_shape[2], F=out_shape[0], C=in_shape[0]
),
K=attrs["K"],
P=attrs["P"],
S=attrs["S"],
seq=Seq.FILTER_MAJOR,
layer_type=LayerType.POOLING,
)
elif node.op_type in [
"Relu",
"Dropout",
"Clip",
"BatchNormalization",
"Cast",
]:
assert prev_d_node
d_node = prev_d_node
new_node = False
elif node.op_type == "Concat":
d_node = Node(
name=self.convert_name(node.name),
shape=Shape(
H=out_shape[1], W=out_shape[2], F=out_shape[0], C=in_shape[0]
),
K=1,
P=0,
S=1,
seq=Seq.FILTER_MAJOR,
layer_type=LayerType.CONCAT,
)
else:
assert False
assert len(node.input) >= 1
# append the current node as one of the output node of its inputs.
if new_node:
if node.input[0] == "data" or node.input[0] == "input":
d_node.inputs = [] # first layer
else:
for tensor_name in node.input:
if tensor_name not in tensor_map:
continue # possibly a weight tensor
input_name = tensor_map[tensor_name].layer_name
key = self.convert_name(input_name)
if key == d_node.name: # merged
continue
if key in G.node_map:
input_name = G.node_map[key].name
if len(G.node_map[key].output_nodes) >= 1:
input_name += "_" + str(
len(G.node_map[key].output_nodes)
)
d_node.inputs.append(input_name)
for input_name in d_node.inputs:
input_node, index = G.get_output(input_name)
input_node.output_nodes.append(d_node.name)
assert len(input_node.output_nodes) == index + 1
# We assume only the original output will be used.
input_node.outputs.append(
Output(output_type=OutputType.OFMAP, index=0)
)
# create entries in the tensor map
for i, tensor_name in enumerate(node.output):
tensor_map[tensor_name] = Tensor(layer_name=d_node.name, index=i)
G.node_map[self.convert_name(node.name)] = d_node
prev_d_node = d_node
if new_node:
print(d_node)
prev_d_node.outputs.append(Output(output_type=OutputType.OFMAP))
if self.last_padded:
if prev_d_node and not is_power_of_two(prev_d_node.shape.F):
print(prev_d_node.shape.F)
prev_d_node.shape.F = next_power_of_two(prev_d_node.shape.F)
vis = set()
for d_node in G.node_map.values():
if d_node.name in vis:
continue
vis.add(d_node.name)
G.initialize_parallelism(d_node)
G.initialize_seq(
start=Seq.FILTER_MAJOR if init_seq == "FM" else Seq.CHANNEL_MAJOR
)
G.sanity_check()
G.dump(dst_file)
G.dump_spreadsheet(
os.path.join(
os.path.dirname(dst_file),
os.path.basename(dst_file).split(".")[0] + ".csv",
)
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--model-file", type=str, help="Model file")
parser.add_argument(
"-o", "--config-file", type=str, help="Dump converted config file"
)
parser.add_argument(
"--last-padded",
action="store_true",
help="Whether to pad the last layer to power of 2",
)
parser.add_argument("--init-seq", type=str, default="FM", help="Initial seq")
parser.add_argument("--bw", type=int, default=8, help="Bit width")
args = parser.parse_args()
ONNXConverter(last_padded=args.last_padded, bit_width=args.bw).convert(
args.model_file, args.config_file, init_seq=args.init_seq
)
if __name__ == "__main__":
main()
| 11,919
| 90
| 275
|
47312b896ce1ded775eb5694ed034f119703f054
| 3,999
|
py
|
Python
|
pma_api/utils.py
|
joeflack4/pma-api
|
de213833c93ad0c90b127188526c9eced31edc75
|
[
"MIT"
] | 2
|
2018-08-24T14:27:25.000Z
|
2020-05-11T18:59:24.000Z
|
pma_api/utils.py
|
joeflack4/pma-api
|
de213833c93ad0c90b127188526c9eced31edc75
|
[
"MIT"
] | 36
|
2018-07-13T15:49:50.000Z
|
2019-07-17T18:29:28.000Z
|
pma_api/utils.py
|
joeflack4/pma-api
|
de213833c93ad0c90b127188526c9eced31edc75
|
[
"MIT"
] | 4
|
2018-07-12T19:24:52.000Z
|
2021-03-09T16:08:38.000Z
|
"""Assortment of utilities for application."""
import itertools
import operator
import os
import random
from typing import List
from flask_sqlalchemy import Model, SQLAlchemy
from pma_api.app import PmaApiFlask
B64_CHAR_SET = ''.join(('abcdefghijklmnopqrstuvwxyz',
'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'0123456789-_'))
seen = {None}
random.seed(2020)
def next64():
"""Random string generator.
Returns:
str: Randomly generated string.
"""
n_char = 8
result = None
while result in seen:
result = ''.join(random.choice(B64_CHAR_SET) for _ in range(n_char))
seen.add(result)
return result
def most_common(a_list: list):
"""Get most common element in a list
Args:
a_list (list): Any arbitrary list
Returns:
any: pick the highest-count/earliest item
"""
# get an iterable of (item, iterable) pairs
sorted_list = sorted((x, i) for i, x in enumerate(a_list))
groups = itertools.groupby(sorted_list, key=operator.itemgetter(0))
def _auxfun(grp):
"""Auxiliary function to get "quality" for an item
This function should be used in tandem with max()
Args:
            grp (tuple): an (item, iterable) group produced by itertools.groupby,
                from which the item's count and earliest index are computed.
"""
item, iterable = grp
count = 0
min_index = len(a_list)
for _, where in iterable:
count += 1
min_index = min(min_index, where)
return count, -min_index
return max(groups, key=_auxfun)[0]
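# For example, most_common(['b', 'a', 'a', 'b']) returns 'b': both items occur
# twice, so the tie is broken by the earliest occurrence in the list.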
def dict_to_pretty_json(dictionary: dict) -> str:
"""Given a dictionary, pretty print JSON str
Args:
dictionary (dict): dictionary
Returns:
str: Prettified JSON string
"""
import json
return json.dumps(
dictionary,
sort_keys=True,
indent=4,
separators=(',', ': '))
def join_url_parts(*args: str) -> str:
"""Join parts of a url string
Parts of a URL string may come from different sources, so joining them
directly together may yield too many or too few '/' delimiters.
Args:
*args:
Returns:
str: Well-formed url
"""
base_str = '/'.join(args)
return 'http://' + base_str.replace('http://', '').replace('//', '/')
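# For example, join_url_parts('http://api.example.com', 'v1', 'resources')
# returns 'http://api.example.com/v1/resources' (hypothetical host).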
def get_db_models(db: SQLAlchemy) -> List[Model]:
"""Get list of models from SqlAlchemy
Args:
db: SqlAlchemy db object
Returns:
list(Model): List of registered SqlAlchemy models
"""
# noinspection PyProtectedMember
models: List[Model] = \
[cls for cls in db.Model._decl_class_registry.values()
if isinstance(cls, type) and issubclass(cls, db.Model)]
return models
# TODO 2019.03.10-jef: Get this to work
def stderr_stdout_captured(func):
"""Capture stderr and stdout
Args:
func: A function
Returns:
str, str, any: stderr output, stdout output, return of function
"""
import sys
from io import StringIO
old_stdout = sys.stdout
old_stderr = sys.stderr
captured_stderr = sys.stderr = StringIO()
captured_stdout = sys.stdout = StringIO()
returned_value = func()
_err: str = captured_stderr.getvalue()
_out: str = captured_stdout.getvalue()
sys.stdout = old_stdout
sys.stderr = old_stderr
return _err, _out, returned_value
def get_app_instance() -> PmaApiFlask:
"""Get reference to copy of currently running application instance
Returns:
PmaApiFlask: PmaApiFlask application instance.
"""
err = 'A current running app was not able to be found.'
try:
from flask import current_app
app: PmaApiFlask = current_app
if app.__repr__() == '<LocalProxy unbound>':
raise RuntimeError(err)
except RuntimeError:
from pma_api import create_app
app: PmaApiFlask = create_app(os.getenv('ENV_NAME', 'default'))
return app
| 23.946108
| 76
| 0.628657
|
"""Assortment of utilities for application."""
import itertools
import operator
import os
import random
from typing import List
from flask_sqlalchemy import Model, SQLAlchemy
from pma_api.app import PmaApiFlask
B64_CHAR_SET = ''.join(('abcdefghijklmnopqrstuvwxyz',
'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'0123456789-_'))
seen = {None}
random.seed(2020)
def next64():
"""Random string generator.
Returns:
str: Randomly generated string.
"""
n_char = 8
result = None
while result in seen:
result = ''.join(random.choice(B64_CHAR_SET) for _ in range(n_char))
seen.add(result)
return result
def most_common(a_list: list):
"""Get most common element in a list
Args:
a_list (list): Any arbitrary list
Returns:
any: pick the highest-count/earliest item
"""
# get an iterable of (item, iterable) pairs
sorted_list = sorted((x, i) for i, x in enumerate(a_list))
groups = itertools.groupby(sorted_list, key=operator.itemgetter(0))
def _auxfun(grp):
"""Auxiliary function to get "quality" for an item
This function should be used in tandem with max()
Args:
            grp (tuple): an (item, iterable) group produced by itertools.groupby,
                from which the item's count and earliest index are computed.
"""
item, iterable = grp
count = 0
min_index = len(a_list)
for _, where in iterable:
count += 1
min_index = min(min_index, where)
return count, -min_index
return max(groups, key=_auxfun)[0]
def dict_to_pretty_json(dictionary: dict) -> str:
"""Given a dictionary, pretty print JSON str
Args:
dictionary (dict): dictionary
Returns:
str: Prettified JSON string
"""
import json
return json.dumps(
dictionary,
sort_keys=True,
indent=4,
separators=(',', ': '))
def join_url_parts(*args: str) -> str:
"""Join parts of a url string
Parts of a URL string may come from different sources, so joining them
directly together may yield too many or too few '/' delimiters.
Args:
*args:
Returns:
str: Well-formed url
"""
base_str = '/'.join(args)
return 'http://' + base_str.replace('http://', '').replace('//', '/')
def get_db_models(db: SQLAlchemy) -> List[Model]:
"""Get list of models from SqlAlchemy
Args:
db: SqlAlchemy db object
Returns:
list(Model): List of registered SqlAlchemy models
"""
# noinspection PyProtectedMember
models: List[Model] = \
[cls for cls in db.Model._decl_class_registry.values()
if isinstance(cls, type) and issubclass(cls, db.Model)]
return models
# TODO 2019.03.10-jef: Get this to work
def stderr_stdout_captured(func):
"""Capture stderr and stdout
Args:
func: A function
Returns:
str, str, any: stderr output, stdout output, return of function
"""
import sys
from io import StringIO
old_stdout = sys.stdout
old_stderr = sys.stderr
captured_stderr = sys.stderr = StringIO()
captured_stdout = sys.stdout = StringIO()
returned_value = func()
_err: str = captured_stderr.getvalue()
_out: str = captured_stdout.getvalue()
sys.stdout = old_stdout
sys.stderr = old_stderr
return _err, _out, returned_value
def get_app_instance() -> PmaApiFlask:
"""Get reference to copy of currently running application instance
Returns:
PmaApiFlask: PmaApiFlask application instance.
"""
err = 'A current running app was not able to be found.'
try:
from flask import current_app
app: PmaApiFlask = current_app
if app.__repr__() == '<LocalProxy unbound>':
raise RuntimeError(err)
except RuntimeError:
from pma_api import create_app
app: PmaApiFlask = create_app(os.getenv('ENV_NAME', 'default'))
return app
| 0
| 0
| 0
|
dce593f083f710d9b47b53386014bb2642bc2023
| 1,084
|
py
|
Python
|
Projects/Online Workouts/w3resource/Basic - Part-I/program-81.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 1
|
2019-09-23T15:51:45.000Z
|
2019-09-23T15:51:45.000Z
|
Projects/Online Workouts/w3resource/Basic - Part-I/program-81.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 5
|
2021-02-08T20:47:19.000Z
|
2022-03-12T00:35:44.000Z
|
Projects/Online Workouts/w3resource/Basic - Part-I/program-81.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python3
#######################################################################################
# #
# Program purpose: Concatenates N-strings. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : August 27, 2019 #
# #
#######################################################################################
if __name__ == "__main__":
num_str = int(input("Enter number of strings: "))
strs = read_strs(num_str)
print(f"\nStrings are: {strs}")
# joining strings on '-'
print(f"Concatenated strings: {'-'.join(strs)}")
| 40.148148
| 87
| 0.350554
|
# !/usr/bin/env python3
#######################################################################################
# #
# Program purpose: Concatenates N-strings. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : August 27, 2019 #
# #
#######################################################################################
def read_strs(max_cnt=0):
strings = []
tmp = 1
    while tmp != max_cnt + 1:
tmp_str = str(input(f"Enter string #{tmp}: "))
strings.append(tmp_str)
tmp += 1
return strings
if __name__ == "__main__":
num_str = int(input("Enter number of strings: "))
strs = read_strs(num_str)
print(f"\nStrings are: {strs}")
# joining strings on '-'
print(f"Concatenated strings: {'-'.join(strs)}")
| 188
| 0
| 23
|
83cdf744bb4105c7bd913cf6fac47cf20ff9faa7
| 1,741
|
py
|
Python
|
python/test.py
|
MorenoJoshua/freeswitchManager
|
b1488f6c9e2a2e3008e758dde46ea9ca22fdec26
|
[
"MIT"
] | null | null | null |
python/test.py
|
MorenoJoshua/freeswitchManager
|
b1488f6c9e2a2e3008e758dde46ea9ca22fdec26
|
[
"MIT"
] | null | null | null |
python/test.py
|
MorenoJoshua/freeswitchManager
|
b1488f6c9e2a2e3008e758dde46ea9ca22fdec26
|
[
"MIT"
] | null | null | null |
import requests
import hmac
import hashlib
import datetime as dt
import simplejson as json
access_key = '80473469' # example
secret_key = 'GhDzk8Lc00xUzUjHFJqDqLztMNq5KMgU' # example
# Generate the X-Timestamp
t = dt.datetime.utcnow().replace(microsecond=0)
timestamp = t.isoformat()
timestamp = '2015-10-29T14:33:46'
headers = {
'accept': "application/json",
'x-timestamp': timestamp
}
# Generate the MD5 hash of the body
body = ''
body_md5 = hashlib.md5(body).hexdigest() if body != '' else ''
# Creating URI info
query_params = ['limit=1','page=2'] # Query params to sort and include in the canonical URI
query_params.sort()
url_scheme = 'https'
net_location = 'api.flowroute.com'
method = 'GET'
path = '/v1/routes/'
ordered_query_params = u'&'.join(query_params)
canonical_uri = '{0}://{1}{2}\n{3}'.format(url_scheme, net_location, path, ordered_query_params)
# Create the message string
tokens = (timestamp, method, body_md5, canonical_uri)
message_string = u'\n'.join(tokens).encode('utf-8')
# Generate the signature
signature = hmac.new(secret_key, message_string, digestmod=hashlib.sha1).hexdigest()
# Make the request
request_url = '{0}://{1}{2}?{3}'.format(url_scheme, net_location, path, ordered_query_params) # append ordered query params here
#request = requests.get(request_url, auth=(access_key, signature), headers=headers)
#result = json.loads(request.text)
print "timestamp: " + str(timestamp)
print "tokens: " + str(tokens)
print "canonical_uri: " + str(canonical_uri)
print "request_uri: " + str(request_url)
print "message_string: " + str(message_string)
print "access_key: " + str(access_key)
print "signature: " + str(signature)
print "headers: " + str(headers)
print ""
#print str(result)
| 32.240741
| 129
| 0.731189
|
import requests
import hmac
import hashlib
import datetime as dt
import simplejson as json
access_key = '80473469' # example
secret_key = 'GhDzk8Lc00xUzUjHFJqDqLztMNq5KMgU' # example
# Generate the X-Timestamp
t = dt.datetime.utcnow().replace(microsecond=0)
timestamp = t.isoformat()
timestamp = '2015-10-29T14:33:46'
headers = {
'accept': "application/json",
'x-timestamp': timestamp
}
# Generate the MD5 hash of the body
body = ''
body_md5 = hashlib.md5(body).hexdigest() if body != '' else ''
# Creating URI info
query_params = ['limit=1','page=2'] # Query params to sort and include in the canonical URI
query_params.sort()
url_scheme = 'https'
net_location = 'api.flowroute.com'
method = 'GET'
path = '/v1/routes/'
ordered_query_params = u'&'.join(query_params)
canonical_uri = '{0}://{1}{2}\n{3}'.format(url_scheme, net_location, path, ordered_query_params)
# Create the message string
tokens = (timestamp, method, body_md5, canonical_uri)
message_string = u'\n'.join(tokens).encode('utf-8')
# Generate the signature
signature = hmac.new(secret_key, message_string, digestmod=hashlib.sha1).hexdigest()
# Make the request
request_url = '{0}://{1}{2}?{3}'.format(url_scheme, net_location, path, ordered_query_params) # append ordered query params here
#request = requests.get(request_url, auth=(access_key, signature), headers=headers)
#result = json.loads(request.text)
print "timestamp: " + str(timestamp)
print "tokens: " + str(tokens)
print "canonical_uri: " + str(canonical_uri)
print "request_uri: " + str(request_url)
print "message_string: " + str(message_string)
print "access_key: " + str(access_key)
print "signature: " + str(signature)
print "headers: " + str(headers)
print ""
#print str(result)
| 0
| 0
| 0
|
366c1976344b76f7e0aa3018661a6d511956da45
| 4,053
|
py
|
Python
|
oecp/proxy/requests_proxy.py
|
openeuler-mirror/oecp
|
967ed6b9e53f2da5f795f49bb5b5fc0423372863
|
[
"MulanPSL-1.0"
] | null | null | null |
oecp/proxy/requests_proxy.py
|
openeuler-mirror/oecp
|
967ed6b9e53f2da5f795f49bb5b5fc0423372863
|
[
"MulanPSL-1.0"
] | null | null | null |
oecp/proxy/requests_proxy.py
|
openeuler-mirror/oecp
|
967ed6b9e53f2da5f795f49bb5b5fc0423372863
|
[
"MulanPSL-1.0"
] | null | null | null |
# -*- encoding=utf-8 -*-
"""
# **********************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# [oecp] is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
# Author:
# Create: 2021-09-03
# Description: requests api proxy
# **********************************************************************************
"""
import logging
import sys
import traceback
from time import time
from urllib.request import urlretrieve
from urllib.error import URLError
from tqdm import tqdm
from oecp.utils.unit_convert import convert_bytes
logger = logging.getLogger("oecp")
def do_download(url, file_path, progress=False, step=1):
"""
    Download from url and save it to file_path
    :param url: remote address
    :param file_path: local file path
    :param progress: whether to display download progress
    :param step: progress step (in percent) between reports
:return:
"""
def current_timestamp():
"""
        Current timestamp in seconds
:return:
"""
return int(time())
class ReportHook(object):
"""
        Report-hook callback class
"""
last_block = 0
download = 0
total = 0
percent = 0
last_time = current_timestamp()
def cb(self, block_num=1, block_size=1, total_size=None):
"""
retrieve reporthook
            :param block_num: block number
            :param block_size: block size
            :param total_size: total download size
:return:
"""
if not total_size:
return
self.total = total_size
download = (block_num - self.last_block) * block_size
now = current_timestamp()
interval = now - self.last_time
if not interval:
return
speed = download // interval
if not speed:
return
self.download += download
percent = self.download * 100 // self.total
est = (self.total - self.download) // speed
if percent >= self.percent + step:
sys.stderr.write(f"{percent}% [{convert_bytes(self.download)}/{convert_bytes(self.total)}] complete, "
f"estimate to take another {est} seconds\n")
self.percent = percent
self.last_block = block_num
self.last_time = now
#logger.debug("recommended to use requests.proxy.do_download_tqdm instead if stdout support \"\\r\"")
reporthook = ReportHook().cb if progress else None
try:
logger.debug(f"download {url} to {file_path}")
return urlretrieve(url, file_path, reporthook=reporthook)
except:
logger.debug(f"urlretrieve {url} exception {traceback.format_exc()}")
def do_download_tqdm(url, file_path):
"""
    Display the download progress bar using tqdm
:param url:
:param file_path:
:return:
"""
class ReportHook(tqdm):
"""
        Report-hook callback class
"""
last_block = 0
def cb(self, block_num=1, block_size=1, total_size=None):
"""
retrieve reporthook
            :param block_num: block number
            :param block_size: block size
            :param total_size: total download size
:return:
"""
if total_size:
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
with ReportHook(unit='iB', unit_scale=True) as progress_bar:
try:
logger.debug(f"download {url} to {file_path}")
return urlretrieve(url, file_path, progress_bar.cb)
except:
logger.debug(f"urlretrieve {url} exception {traceback.format_exc()}")
| 30.022222
| 118
| 0.564767
|
# -*- encoding=utf-8 -*-
"""
# **********************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# [oecp] is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
# Author:
# Create: 2021-09-03
# Description: requests api proxy
# **********************************************************************************
"""
import logging
import sys
import traceback
from time import time
from urllib.request import urlretrieve
from urllib.error import URLError
from tqdm import tqdm
from oecp.utils.unit_convert import convert_bytes
logger = logging.getLogger("oecp")
def do_download(url, file_path, progress=False, step=1):
"""
    Download from url and save it to file_path
    :param url: remote address
    :param file_path: local file path
    :param progress: whether to display download progress
    :param step: progress step (in percent) between reports
:return:
"""
def current_timestamp():
"""
        Current timestamp in seconds
:return:
"""
return int(time())
class ReportHook(object):
"""
        Report-hook callback class
"""
last_block = 0
download = 0
total = 0
percent = 0
last_time = current_timestamp()
def cb(self, block_num=1, block_size=1, total_size=None):
"""
retrieve reporthook
            :param block_num: block number
            :param block_size: block size
            :param total_size: total download size
:return:
"""
if not total_size:
return
self.total = total_size
download = (block_num - self.last_block) * block_size
now = current_timestamp()
interval = now - self.last_time
if not interval:
return
speed = download // interval
if not speed:
return
self.download += download
percent = self.download * 100 // self.total
est = (self.total - self.download) // speed
if percent >= self.percent + step:
sys.stderr.write(f"{percent}% [{convert_bytes(self.download)}/{convert_bytes(self.total)}] complete, "
f"estimate to take another {est} seconds\n")
self.percent = percent
self.last_block = block_num
self.last_time = now
#logger.debug("recommended to use requests.proxy.do_download_tqdm instead if stdout support \"\\r\"")
reporthook = ReportHook().cb if progress else None
try:
logger.debug(f"download {url} to {file_path}")
return urlretrieve(url, file_path, reporthook=reporthook)
except:
logger.debug(f"urlretrieve {url} exception {traceback.format_exc()}")
def do_download_tqdm(url, file_path):
"""
    Display the download progress bar using tqdm
:param url:
:param file_path:
:return:
"""
class ReportHook(tqdm):
"""
        Report-hook callback class
"""
last_block = 0
def cb(self, block_num=1, block_size=1, total_size=None):
"""
retrieve reporthook
            :param block_num: block number
            :param block_size: block size
            :param total_size: total download size
:return:
"""
if total_size:
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
with ReportHook(unit='iB', unit_scale=True) as progress_bar:
try:
logger.debug(f"download {url} to {file_path}")
return urlretrieve(url, file_path, progress_bar.cb)
except:
logger.debug(f"urlretrieve {url} exception {traceback.format_exc()}")
| 0
| 0
| 0
|
de17091d3df24d73c76bf0f93b09db5b447175d3
| 1,946
|
py
|
Python
|
.vim/plugged/after/pythonx/my_snippet_helpers.py
|
sharils/home
|
bccac132b68d8d7cc46f21cae343b9ce99af6e01
|
[
"Unlicense"
] | 3
|
2018-08-03T11:58:44.000Z
|
2019-12-12T14:44:36.000Z
|
.vim/plugged/after/pythonx/my_snippet_helpers.py
|
sharils/home
|
bccac132b68d8d7cc46f21cae343b9ce99af6e01
|
[
"Unlicense"
] | 1
|
2020-08-02T01:14:37.000Z
|
2020-08-02T01:15:27.000Z
|
.vim/plugged/after/pythonx/my_snippet_helpers.py
|
sharils/home
|
bccac132b68d8d7cc46f21cae343b9ce99af6e01
|
[
"Unlicense"
] | 1
|
2019-03-02T10:08:10.000Z
|
2019-03-02T10:08:10.000Z
|
import re
import os
import vim
| 27.408451
| 112
| 0.666495
|
import re
import os
import vim
def around_assign(snip):
return '=' in snip.buffer[snip.line]
def around_catch(snip):
return 'catch' in snip.buffer[snip.line - 1]
def around_class(snip):
return 'class' in snip.buffer[snip.line]
def around_const_let_var(snip):
return re.search('const|let|var', snip.buffer[snip.line]) and '=' not in snip.buffer[snip.line]
def around_expect(snip):
return 'expect' in snip.buffer[snip.line]
def around_export(snip):
return 'export' in snip.buffer[snip.line]
def around_first_line_or_require(snip):
return 'const' not in snip.buffer[snip.line] and (snip.line == 0 or 'require' in snip.buffer[snip.line - 1])
def around_for(snip):
return 'for' in snip.buffer[snip.line]
def around_function(snip):
return re.search('=>|function|^\s+\w+\(.*\)', snip.buffer[snip.line])
def around_import(snip):
return 'import' in snip.buffer[snip.line]
def around_json_parse(snip):
return 'JSON.parse' in snip.v
def around_regex(snip):
return re.search('/.+/', snip.buffer[snip.line])
def around_require(snip):
return 'require' in snip.buffer[snip.line]
def around_switch(snip):
return re.search('break|switch', snip.buffer[snip.line - 1]) or (
snip.line - 2 >= 0 and
'case' in snip.buffer[snip.line - 2] and
'return' in snip.buffer[snip.line - 1]
)
def around_template_string(snip):
return '`' in snip.buffer[snip.line]
def around_throw(snip):
return 'throw' in snip.buffer[snip.line]
def importname(path):
return to_camel(re.sub('\..*', '', os.path.basename(path)))
def path_and_line():
return vim.eval("'\"'. expand('%') . ':' . line('.') . '\"'")
def singularize(plural):
return re.sub('(?<!e)es|s$', '', plural)
def singularize_idx(plural):
return singularize(plural) + 'Idx' if len(plural) > 0 else plural
def to_camel(text):
return re.sub('[-_]([a-zA-Z0-9])', lambda m: m.group(1).upper(), text)
| 1,432
| 0
| 483
|
681bb0f26e4f2b6b3ed857eda5a41e22243ccaba
| 3,488
|
py
|
Python
|
angr/procedures/definitions/win32_api-ms-win-wsl-api-l1-1-0.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_api-ms-win-wsl-api-l1-1-0.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_api-ms-win-wsl-api-l1-1-0.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("api-ms-win-wsl-api-l1-1-0.dll")
prototypes = \
{
#
'WslIsDistributionRegistered': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName"]),
#
'WslRegisterDistribution': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "tarGzFilename"]),
#
'WslUnregisterDistribution': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName"]),
#
'WslConfigureDistribution': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="WSL_DISTRIBUTION_FLAGS")], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "defaultUID", "wslDistributionFlags"]),
#
'WslGetDistributionConfiguration': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="WSL_DISTRIBUTION_FLAGS"), offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="Byte"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "distributionVersion", "defaultUID", "wslDistributionFlags", "defaultEnvironmentVariables", "defaultEnvironmentVariableCount"]),
#
'WslLaunchInteractive': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "command", "useCurrentWorkingDirectory", "exitCode"]),
#
'WslLaunch': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "command", "useCurrentWorkingDirectory", "stdIn", "stdOut", "stdErr", "process"]),
}
lib.set_prototypes(prototypes)
| 96.888889
| 706
| 0.746273
|
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("api-ms-win-wsl-api-l1-1-0.dll")
prototypes = \
{
#
'WslIsDistributionRegistered': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName"]),
#
'WslRegisterDistribution': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "tarGzFilename"]),
#
'WslUnregisterDistribution': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName"]),
#
'WslConfigureDistribution': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="WSL_DISTRIBUTION_FLAGS")], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "defaultUID", "wslDistributionFlags"]),
#
'WslGetDistributionConfiguration': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="WSL_DISTRIBUTION_FLAGS"), offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="Byte"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "distributionVersion", "defaultUID", "wslDistributionFlags", "defaultEnvironmentVariables", "defaultEnvironmentVariableCount"]),
#
'WslLaunchInteractive': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "command", "useCurrentWorkingDirectory", "exitCode"]),
#
'WslLaunch': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["distributionName", "command", "useCurrentWorkingDirectory", "stdIn", "stdOut", "stdErr", "process"]),
}
lib.set_prototypes(prototypes)
| 0
| 0
| 0
|