hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
933e1d3bf96fcd6d9b9ec55a821268f440bca98b | 4,232 | py | Python | model.py | andriikushch/CarND-Behavioral-Cloning-P3 | 89bd19c3a52f1f34126c7aeb97e94437bde25e0c | [
"MIT"
] | 1 | 2019-09-19T04:10:30.000Z | 2019-09-19T04:10:30.000Z | model.py | andriikushch/CarND-Behavioral-Cloning-P3 | 89bd19c3a52f1f34126c7aeb97e94437bde25e0c | [
"MIT"
] | null | null | null | model.py | andriikushch/CarND-Behavioral-Cloning-P3 | 89bd19c3a52f1f34126c7aeb97e94437bde25e0c | [
"MIT"
] | null | null | null | import csv
from math import ceil
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
import sklearn
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, BatchNormalization, Dropout, Cropping2D
from keras.layers.convolutional import Convolution2D
from keras.callbacks import EarlyStopping
batch_size = 32  # CSV rows per generator batch; each row expands to 4 images
lines = []  # raw rows from the simulator's driving_log.csv
# load stored data; per the usage below, row[0..2] are the center/left/right
# camera image paths and row[3] is the steering measurement
with open('./data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
# helper function to read image from path
def read_image_from_disk(source_path):
    """Load an image from ./data/IMG/ given any recorded source path.

    Only the basename of *source_path* is used, so log files recorded on
    a different machine still resolve to the local data directory.
    """
    basename = source_path.split('/')[-1]
    return cv2.imread("./data/IMG/" + basename)
# splitting data into train_samples and validation_samples (80% / 20%)
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
# create a generator for memory efficiency
def generator(samples, batch_size=32):
    """Yield shuffled (X, y) training batches forever (for fit_generator).

    Each CSV row yields four examples: the center image, its horizontal
    mirror (with negated steering), and the left/right camera images with
    a +/-0.2 steering correction applied.
    """
    num_samples = len(samples)
    correction = 0.2  # steering offset for the side cameras; parameter to tune
    while True:  # Loop forever so the generator never terminates
        # BUG FIX: sklearn.utils.shuffle returns a shuffled copy and does
        # NOT shuffle in place; the original call discarded the result, so
        # the sample order was never actually reshuffled between epochs.
        samples = sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images, measurements = [], []
            for sample in batch_samples:
                # center image and its steering measurement
                measurement = float(sample[3])
                center_image = read_image_from_disk(sample[0])
                images.append(center_image)
                measurements.append(measurement)
                # horizontally flipped copy balances left/right turn bias
                images.append(cv2.flip(center_image, 1))
                measurements.append(measurement * -1.0)
                # side-camera images with corrected steering measurements
                left_image = read_image_from_disk(sample[1])
                right_image = read_image_from_disk(sample[2])
                images.extend([left_image, right_image])
                measurements.extend([measurement + correction,
                                     measurement - correction])
            # convert images and measurements to np.array and shuffle batch
            X_train = np.array(images)
            y_train = np.array(measurements)
            yield sklearn.utils.shuffle(X_train, y_train)
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
# stop as soon as val_loss stops improving (patience=0), keep best weights
callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto', baseline=None,
                           restore_best_weights=True)]
# define model
model = Sequential()
# preprocess input normalize and crop
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))  # scale pixels to [-0.5, 0.5]
model.add(Cropping2D(cropping=((50, 20), (0, 0))))  # drop top 50 / bottom 20 rows (presumably sky and hood)
# add Convolution2D layers (all 'valid' padding, ReLU activations)
model.add(Convolution2D(filters=24, kernel_size=(5, 5), padding='valid', activation='relu'))
model.add(Convolution2D(filters=36, kernel_size=(5, 5), padding='valid', activation='relu'))
model.add(Convolution2D(filters=48, kernel_size=(5, 5), padding='valid', activation='relu'))
model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='valid', activation='relu'))
model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='valid', activation='relu'))
# add fully connected layers with batch norm and dropout for regularization
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(50, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(1))  # single continuous output: the steering measurement
# regression on the steering value: mean-squared-error loss, Adam optimizer
model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator,
                    steps_per_epoch=ceil(len(train_samples) / batch_size),
                    validation_data=validation_generator,
                    validation_steps=ceil(len(validation_samples) / batch_size),
                    epochs=5, verbose=1, callbacks=callbacks)
# save result
model.save('model.h5')
| 36.17094 | 110 | 0.688563 | 0 | 0 | 1,484 | 0.350662 | 0 | 0 | 0 | 0 | 727 | 0.171786 |
933e73421f8903c5d4b0d41ce7311e8338acfeb3 | 8,854 | py | Python | src/natcap/invest/__init__.py | dcdenu4/invest | 3d115d4d903674236f1e9e9962d737029ebd0701 | [
"BSD-3-Clause"
] | null | null | null | src/natcap/invest/__init__.py | dcdenu4/invest | 3d115d4d903674236f1e9e9962d737029ebd0701 | [
"BSD-3-Clause"
] | 28 | 2020-04-09T23:39:38.000Z | 2020-04-17T00:30:47.000Z | src/natcap/invest/__init__.py | dcdenu4/invest | 3d115d4d903674236f1e9e9962d737029ebd0701 | [
"BSD-3-Clause"
] | null | null | null | """init module for natcap.invest."""
import dataclasses
import logging
import os
import sys
import pkg_resources
LOGGER = logging.getLogger('natcap.invest')
LOGGER.addHandler(logging.NullHandler())  # library: no handler by default
__all__ = ['local_dir', ]
try:
    __version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound:
    # package is not installed.  Log the exception for debugging.
    LOGGER.exception('Could not load natcap.invest version information')
@dataclasses.dataclass
class _MODELMETA:
    """Dataclass to store frequently used model metadata.

    One instance per InVEST model; collected in the MODEL_METADATA mapping
    defined below.
    """
    model_title: str  # display name for the model
    pyname: str  # importable python module name for the model
    gui: str  # importable python class for the corresponding Qt UI
    userguide: str  # name of the corresponding built userguide file
    aliases: tuple  # alternate names for the model, if any
# Mapping of canonical model id -> _MODELMETA for every InVEST model the
# package exposes.  Keys are the names used on the command line; ``aliases``
# lists accepted shorthand names (empty tuple when the model has none).
MODEL_METADATA = {
    'annual_water_yield': _MODELMETA(
        model_title='Annual Water Yield',
        pyname='natcap.invest.annual_water_yield',
        gui='annual_water_yield.AnnualWaterYield',
        userguide='annual_water_yield.html',
        aliases=('hwy', 'awy')),
    'carbon': _MODELMETA(
        model_title='Carbon Storage and Sequestration',
        pyname='natcap.invest.carbon',
        gui='carbon.Carbon',
        userguide='carbonstorage.html',
        aliases=()),
    'coastal_blue_carbon': _MODELMETA(
        model_title='Coastal Blue Carbon',
        pyname='natcap.invest.coastal_blue_carbon.coastal_blue_carbon',
        gui='cbc.CoastalBlueCarbon',
        userguide='coastal_blue_carbon.html',
        aliases=('cbc',)),
    'coastal_blue_carbon_preprocessor': _MODELMETA(
        model_title='Coastal Blue Carbon Preprocessor',
        pyname='natcap.invest.coastal_blue_carbon.preprocessor',
        gui='cbc.CoastalBlueCarbonPreprocessor',
        userguide='coastal_blue_carbon.html',
        aliases=('cbc_pre',)),
    'coastal_vulnerability': _MODELMETA(
        model_title='Coastal Vulnerability',
        pyname='natcap.invest.coastal_vulnerability',
        gui='coastal_vulnerability.CoastalVulnerability',
        userguide='coastal_vulnerability.html',
        aliases=('cv',)),
    'crop_production_percentile': _MODELMETA(
        model_title='Crop Production: Percentile',
        pyname='natcap.invest.crop_production_percentile',
        gui='crop_production.CropProductionPercentile',
        userguide='crop_production.html',
        aliases=('cpp',)),
    'crop_production_regression': _MODELMETA(
        model_title='Crop Production: Regression',
        pyname='natcap.invest.crop_production_regression',
        gui='crop_production.CropProductionRegression',
        userguide='crop_production.html',
        aliases=('cpr',)),
    'delineateit': _MODELMETA(
        model_title='DelineateIt',
        pyname='natcap.invest.delineateit.delineateit',
        gui='delineateit.Delineateit',
        userguide='delineateit.html',
        aliases=()),
    'finfish_aquaculture': _MODELMETA(
        model_title='Finfish Aquaculture',
        pyname='natcap.invest.finfish_aquaculture.finfish_aquaculture',
        gui='finfish.FinfishAquaculture',
        userguide='marine_fish.html',
        aliases=()),
    'fisheries': _MODELMETA(
        model_title='Fisheries',
        pyname='natcap.invest.fisheries.fisheries',
        gui='fisheries.Fisheries',
        userguide='fisheries.html',
        aliases=()),
    'fisheries_hst': _MODELMETA(
        model_title='Fisheries Habitat Scenario Tool',
        pyname='natcap.invest.fisheries.fisheries_hst',
        gui='fisheries.FisheriesHST',
        userguide='fisheries.html',
        aliases=()),
    'forest_carbon_edge_effect': _MODELMETA(
        model_title='Forest Carbon Edge Effect',
        pyname='natcap.invest.forest_carbon_edge_effect',
        gui='forest_carbon.ForestCarbonEdgeEffect',
        userguide='carbon_edge.html',
        aliases=('fc',)),
    'globio': _MODELMETA(
        model_title='GLOBIO',
        pyname='natcap.invest.globio',
        gui='globio.GLOBIO',
        userguide='globio.html',
        aliases=()),
    'habitat_quality': _MODELMETA(
        model_title='Habitat Quality',
        pyname='natcap.invest.habitat_quality',
        gui='habitat_quality.HabitatQuality',
        userguide='habitat_quality.html',
        aliases=('hq',)),
    'habitat_risk_assessment': _MODELMETA(
        model_title='Habitat Risk Assessment',
        pyname='natcap.invest.hra',
        gui='hra.HabitatRiskAssessment',
        userguide='habitat_risk_assessment.html',
        aliases=('hra',)),
    'ndr': _MODELMETA(
        model_title='Nutrient Delivery Ratio',
        pyname='natcap.invest.ndr.ndr',
        gui='ndr.Nutrient',
        userguide='ndr.html',
        aliases=()),
    'pollination': _MODELMETA(
        model_title='Crop Pollination',
        pyname='natcap.invest.pollination',
        gui='pollination.Pollination',
        userguide='croppollination.html',
        aliases=()),
    'recreation': _MODELMETA(
        model_title='Visitation: Recreation and Tourism',
        pyname='natcap.invest.recreation.recmodel_client',
        gui='recreation.Recreation',
        userguide='recreation.html',
        aliases=()),
    'routedem': _MODELMETA(
        model_title='RouteDEM',
        pyname='natcap.invest.routedem',
        gui='routedem.RouteDEM',
        userguide='routedem.html',
        aliases=()),
    'scenario_generator_proximity': _MODELMETA(
        model_title='Scenario Generator: Proximity Based',
        pyname='natcap.invest.scenario_gen_proximity',
        gui='scenario_gen.ScenarioGenProximity',
        userguide='scenario_gen_proximity.html',
        aliases=('sgp',)),
    'scenic_quality': _MODELMETA(
        model_title='Unobstructed Views: Scenic Quality Provision',
        pyname='natcap.invest.scenic_quality.scenic_quality',
        gui='scenic_quality.ScenicQuality',
        userguide='scenic_quality.html',
        aliases=('sq',)),
    'sdr': _MODELMETA(
        model_title='Sediment Delivery Ratio',
        pyname='natcap.invest.sdr.sdr',
        gui='sdr.SDR',
        userguide='sdr.html',
        aliases=()),
    'seasonal_water_yield': _MODELMETA(
        model_title='Seasonal Water Yield',
        pyname='natcap.invest.seasonal_water_yield.seasonal_water_yield',
        gui='seasonal_water_yield.SeasonalWaterYield',
        userguide='seasonal_water_yield.html',
        aliases=('swy',)),
    'wave_energy': _MODELMETA(
        model_title='Wave Energy Production',
        pyname='natcap.invest.wave_energy',
        gui='wave_energy.WaveEnergy',
        userguide='wave_energy.html',
        aliases=()),
    'wind_energy': _MODELMETA(
        model_title='Wind Energy Production',
        pyname='natcap.invest.wind_energy',
        gui='wind_energy.WindEnergy',
        userguide='wind_energy.html',
        aliases=()),
    'urban_flood_risk_mitigation': _MODELMETA(
        model_title='Urban Flood Risk Mitigation',
        pyname='natcap.invest.urban_flood_risk_mitigation',
        gui='urban_flood_risk_mitigation.UrbanFloodRiskMitigation',
        userguide='urban_flood_risk_mitigation.html',
        aliases=('ufrm',)),
    'urban_cooling_model': _MODELMETA(
        model_title='Urban Cooling',
        pyname='natcap.invest.urban_cooling_model',
        gui='urban_cooling_model.UrbanCoolingModel',
        userguide='urban_cooling_model.html',
        aliases=('ucm',)),
}
def local_dir(source_file):
    """Return the path to where `source_file` would be on disk.

    If this is frozen (as with PyInstaller), this will be the folder with the
    executable in it. If not, it'll just be the foldername of the source_file
    being passed in.
    """
    source_dirname = os.path.dirname(source_file)
    if getattr(sys, 'frozen', False):
        # sys.frozen is True when we're in either a py2exe or pyinstaller
        # build.
        if not getattr(sys, '_MEIPASS', False):
            # NOTE(review): the original comment here said "_MEIPASS exists,
            # we're in a Pyinstaller build", but the condition takes this
            # branch when _MEIPASS is ABSENT.  Confirm which frozen layout
            # each branch is actually meant to serve.
            # only one os.path.dirname() results in the path being relative to
            # the natcap.invest package, when I actually want natcap/invest to
            # be in the filepath.

            # relpath would be something like <modelname>/<data_file>
            relpath = os.path.relpath(source_file, os.path.dirname(__file__))
            pkg_path = os.path.join('natcap', 'invest', relpath)
            return os.path.join(
                os.path.dirname(sys.executable), os.path.dirname(pkg_path))
        else:
            # _MEIPASS is set: the dirname computed above already points
            # inside the unpacked bundle, so just fall through and return it.
            # (Original comment assumed py2exe with directory structure
            # maintained -- TODO confirm.)
            pass
    return source_dirname
| 39.351111 | 79 | 0.660605 | 406 | 0.045855 | 0 | 0 | 429 | 0.048453 | 0 | 0 | 4,755 | 0.537045 |
933f0720d869cc058a8608282bd4befb49d6a16b | 18,507 | py | Python | amnesia/modules/account/resources.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 4 | 2015-05-08T10:57:56.000Z | 2021-05-17T04:32:11.000Z | amnesia/modules/account/resources.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 6 | 2019-12-26T16:43:41.000Z | 2022-02-28T11:07:54.000Z | amnesia/modules/account/resources.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 1 | 2019-09-23T14:08:11.000Z | 2019-09-23T14:08:11.000Z | # -*- coding: utf-8 -*-
# pylint: disable=E1101
import logging
import os
import operator
from binascii import hexlify
from pyramid.security import DENY_ALL
from pyramid.security import Everyone
from pyramid.security import Allow
from pyramid.settings import asbool
from pyramid_mailer.message import Message
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.exc import DatabaseError
from sqlalchemy import sql
from zope.sqlalchemy import invalidate
from amnesia.resources import Resource
from amnesia.modules.account import Account
from amnesia.modules.account import Role
from amnesia.modules.account import Permission
from amnesia.modules.account import ACLResource
from amnesia.modules.account import ContentACL
from amnesia.modules.account import GlobalACL
from amnesia.modules.account import AccountRole
from .util import bcrypt_hash_password
from .util import bcrypt_check_password
log = logging.getLogger(__name__)  # module-level logger
class AuthResource(Resource):
    """Traversal resource rooting the authentication views (/auth)."""

    __name__ = 'auth'

    def __init__(self, request, parent):
        super().__init__(request)
        self.__parent__ = parent

    def __acl__(self):
        """Yield ACEs: login/logout/lost are public, register only when
        registration is enabled in the settings, everything else denied."""
        aces = [
            (Allow, Everyone, 'login'),
            (Allow, Everyone, 'logout'),
            (Allow, Everyone, 'lost'),
        ]
        if self.registration_enabled:
            aces.append((Allow, Everyone, 'register'))
        aces.append(DENY_ALL)
        yield from aces

    @property
    def registration_enabled(self):
        """True when the 'registration_enabled' setting is truthy."""
        return asbool(self.settings.get('registration_enabled'))
class DatabaseAuthResource(AuthResource):
    """Authentication resource backed by the Account database table.

    Supports traversal to individual accounts plus lookup, registration,
    password checking and the lost-password (token) workflow.
    """

    def __getitem__(self, path):
        # Traversal: /auth/<account_id> -> AccountEntity
        if path.isdigit():
            account = self.get_user(path)
            if account:
                return AccountEntity(self.request, account)

        raise KeyError

    @property
    def query(self):
        return sql.select(Account)

    def get_user(self, user_id):
        """Return the Account with primary key *user_id*, or None."""
        return self.dbsession.get(Account, user_id)

    def find_login(self, login, **kwargs):
        """Return the unique Account with this login, or None."""
        stmt = sql.select(Account).filter_by(login=login)

        try:
            return self.dbsession.execute(stmt).scalar_one()
        except (NoResultFound, MultipleResultsFound):
            return None

    def find_email(self, email):
        """Return the unique Account with this email (case-insensitive)."""
        stmt = sql.select(Account).filter(
            sql.func.lower(email) == sql.func.lower(Account.email)
        )

        try:
            return self.dbsession.execute(stmt).scalar_one()
        except (NoResultFound, MultipleResultsFound):
            return None

    def find_token(self, token):
        """Return the Account holding lost-password *token*, or None."""
        stmt = sql.select(Account).filter_by(lost_token=token)

        try:
            # BUG FIX: was ``self.dbsession.execulet(stmt)`` (typo), which
            # raised AttributeError so token lookups could never succeed.
            return self.dbsession.execute(stmt).scalar_one()
        except (NoResultFound, MultipleResultsFound):
            return None

    def check_user_password(self, user, password):
        """True when *password* matches the stored bcrypt hash."""
        try:
            return bcrypt_check_password(password, user.password)
        except ValueError:
            # malformed/unsupported stored hash: treat as a failed check
            return False

    def register(self, data):
        """Create a new Account from *data*; return it, or False on error."""
        new_account = Account(**data)

        try:
            self.dbsession.add(new_account)
            self.dbsession.flush()
            return new_account
        except DatabaseError:
            return False

    def send_token(self, principal):
        """Generate a lost-password token for *principal* and email it.

        Returns the principal on success, False on database error.
        """
        principal.lost_token = hexlify(os.urandom(16)).decode('utf-8')
        mailer = self.request.mailer

        # fixed "whishes" -> "wishes" typo in the user-facing email text
        body = '''
Hello {last_name} {first_name},
You have recently requested to reset the password for your account.
To reset your password please go to this page {url}
If you did not perform this request, you can safely ignore this email.
Your password will not be changed unless you choose to follow the link above.
If you require assistance or further information, contact us at {contact}.
Best wishes,
The Belgian Biodiversity Platform'''.format(
            last_name=principal.last_name, first_name=principal.first_name,
            url=self.request.resource_url(
                self, 'recover', query={'token': principal.lost_token}
            ),
            contact='contact@biodiversity.be'
        )

        message = Message(
            subject='Lost password',
            sender='noreply@biodiversity.be',
            recipients=[principal.email],
            body=body
        )

        try:
            self.dbsession.add(principal)
            self.dbsession.flush()
            mailer.send(message)
            return principal
        except DatabaseError:
            return False

    def reset_password(self, principal, password):
        """Store a new bcrypt-hashed password and clear the lost token.

        Returns True on success, False on database error.
        """
        principal.password = bcrypt_hash_password(password)
        principal.lost_token = None

        try:
            self.dbsession.add(principal)
            self.dbsession.flush()
            return True
        except DatabaseError:
            return False
class AccountEntity(Resource):
    """Traversal resource wrapping a single Account row."""

    __parent__ = DatabaseAuthResource

    def __init__(self, request, entity):
        super().__init__(request)
        # the wrapped Account instance
        self.entity = entity

    @property
    def __name__(self):
        # URL segment for this resource is the account's primary key
        return self.entity.id
###############################################################################
# ROLE #
###############################################################################
class RoleResource(Resource):
    """Container resource for Role entities (/roles)."""

    __name__ = 'roles'

    def __init__(self, request, parent):
        super().__init__(request)
        self.parent = parent

    @property
    def __parent__(self):
        return self.parent

    def __getitem__(self, path):
        """Traverse to a RoleEntity by numeric id."""
        if not path.isdigit():
            raise KeyError
        entity = self.dbsession.get(Role, path)
        if not entity:
            raise KeyError
        return RoleEntity(self.request, entity, self)

    def __acl_adapter__(self, ace):
        """Map ACEs whose permission looks like '<op>_role' to plain '<op>';
        pass non-string / un-splittable permissions through unchanged."""
        allow_deny, principal, permission = ace
        try:
            op, ctx = permission.split('_', 1)
        except (AttributeError, ValueError):
            yield allow_deny, principal, permission
        else:
            if ctx == 'role':
                yield allow_deny, principal, op

    def count(self):
        """Return the total number of roles."""
        stmt = sql.select(sql.func.count('*')).select_from(Role)
        return self.dbsession.execute(stmt).scalar_one()

    def query(self, order_by=None, limit=None, offset=None):
        """Select roles, optionally ordered and paginated."""
        stmt = sql.select(Role)
        for clause, method in ((order_by, 'order_by'), (limit, 'limit'),
                               (offset, 'offset')):
            if clause is not None:
                stmt = getattr(stmt, method)(clause)
        return self.dbsession.execute(stmt).scalars()

    def create(self, name, description):
        """Persist a new Role; return it, or False on database error."""
        new_role = Role(name=name, description=description)
        try:
            self.dbsession.add(new_role)
            self.dbsession.flush()
        except DatabaseError:
            return False
        return new_role
class RoleEntity(Resource):
    """Resource for a single Role; exposes its ACLs and its members."""

    __acl__ = ()

    def __init__(self, request, role, parent):
        super().__init__(request)
        self.role = role
        self.parent = parent

    @property
    def __name__(self):
        return self.role.id

    @property
    def __parent__(self):
        return self.parent

    def __getitem__(self, path):
        """Traverse to the role's ACLs, or (non-virtual roles) its members."""
        if path == 'acls':
            return ACLEntity(self.request, role=self.role, parent=self)
        if path == 'members' and not self.role.virtual:
            return RoleMember(self.request, role=self.role, parent=self)
        raise KeyError

    def delete(self):
        """Delete this role; True on success, False on database error."""
        try:
            self.dbsession.delete(self.role)
            self.dbsession.flush()
        except DatabaseError:
            return False
        return True
class RoleMember(Resource):
    """Collection resource for the accounts holding a given role."""

    __name__ = 'members'
    __acl__ = ()

    def __init__(self, request, role, parent):
        super().__init__(request)
        self.role = role
        self.parent = parent

    @property
    def __parent__(self):
        return self.parent

    def __getitem__(self, path):
        # Traversal: .../members/<account_id> -> RoleMemberEntity
        if path.isdigit():
            account = self.dbsession.get(Account, path)
            if account:
                return RoleMemberEntity(
                    self.request, role=self.role, account=account, parent=self,
                    name=account.id
                )

        raise KeyError

    def query(self):
        """SELECT statement for this role's membership rows."""
        # BUG FIX: was ``AccountRole == self.role`` (mapped class compared
        # to an instance -- constant false, so the query matched nothing);
        # RoleMemberEntity below shows the intended relationship filter.
        return sql.select(AccountRole).filter(
            AccountRole.role == self.role
        )

    def get_members(self):
        """Return all accounts that have this role."""
        stmt = sql.select(Account).filter(
            Account.account_roles.any(role=self.role)
        )

        return self.dbsession.execute(stmt).scalars().all()

    def add_member(self, account):
        """Grant this role to *account*; return the link row, or False."""
        try:
            account_role = AccountRole(role=self.role, account=account)
            self.role.accounts.append(account_role)
            self.dbsession.flush()
            return account_role
        except DatabaseError:
            return False

    def delete(self):
        """Remove every membership of this role; rowcount, or False."""
        # BUG FIX: same class-to-instance comparison as in query(); the
        # DELETE previously matched no rows.
        stmt = sql.delete(AccountRole).filter(
            AccountRole.role == self.role
        )

        try:
            result = self.dbsession.execute(stmt)
            invalidate(self.dbsession)
            return result.rowcount
        except DatabaseError:
            return False
class RoleMemberEntity(Resource):
    """Resource for one (role, account) membership link."""

    def __init__(self, request, role, account, parent, name):
        super().__init__(request)
        self.role = role
        self.account = account
        self.__parent__ = parent
        self.__name__ = name

    def _membership_filter(self):
        # both the role and the account must match the link row
        return sql.and_(
            AccountRole.role == self.role,
            AccountRole.account == self.account
        )

    def query(self):
        """SELECT statement for this membership row."""
        return sql.select(AccountRole).filter(self._membership_filter())

    def delete(self):
        """Delete this membership; return rowcount, or False on error."""
        stmt = sql.delete(AccountRole).filter(self._membership_filter())
        try:
            deleted = self.dbsession.execute(stmt)
            invalidate(self.dbsession)
            return deleted.rowcount
        except DatabaseError:
            return False
###############################################################################
# ACCESS CONTROL LIST (ACL) #
###############################################################################
class ACLEntity(Resource):
    ''' Manage ACL for a role.

    Exposes CRUD operations on the role's GlobalACL entries plus ordered
    reordering via per-entry weights.
    '''

    __name__ = 'acls'
    __acl__ = ()

    def __init__(self, request, role, parent):
        super().__init__(request)
        self.role = role
        self.parent = parent

    @property
    def __parent__(self):
        return self.parent

    def query(self, order_by=None):
        ''' Select this role's global ACL entries, optionally ordered. '''
        stmt = sql.select(GlobalACL).filter_by(role=self.role)

        if order_by is not None:
            stmt = stmt.order_by(order_by)

        result = self.dbsession.execute(stmt).scalars()

        return result

    def create(self, permission, allow):
        ''' Add a global ACE for this role; return it, or False on error. '''
        acl = GlobalACL(role=self.role, permission=permission, allow=allow)

        try:
            self.dbsession.add(acl)
            self.dbsession.flush()
            return acl
        except DatabaseError:
            return False

    # XXX: add patch= arg ?
    def update(self, permission, weight, **data):
        ''' Update the ACE for *permission* (row locked), moving it to
        *weight* and applying remaining keyword fields via feed().
        Returns the ACE, or False when missing / on database error.

        NOTE(review): update_permission_weight() below filters on
        ``permission_id`` but receives the *permission* object here --
        confirm the intended argument type.
        '''
        stmt = sql.select(
            GlobalACL
        ).filter_by(
            role=self.role
        ).filter_by(
            permission=permission
        ).with_for_update()

        try:
            role_perm = self.dbsession.execute(stmt).scalar_one()
            self.update_permission_weight(permission, weight)
            role_perm.feed(**data)
            self.dbsession.add(role_perm)
            self.dbsession.flush()
            return role_perm
        except (NoResultFound, DatabaseError):
            return False

    def get_permissions(self, order_by=None):
        ''' Select all available permissions, optionally ordered. '''
        stmt = sql.select(Permission)

        if order_by is not None:
            stmt = stmt.order_by(order_by)

        result = self.dbsession.execute(stmt).scalars()

        return result

    def delete_permission(self, permission_id, **kwargs):
        ''' Remove this role's global ACE for *permission_id*.
        Returns the deleted ACE, or False when missing / on error. '''
        stmt = sql.select(
            GlobalACL
        ).filter_by(
            role=self.role
        ).filter_by(
            permission_id=permission_id
        ).with_for_update()

        try:
            # FIXME: .delete()
            role_perm = self.dbsession.execute(stmt).scalar_one()
        except NoResultFound:
            return False

        try:
            self.dbsession.delete(role_perm)
            self.dbsession.flush()
            return role_perm
        except DatabaseError:
            return False

    def update_permission_weight(self, permission_id, weight):
        """ Change the weight of a permission.

        Locks the target row, then shifts every entry between the old and
        new weight by one (up or down) while assigning the target weight
        via a single SQL CASE update.  Returns the UPDATE rowcount, False
        when the entry is missing, or None on database error.
        """
        stmt = sql.select(
            GlobalACL
        ).filter_by(
            role=self.role
        ).filter_by(
            permission_id=permission_id
        ).with_for_update()

        try:
            obj = self.dbsession.execute(stmt).scalar_one()
        except NoResultFound:
            return False

        (min_weight, max_weight) = sorted((weight, obj.weight))

        # Do we move downwards or upwards ?
        if weight - obj.weight > 0:
            operation = operator.sub
            whens = {min_weight: max_weight}
        else:
            operation = operator.add
            whens = {max_weight: min_weight}

        # Select all the rows between the current weight and the new weight
        # Note: The polymorphic identity WHERE criteria is not included for
        # single- or joined- table updates - this must be added manually, even
        # for single table inheritance.
        # See Caveats section at
        # https://docs.sqlalchemy.org/en/13/orm/query.html#sqlalchemy.orm.query.Query.update
        global_resource = sql.select(
            ACLResource.id
        ).filter_by(
            name='GLOBAL'
        ).subquery()

        filters = sql.and_(
            GlobalACL.weight.between(min_weight, max_weight),
            GlobalACL.resource_id == global_resource.c.id
        )

        # Swap min_weight/max_weight, or increment/decrement by one depending
        # on whether one moves up or down
        weight = sql.case(
            value=GlobalACL.weight, whens=whens,
            else_=operation(GlobalACL.weight, 1)
        )

        stmt = sql.update(
            GlobalACL
        ).filter(
            filters
        ).values(
            weight=weight
        ).execution_options(
            synchronize_session=False
        )

        try:
            # The UPDATE statement
            updated = self.dbsession.execute(stmt)
            invalidate(self.dbsession)
            return updated.rowcount
        except DatabaseError:
            return None
class ContentACLEntity(Resource):
    ''' Manage ACL for a Content based entity.

    Exposes CRUD operations on the content's ContentACL entries, parent-ACL
    inheritance toggling, and weight-based reordering.
    '''

    __name__ = 'acl'
    __acl__ = ()

    def __init__(self, request, content, parent):
        super().__init__(request)
        self.content = content
        self.parent = parent

    @property
    def __parent__(self):
        return self.parent

    def query(self, order_by=None):
        ''' Select this content's ACL entries, optionally ordered. '''
        stmt = sql.select(
            ContentACL
        ).filter_by(
            content=self.content
        )

        if order_by is not None:
            stmt = stmt.order_by(order_by)

        result = self.dbsession.execute(stmt).scalars()

        return result

    def get_permissions(self, order_by=None):
        ''' Select all available permissions, optionally ordered. '''
        stmt = sql.select(Permission)

        if order_by is not None:
            stmt = stmt.order_by(order_by)

        result = self.dbsession.execute(stmt).scalars()

        return result

    def create(self, role, permission, allow):
        ''' Add an ACE on this content; return it, or False on error. '''
        acl = ContentACL(
            content=self.content, role=role, permission=permission,
            allow=allow
        )

        try:
            self.dbsession.add(acl)
            self.dbsession.flush()
            return acl
        except DatabaseError:
            return False

    def delete_permission(self, acl_id):
        ''' Delete ACE *acl_id* from this content; rowcount, or False. '''
        stmt = sql.delete(
            ContentACL
        ).filter_by(
            content=self.content
        ).filter_by(
            id=acl_id
        )

        try:
            deleted = self.dbsession.execute(stmt)
            invalidate(self.dbsession)
            return deleted.rowcount
        except DatabaseError:
            return False

    def set_inherits_parent_acl(self, value):
        ''' Toggle whether this content inherits its parent's ACL.
        True on success, False on database error. '''
        self.content.inherits_parent_acl = value

        try:
            self.dbsession.add(self.content)
            self.dbsession.flush()
            return True
        except DatabaseError:
            return False

    def update_permission_weight(self, role, permission, weight):
        """ Change the weight of a permission.

        Locks the (content, permission, role) row, then shifts every entry
        between the old and new weight by one while assigning the target
        weight via a single SQL CASE update.  Returns the UPDATE rowcount,
        False when the entry is missing, or None on database error.
        """
        filters = sql.and_(
            ContentACL.content == self.content,
            ContentACL.permission == permission,
            ContentACL.role == role
        )

        stmt = sql.select(
            ContentACL
        ).filter(
            filters
        ).with_for_update()

        try:
            obj = self.dbsession.execute(stmt).scalar_one()
        except NoResultFound:
            return False

        (min_weight, max_weight) = sorted((weight, obj.weight))

        # Do we move downwards or upwards ?
        if weight - obj.weight > 0:
            operation = operator.sub
            whens = {min_weight: max_weight}
        else:
            operation = operator.add
            whens = {max_weight: min_weight}

        # Select all the rows between the current weight and the new weight
        filters = sql.and_(
            ContentACL.content == self.content,
            ContentACL.weight.between(min_weight, max_weight),
        )

        # Swap min_weight/max_weight, or increment/decrement by one depending
        # on whether one moves up or down
        weight = sql.case(
            value=ContentACL.weight, whens=whens,
            else_=operation(ContentACL.weight, 1)
        )

        stmt = sql.update(
            ContentACL
        ).filter(
            filters
        ).values(
            weight=weight
        ).execution_options(
            synchronize_session=False
        )

        try:
            # The UPDATE statement
            updated = self.dbsession.execute(stmt)
            invalidate(self.dbsession)
            return updated.rowcount
        except DatabaseError:
            return None
| 26.476395 | 92 | 0.580699 | 17,001 | 0.918625 | 579 | 0.031285 | 609 | 0.032906 | 0 | 0 | 2,090 | 0.11293 |
933f58e32e7a5ca6a9f6e5d52f172b36fc7b7210 | 5,013 | py | Python | unused/more_num.py | monadius/FPTaylor | 55214506eaf1a5fbbecf098221b81c4cc375ac6f | [
"MIT"
] | 21 | 2015-11-24T20:52:23.000Z | 2022-02-18T15:04:48.000Z | unused/more_num.py | monadius/FPTaylor | 55214506eaf1a5fbbecf098221b81c4cc375ac6f | [
"MIT"
] | 24 | 2016-10-31T16:46:00.000Z | 2021-04-07T04:35:37.000Z | unused/more_num.py | monadius/FPTaylor | 55214506eaf1a5fbbecf098221b81c4cc375ac6f | [
"MIT"
] | 7 | 2018-01-11T17:52:41.000Z | 2021-08-24T01:52:59.000Z | import math
import sys
from fractions import Fraction
from random import uniform, randint
import decimal as dec
def log10_floor(f):
    """Return floor(log10(f)) for f >= 1; -1 for f < 1.

    NOTE: shadowed by the accelerated redefinition further down this file.
    """
    power = 1
    exponent = -1
    while power <= f:
        power *= 10
        exponent += 1
    return exponent
def log10_ceil(f):
    """Return the smallest k with 10**k >= f (0 for f <= 1)."""
    exponent = 0
    power = 1
    while power < f:
        power *= 10
        exponent += 1
    return exponent
def log10_floor(f):
    """Return floor(log10(f)) for f > 0, or -1 for f <= 0.

    Uses a growing step size that resets on overshoot, so huge inputs
    need far fewer multiplications than a plain times-ten loop.
    """
    if f <= 0:
        return -1
    acc = 1          # power of ten reached so far: 10**result
    factor = 10      # next multiplier to try (accelerates between resets)
    result = 0
    step = 1
    while True:
        candidate = acc * factor
        if candidate > f:
            if step == 1:
                # even a single x10 overshoots: result is final
                break
            # overshot with a big step: restart probing from x10
            step = 1
            factor = 10
        else:
            factor *= 10
            result += step
            step += 1
            acc = candidate
    return result
# for i in range(20):
# f = 10 ** i
# print(f'{f}: {log10_floor(f)}, {log10_floor2(f)}')
# print(log10_floor2(100))
# sys.exit(0)
def str_of_pos_float_hi0(prec, x):
    """Render positive float x with prec significant digits, rounded up.

    Reference implementation using exact Fraction arithmetic; the final
    comparison bumps the last digit whenever truncation lost anything.
    Output format: ``d.ddd`` or ``d.ddde+K`` / ``d.ddde-K``.
    """
    assert x > 0
    q = Fraction(x)  # exact rational value of x
    n = int(q)       # integer part
    if n > 0:
        # k = number of decimal digits in the integer part
        k = log10_floor(n) + 1
        if k >= prec:
            # too many integer digits: drop the excess
            b = 10 ** (k - prec)
            r, e = n // b, k - prec
        else:
            # take the missing digits from the fractional part
            b = 10 ** (prec - k)
            r, e = n * b + int((q - n) * b), k - prec
    else:
        # x < 1: count the leading zero digits after the decimal point
        k = log10_floor(int(1 / q))
        b = 10 ** (k + prec)
        r, e = int(q * b), -(k + prec)
    if r * Fraction(10) ** e < q:
        # truncated below the true value: round the last digit up
        r += 1
    s = str(r)
    if len(s) > prec:
        # rounding overflowed into an extra digit; drop it, shift exponent
        s = s[:-1]
        e += 1
    e += prec - 1
    s = f'{s[0]}.{s[1:]}'
    if e == 0:
        return s
    return s + ('e+' if e > 0 else 'e') + str(e)
def str_of_pos_float_hi1(prec, x):
    """Render positive float x with prec significant digits, rounded up.

    Pure-integer variant of str_of_pos_float_hi0: decomposes x into
    mantissa and binary exponent and tracks the discarded bits in *rem*
    so the round-up decision never needs Fraction arithmetic.
    """
    assert x > 0
    # exact decomposition: x == m * 2**exp with m a 53-bit integer
    m, exp = math.frexp(x)
    m, exp = int(math.ldexp(m, 53)), exp - 53
    mask = (1 << abs(exp)) - 1
    if exp >= 0:
        n, rem = m << exp, 0
    else:
        # n = integer part of x, rem = its fractional bits
        n, rem = m >> -exp, m & mask
    if n > 0:
        # k = number of decimal digits in the integer part
        k = log10_floor(n) + 1
        if k >= prec:
            # too many integer digits: drop the excess, remember remainder
            b = 10 ** (k - prec)
            (r, rem2), e = divmod(n, b), k - prec
            rem2 = rem2 or rem
        else:
            # pull the missing digits out of the fractional bits
            b = 10 ** (prec - k)
            t = rem * b
            t, rem2 = t >> -exp, t & mask
            r, e = n * b + t, k - prec
    else:
        # x < 1: skip the leading zeros, then extract prec digits
        k = log10_floor((1 << -exp) // rem)
        b = 10 ** (k + prec)
        t = rem * b
        r, rem2, e = t >> -exp, t & mask, -(k + prec)
    if rem2:
        # something was discarded below the kept digits: round up
        r += 1
    s = str(r)
    assert prec <= len(s) <= prec + 1
    if len(s) > prec:
        # rounding overflowed into an extra digit; drop it, shift exponent
        s = s[:-1]
        e += 1
    e += prec - 1
    s = f'{s[0]}.{s[1:]}'
    if e == 0:
        return s
    return s + ('e+' if e > 0 else 'e') + str(e)
def str_of_pos_float_lo(prec, x):
    """Render positive float x with prec significant digits, rounded down.

    Same integer decomposition as str_of_pos_float_hi1, but truncating,
    so no remainder tracking is needed for the final digit.
    """
    assert x > 0
    # exact decomposition: x == m * 2**exp with m a 53-bit integer
    m, exp = math.frexp(x)
    m, exp = int(math.ldexp(m, 53)), exp - 53
    if exp >= 0:
        n, rem = m << exp, 0
    else:
        mask = (1 << abs(exp)) - 1
        n, rem = m >> -exp, m & mask
    if n > 0:
        # k = number of decimal digits in the integer part
        k = log10_floor(n) + 1
        if k >= prec:
            b = 10 ** (k - prec)
            r, e = n // b, k - prec
        else:
            b = 10 ** (prec - k)
            t = (rem * b) >> -exp
            r, e = n * b + t, k - prec
    else:
        # x < 1: skip the leading zeros, then extract prec digits
        k = log10_floor((1 << -exp) // rem)
        b = 10 ** (k + prec)
        t = rem * b
        # NOTE(review): t is computed above but the next line recomputes
        # rem * b instead of reusing it -- harmless, likely an oversight.
        r, e = (rem * b) >> -exp, -(k + prec)
    s = str(r)
    assert len(s) == prec
    e += prec - 1
    s = f'{s[0]}.{s[1:]}'
    if e == 0:
        return s
    return s + ('e+' if e > 0 else 'e') + str(e)
# print(str_of_pos_float_hi(2, 230454523525e+100))
def decimal_test_hi(prec, x, s=None):
    """Cross-check a round-up rendering of *x* against the decimal module.

    Rounds *x* up to *prec* significant digits with ROUND_UP and prints a
    diagnostic if the string *s* (computed via str_of_pos_float_hi1 when
    not supplied) does not denote the same value.
    """
    if s is None:
        s = str_of_pos_float_hi1(prec, x)
    with dec.localcontext() as ctx:
        ctx.prec = prec
        ctx.rounding = dec.ROUND_UP
        v, t = +dec.Decimal(x), +dec.Decimal(s)
    if t != v:
        print(f'Error (hi): decimal = {v}, my = {s} (prec = {prec}, x = {x})')
def decimal_test_lo(prec, x, s=None):
    """Cross-check the round-down formatter against decimal's ROUND_DOWN mode.

    When `s` is omitted it is produced by str_of_pos_float_lo; a mismatch
    with the decimal reference is reported on stdout.
    """
    s = str_of_pos_float_lo(prec, x) if s is None else s
    with dec.localcontext() as ctx:
        ctx.rounding = dec.ROUND_DOWN
        ctx.prec = prec
        v, t = +dec.Decimal(x), +dec.Decimal(s)
    if v != t:
        print(f'Error (lo): decimal = {v}, my = {s} (prec = {prec}, x = {x})')
def tests(n, a, b):
    """Run n randomized round-trip checks with x drawn uniformly from [a, b)
    and a random precision in 1..15, in both rounding directions."""
    for _ in range(n):
        value = uniform(a, b)
        digits = randint(1, 15)
        decimal_test_hi(digits, value)
        decimal_test_lo(digits, value)
def tests2(n):
    """Run n randomized checks on exact powers of two (2**-100 .. 2**100),
    which exercise the formatter's binary-fraction edge cases."""
    for _ in range(n):
        digits = randint(1, 15)
        power = randint(-100, 100)
        value = 2.0 ** power
        decimal_test_hi(digits, value)
        decimal_test_lo(digits, value)
# Randomized self-tests across several magnitude ranges, plus power-of-two
# edge cases (tests2). Each call performs 10000 comparisons per direction.
tests(10000, 1e-300, 1)
tests(10000, 0.5, 1000)
tests(10000, 1e+10, 1e+100)
tests(10000, 1e-300, 1e+300)
tests2(10000)
#print(str_of_pos_float_hi1(1, 0.47))
#print(str_of_pos_float_hi1(1, 0.5))
# print(str_of_pos_float_hi1(100, 0.3))
def check_ocaml_results(fname):
    """Validate a results file produced by the OCaml implementation.

    Each line is CSV: x, prec, hi0, hi1, lo — two round-up strings and one
    round-down string, all checked against the decimal reference.
    """
    print(f'Checking: {fname}')
    with open(fname, 'r') as f:
        for line in f:
            x, prec, s0, s1, s_lo = line.strip().split(',')
            precision, value = int(prec), float(x)
            decimal_test_hi(precision, value, s0)
            decimal_test_hi(precision, value, s1)
            decimal_test_lo(precision, value, s_lo)
check_ocaml_results('out.txt') | 24.940299 | 82 | 0.448235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.105326 |
934494c2a4fc3ddbe6eb8bc04fe6967de3193ecb | 876 | py | Python | setup.py | Vanderbeck/example_pkg | 542ae9f8b7283b2984cf569ee040fdcfc8c9d732 | [
"MIT"
] | null | null | null | setup.py | Vanderbeck/example_pkg | 542ae9f8b7283b2984cf569ee040fdcfc8c9d732 | [
"MIT"
] | null | null | null | setup.py | Vanderbeck/example_pkg | 542ae9f8b7283b2984cf569ee040fdcfc8c9d732 | [
"MIT"
] | null | null | null | import setuptools
# Use the README as the package's long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name="example-pkg-vanderbeck",  # Replace with your own username
    version="0.0.1",
    author="Lindsay Vanderbeck",
    author_email="lindsay.vanderbeck@live.ca",
    description="A small example package",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Vanderbeck/example_pkg.git",
    # Automatically discover all packages under the project root.
    packages=setuptools.find_packages(),
    install_requires=[
        'regex'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
    ],
    python_requires='>=3.5',
    # entry_points = {
    #     'console_scripts' : ['example_pkg = myscript.myscript:main']
    # },
)
| 30.206897 | 70 | 0.646119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.495434 |
9346f4ae3f51e0598235db78c109cd4747e2c15f | 1,820 | py | Python | wiki-parse/node.py | mvwicky/wiki-parse | 1c19e1c771b9dc96cf77fdaa4009dbd97619cc72 | [
"MIT"
] | null | null | null | wiki-parse/node.py | mvwicky/wiki-parse | 1c19e1c771b9dc96cf77fdaa4009dbd97619cc72 | [
"MIT"
] | null | null | null | wiki-parse/node.py | mvwicky/wiki-parse | 1c19e1c771b9dc96cf77fdaa4009dbd97619cc72 | [
"MIT"
] | null | null | null | import os
import random
import sys
import time
from typing import ClassVar, List
from urllib.parse import urlsplit
import attr
from bs4 import BeautifulSoup
import requests
# Epsilon value
EPS = sys.float_info.epsilon
def req(url, verbose=False):
    """Make a request, sleeping for a random period of time afterwards."""
    response = requests.get(url)
    # Random delay in (0, 2.5] seconds; EPS keeps it strictly positive.
    delay = 2.5 * (random.random() + EPS)
    if verbose:
        print(delay)
    time.sleep(delay)
    return response
@attr.s(slots=True)
class WikiNode(object):
    """A Graph Node
    TODO: Change outpaths to be a list of integers, indices to a global list
    Maybe just change everything to indices"""
    # Base URL prepended to relative /wiki/... hrefs found on a page.
    wiki_url: ClassVar[str] = 'https://en.wikipedia.org'
    link: str = attr.ib(type=str)
    level: int = attr.ib(type=int)
    out_paths: List[str] = attr.ib(default=attr.Factory(list), type=list)
    @property
    def page_name(self) -> str:
        # Final path component of the article URL (".../wiki/Foo" -> "Foo").
        return os.path.split(urlsplit(self.link).path)[1]
    @staticmethod
    def wiki_links(tag) -> bool:
        # BeautifulSoup filter: keep only internal article links, skipping
        # the main page and namespaced pages (hrefs containing ':').
        href = tag.attrs.get('href')
        if href is None:
            return False
        if 'Main_Page' in href:
            return False
        return href.startswith('/wiki') and (':' not in href)
    @classmethod
    def with_links(cls, url):
        # NOTE(review): cls(url) omits the required `level` attribute, so
        # attrs' generated __init__ will raise TypeError — TODO confirm/fix.
        ret = cls(url)
        ret.get_links()
        return ret
    def find_links(self):
        # Fetch this node's page and return the set of outgoing article
        # links (absolute URLs), excluding a self-link.
        links = set()
        res = req(self.link)
        if res.status_code != requests.codes.ok:
            return links
        soup = BeautifulSoup(res.content, 'lxml')
        for link in soup(self.wiki_links):
            links.add(''.join((self.wiki_url, link['href'])))
        links -= {self.link}
        return links
    def get_links(self):
        # Populate out_paths with the links discovered on this page.
        self.out_paths.extend(self.find_links())
| 26.376812 | 76 | 0.626374 | 1,282 | 0.704396 | 0 | 0 | 1,302 | 0.715385 | 0 | 0 | 334 | 0.183516 |
934773febc7f5decd256c9aba66a91ef1956f628 | 761 | py | Python | setup.py | shubhamjain/quick-grayscale | 470f726ec93224ffff57a3d7ba58fcf849fb4617 | [
"MIT"
] | 14 | 2018-11-26T01:32:21.000Z | 2021-11-09T11:04:34.000Z | setup.py | shubhamjain/quick-grayscale | 470f726ec93224ffff57a3d7ba58fcf849fb4617 | [
"MIT"
] | 1 | 2019-03-28T21:38:56.000Z | 2019-03-30T21:34:05.000Z | setup.py | shubhamjain/quick-grayscale | 470f726ec93224ffff57a3d7ba58fcf849fb4617 | [
"MIT"
] | 1 | 2018-12-05T01:10:55.000Z | 2018-12-05T01:10:55.000Z | """
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
# py2app build configuration for the QuickGrayscale menu-bar app.
APP = ["quick-gray.py"]
APP_NAME = "QuickGrayscale"
# Menu-bar icons bundled with the app (light and dark variants).
DATA_FILES = ["status-bar-logo.png", "status-bar-logo--dark.png"]

OPTIONS = {
    "iconfile": "./assets/gq.icns",
    "plist": {
        "CFBundleName": "QuickGrayscale",
        "CFBundleDisplayName": "Quick Grayscale",
        "CFBundleIdentifier": "com.shubham.quickgrayscale",
        # Fixed: the original literal was `"0.1.2,` (missing closing quote),
        # which made this file a SyntaxError.
        "CFBundleVersion": "0.1.2",
        "CFBundleShortVersionString": "0.1.2",
        "LSUIElement": True  # Launch on startup
    },
    "packages": ["rumps"]
}

setup(
    name="Quick Grayscale",
    app=APP,
    data_files=DATA_FILES,
    options={"py2app": OPTIONS},
    setup_requires=["py2app"]
)
] | 1 | 2020-05-24T06:55:31.000Z | 2020-05-24T06:55:31.000Z | Machine Learning/unsupervised_class3/test_stochastic_tensor.py | Ashleshk/Machine-Learning-Data-Science-Deep-Learning | 03357ab98155bf73b8f1d2fd53255cc16bea2333 | [
"MIT"
] | null | null | null | Machine Learning/unsupervised_class3/test_stochastic_tensor.py | Ashleshk/Machine-Learning-Data-Science-Deep-Learning | 03357ab98155bf73b8f1d2fd53255cc16bea2333 | [
"MIT"
] | 1 | 2020-03-16T13:11:14.000Z | 2020-03-16T13:11:14.000Z | # https://deeplearningcourses.com/c/deep-learning-gans-and-variational-autoencoders
# https://www.udemy.com/deep-learning-gans-and-variational-autoencoders
# a simple script to see what StochasticTensor outputs
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# NOTE(review): uses tf.contrib, which only exists in TensorFlow 1.x.
st = tf.contrib.bayesflow.stochastic_tensor
Normal = tf.contrib.distributions.Normal
# sample N samples from N(5,3*3)
N = 10000
mean = np.ones(N)*5
scale = np.ones(N)*3
I = tf.Variable(np.ones(N))
# SampleValue makes the StochasticTensor materialize as a draw from Normal.
with st.value_type(st.SampleValue()):
    X = st.StochasticTensor(Normal(loc=mean, scale=scale))
# cannot session.run a stochastic tensor
# but we can session.run a tensor
Y = I * X
init_op = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(init_op)
    Y_val = session.run(Y)
# Sample statistics should be near the distribution's mean 5 and std 3.
print("Sample mean:", Y_val.mean())
print("Sample std dev:", Y_val.std())
plt.hist(Y_val, bins=20)
plt.show()
| 24.409091 | 83 | 0.746741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.395717 |
934a29b592905e47673af9f0dc927945ba059a11 | 3,809 | py | Python | osu/apiV1.py | LostPy/osu-api.py | 695146217c56b7fe414e1c07cd97260a728d96d9 | [
"MIT"
] | 1 | 2021-01-13T00:18:42.000Z | 2021-01-13T00:18:42.000Z | osu/apiV1.py | LostPy/osu-api.py | 695146217c56b7fe414e1c07cd97260a728d96d9 | [
"MIT"
] | null | null | null | osu/apiV1.py | LostPy/osu-api.py | 695146217c56b7fe414e1c07cd97260a728d96d9 | [
"MIT"
] | 1 | 2021-04-13T01:45:19.000Z | 2021-04-13T01:45:19.000Z | """
Description: A Python module to easily use the osu! API v1.
Author: LostPy
License: MIT
Date: 2021-01-11
"""
import requests as req
import json
from . import from_json
# Endpoint URL for each osu! API v1 resource, keyed by short name.
base_url ='https://osu.ppy.sh/api'
urls = {
    'beatmaps': base_url + '/get_beatmaps?',
    'user': base_url + '/get_user?',
    'scores': base_url + '/get_scores?',
    'user_best': base_url + '/get_user_best?',
    'user_recent': base_url + '/get_user_recent?',
    'match': base_url + '/get_match?',
    'replay': base_url + '/get_replay?'
}
def get_beatmaps(key: str, since: str = None, beatmapset_id: int = None, beatmap_id: int = None, type_return: str = 'dict', **kwargs):
    """Retrieve general beatmap information.

    key: the osu! API key. since/beatmapset_id/beatmap_id narrow the query.
    Optional kwargs: user, type_, mode, a (include converted, default 0),
    h (beatmap hash), limit (default 500), mods.
    Returns the parsed response in the format chosen by type_return.
    """
    # kwargs.get() replaces the repetitive "x if 'x' in kwargs else d" checks.
    params = {
        'k': key,
        'since': since,
        's': beatmapset_id,
        'b': beatmap_id,
        'u': kwargs.get('user'),
        'type': kwargs.get('type_'),
        'mode': kwargs.get('mode'),
        'a': kwargs.get('a', 0),
        'h': kwargs.get('h'),
        'limit': kwargs.get('limit', 500),
        'mods': kwargs.get('mods')}
    r = req.get(urls['beatmaps'], params=params)
    return from_json(r.text, type_return)
def get_user(key: str, user: int, type_return: str = 'dict', **kwargs):
    """Retrieve general user information.

    Optional kwargs: mode (default 0), type_, event_days (default 1).
    Returns the parsed response in the format chosen by type_return.
    """
    # kwargs.get() replaces the repetitive "x if 'x' in kwargs else d" checks.
    params = {
        'k': key,
        'u': user,
        'm': kwargs.get('mode', 0),
        'type': kwargs.get('type_'),
        'event_days': kwargs.get('event_days', 1)}
    r = req.get(urls['user'], params=params)
    return from_json(r.text, type_return)
def get_scores(key: str, beatmap_id: int, user: int = None, type_return: str = 'dict', **kwargs):
    """Retrieve information about the top 100 scores of a specified beatmap.

    Optional kwargs: mode (default 0), mods (default 0), type_,
    limit (default 50).
    """
    # kwargs.get() replaces the repetitive "x if 'x' in kwargs else d" checks.
    params = {
        'k': key,
        'b': beatmap_id,
        'u': user,
        'm': kwargs.get('mode', 0),
        'mods': kwargs.get('mods', 0),
        'type': kwargs.get('type_'),
        'limit': kwargs.get('limit', 50)}
    r = req.get(urls['scores'], params=params)
    return from_json(r.text, type_return)
def get_user_best(key: str, user: int, mode: int = 0, limit: int = 10, type_: str = None, type_return: str = 'dict'):
    """Get the top scores for the specified user."""
    query = {'k': key, 'u': user, 'm': mode, 'limit': limit, 'type': type_}
    response = req.get(urls['user_best'], params=query)
    return from_json(response.text, type_return)
def get_user_recent(key: str, user: int, mode: int = 0, limit: int = 10, type_: str = None, type_return: str = 'dict'):
    """Gets the user's ten most recent plays over the last 24 hours."""
    query = {'k': key, 'u': user, 'm': mode, 'limit': limit, 'type': type_}
    response = req.get(urls['user_recent'], params=query)
    return from_json(response.text, type_return)
def get_match(key: str, match_id: int, type_return: str = 'dict'):
    """Retrieve information about a multiplayer match."""
    query = {'k': key, 'mp': match_id}
    response = req.get(urls['match'], query)
    return from_json(response.text, type_return)
def get_replay(key: str, beatmap_id: int, user: int, **kwargs):
    """Get the replay data of a user's score on a map.

    Optional kwargs: mode, score_id, type_, mods.
    Returns the decoded JSON response as a dict.
    """
    # kwargs.get() replaces the repetitive "x if 'x' in kwargs else d" checks.
    # NOTE(review): the 'type_' params key differs from the 'type' key used by
    # every other endpoint here — TODO confirm against the osu! API.
    params = {
        'k': key,
        'b': beatmap_id,
        'u': user,
        'm': kwargs.get('mode'),
        's': kwargs.get('score_id'),
        'type_': kwargs.get('type_'),
        'mods': kwargs.get('mods')}
    return json.loads(req.get(urls['replay'], params=params).text)
def get_cover_image(beatmapset_id: int):
    """Return url of cover image from beatmapset_id."""
    return "https://assets.ppy.sh/beatmaps/{0}/covers/cover.jpg".format(beatmapset_id)
def get_profile_image(user_id: int):
    """Return url of profile image of user."""
    return "http://s.ppy.sh/a/" + str(user_id)
| 30.96748 | 134 | 0.664479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,392 | 0.36545 |
934a5376ef2352c58f4fb9ed7be8a3b25bbe9a22 | 572 | py | Python | apps/show_plots.py | avdmitry/convnet | 4ae77c04e898643bb476e0604fab5682f23069d7 | [
"BSD-2-Clause"
] | 293 | 2015-01-01T12:23:24.000Z | 2022-03-28T19:34:17.000Z | apps/show_plots.py | weilaiyxj/convnet | 4ae77c04e898643bb476e0604fab5682f23069d7 | [
"BSD-2-Clause"
] | 16 | 2015-01-05T17:46:04.000Z | 2017-08-13T17:20:26.000Z | apps/show_plots.py | weilaiyxj/convnet | 4ae77c04e898643bb476e0604fab5682f23069d7 | [
"BSD-2-Clause"
] | 181 | 2015-01-04T18:06:45.000Z | 2021-07-30T05:37:36.000Z | import glob
import matplotlib.pyplot as plt
import numpy as np
import sys
plt.ion()
# Collect training/validation log files from the directory given on argv[1].
data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_train.log'))
valid_data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_valid.log'))
# Training logs have 3 columns; plots 1 - last column (presumably
# error = 1 - accuracy with column 0 as the step — TODO confirm).
for fname in data_files:
    data = np.loadtxt(fname).reshape(-1, 3)
    name = fname.split('/')[-1]
    plt.plot(data[:, 0], 1-data[:, 2], label=name)
# Validation logs have 2 columns; same 1 - value transform.
for fname in valid_data_files:
    data = np.loadtxt(fname).reshape(-1, 2)
    name = fname.split('/')[-1]
    plt.plot(data[:, 0], 1-data[:, 1], label=name)
plt.legend(loc=1)
# NOTE(review): raw_input exists only in Python 2; use input() under Python 3.
raw_input('Press Enter.')
| 23.833333 | 72 | 0.676573 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.118881 |
934b9553c57c50df3dbe6b024a339fd09db698e0 | 2,395 | py | Python | props/graph_representation/proposition.py | kshabahang/props | d3cc981f778185769b4dc2816aecaf66d21d0e91 | [
"MIT"
] | null | null | null | props/graph_representation/proposition.py | kshabahang/props | d3cc981f778185769b4dc2816aecaf66d21d0e91 | [
"MIT"
] | null | null | null | props/graph_representation/proposition.py | kshabahang/props | d3cc981f778185769b4dc2816aecaf66d21d0e91 | [
"MIT"
] | null | null | null | from props.dependency_tree.definitions import subject_dependencies, ARG_LABEL,\
object_dependencies, SOURCE_LABEL, domain_label, POSSESSED_LABEL,\
POSSESSOR_LABEL
class Proposition:
    """A predicate with labeled arguments, renderable as plain text or HTML.

    NOTE(review): __str__ uses a Python-2-only tuple-parameter lambda,
    so this class cannot run unmodified under Python 3.
    """
    def __init__(self,pred,args,outputType):
        # pred: predicate string; args: mutable [relation, argument] pairs;
        # outputType: "pdf" or "html" (selects markup in __str__).
        self.pred = pred
        self.args = args
        self.outputType = outputType
        # Rewrite possessive pronouns in possessor arguments ("his" -> "he").
        for ent in self.args:
            (rel,arg) = ent
            if rel == POSSESSOR_LABEL:
                ent[1] = fixPossessor(arg)
    def find_ent(self,ent):
        # Return indices of all arguments whose text contains `ent`.
        ret = []
        for i,(rel,arg) in enumerate(self.args):
            if ent in arg:
                ret.append(i)
        return ret
    def rel_order(self,rel):
        # Sort key for display: subjects/possessives first, then ARG,
        # objects, prepositional relations, everything else, sources last.
        if rel in subject_dependencies+[domain_label,POSSESSED_LABEL,POSSESSOR_LABEL]:
            return 0
        if rel == ARG_LABEL:
            return 1
        if rel in object_dependencies:
            return 2
        if rel.startswith("prep"):
            return 3
        if rel == SOURCE_LABEL:
            return 5
        else:
            return 4
    def __str__(self):
        # Pick formatting helpers by output mode: PDF is plain text, HTML
        # wraps the predicate in <b> and arguments in a colored <font>.
        PDF = (self.outputType == "pdf")
        HTML = (self.outputType == "html")
        if PDF:
            bold = lambda t:t
            color = lambda t,color:t
        if HTML:
            bold = lambda t:"<b>{0}</b>".format(t)
            color = lambda t,color:'<font color="{0}">{1}</font>'.format(color,t)
        curProp = r'{0}:({1})'.format(bold(self.pred),
                                      ", ".join([rel + ":" + bold(color(arg,"blue")) for rel,arg in sorted(self.args,key=lambda(rel,_):self.rel_order(rel))]))
        return curProp
# Possessive pronoun -> subject pronoun lookup table.
mapPossessive = {"my": "I",
                 "your": "you",
                 "its": "it",
                 "her": "she",
                 "his": "he",
                 "our": "we",
                 "their": "they"}

def fixPossessor(possessor):
    """
    fix phrasing in a given possessor node, such as "its -> it" "her -> she" "his -> he", etc.
    Unknown words are returned unchanged.
    """
    key = possessor.strip().lower()
    return mapPossessive.get(key, possessor)
# if not (len(possessor.text) == 1):
# return
#
# curWord = possessor.text[0].word.lower()
# possessor.text = [Word(index=possessor.text[0].index,
# word=mapPossessive.get(curWord, curWord))]
| 33.263889 | 159 | 0.499791 | 1,526 | 0.637161 | 0 | 0 | 0 | 0 | 0 | 0 | 505 | 0.210856 |
934bd76a6819cffc22fcfcbe6e22970e2f898fc2 | 4,263 | py | Python | hello.py | olibob/pyflasktuto | b49c435340bfe4346c4a91d8041dbf81d890d7d8 | [
"MIT"
] | null | null | null | hello.py | olibob/pyflasktuto | b49c435340bfe4346c4a91d8041dbf81d890d7d8 | [
"MIT"
] | null | null | null | hello.py | olibob/pyflasktuto | b49c435340bfe4346c4a91d8041dbf81d890d7d8 | [
"MIT"
] | null | null | null | from flask import Flask, request, render_template, session, redirect, url_for, flash
from flask_script import Manager, Shell
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_mail import Mail, Message
from datetime import datetime
import os
from threading import Thread
# Directory containing this file; used to locate the SQLite database.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
# flask-wtf CSRF protection secret key
# SECURITY NOTE(review): the secret is hardcoded; load it from the
# environment in production.
app.config['SECRET_KEY'] = 'hard to guess string'
# declare database URLS and commit after request
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Gmail SMTP settings; credentials come from the environment.
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[FLASKY]'
app.config['FLASKY_MAIL_SENDER'] = 'robby57@gmail.com'
# Optional admin address; when set, new-user notification emails are sent.
app.config['FLASKY_ADMIN'] = os.environ.get('FLASKY_ADMIN')
# Flask extension instances bound to the app.
bootstrap = Bootstrap(app)
manager = Manager(app)
moment = Moment(app)
# WTForms
class NameForm(FlaskForm):
    """Single-field form asking for the visitor's name."""
    name = StringField('What is your name?', validators=[Required()])
    submit = SubmitField('Submit')
# SQLAlchemy
db = SQLAlchemy(app)
class Role(db.Model):
    """A user role; one role is shared by many users (one-to-many)."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key = True)
    name = db.Column(db.String(64), unique = True)
    # Dynamic relationship: Role.users yields a query; User gets a .role backref.
    users = db.relationship('User', backref = 'role', lazy = 'dynamic')
    def __repr__(self):
        return '<Role %r>' % self.name
class User(db.Model):
    """An application user, identified by a unique username."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.String(64), unique = True, index = True)
    # Foreign key to the owning Role row.
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    def __repr__(self):
        return '<User %r>' % self.username
def make_shell_context():
    """Names pre-loaded into the Flask-Script shell session."""
    return {'app': app, 'db': db, 'User': User, 'Role': Role}
# Register the interactive shell with the context builder above.
manager.add_command('shell', Shell(make_context = make_shell_context))
# db Migration
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
# Mail
mail = Mail(app)
def send_async_email(app, msg):
    """Send `msg` from a worker thread, inside the app context mail needs."""
    with app.app_context():
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Render `template` (.txt and .html variants) with kwargs and send the
    resulting message asynchronously.

    Returns the worker Thread so callers may join() it.
    """
    full_subject = app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + subject
    msg = Message(full_subject,
                  sender = app.config['FLASKY_MAIL_SENDER'],
                  recipients = [to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    worker = Thread(target = send_async_email, args = [app, msg])
    worker.start()
    return worker
@app.route('/', methods=['GET', 'POST'])
def index():
    """Home page: ask for a name, persist it on first submit, greet after."""
    user_agent = request.headers.get('User-Agent')
    form = NameForm()
    if form.validate_on_submit():
        # Look the submitted name up; create the user on first encounter.
        user = User.query.filter_by(username=form.name.data).first()
        if user is None:
            user = User(username = form.name.data)
            db.session.add(user)
            session['known'] = False
            # Notify the admin (when configured) about the new user.
            if app.config['FLASKY_ADMIN']:
                send_email(app.config['FLASKY_ADMIN'], 'New User', 'mail/new_user', user=user)
        else:
            session['known'] = True
        session['name'] = form.name.data
        form.name.data = ''
        # Post/Redirect/Get: avoid re-submitting the form on refresh.
        return redirect(url_for('index'))
    return render_template('index.html',
                           user_agent=user_agent,
                           form=form,
                           name=session.get('name'),
                           known = session.get('known', False),
                           )
@app.route('/time')
def time():
    """Render time.html with the current UTC time."""
    now = datetime.utcnow()
    return render_template('time.html', current_time=now)
@app.route('/user/<name>')
def user(name):
    """Render user.html for the given name taken from the URL."""
    context = {'name': name}
    return render_template('user.html', **context)
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 page with the matching status code."""
    body = render_template('404.html')
    return body, 404
@app.errorhandler(500)
def internal_server_error(e):
    """Serve the custom 500 page with the matching status code."""
    body = render_template('500.html')
    return body, 500
if __name__ == '__main__':
    # Run the Flask-Script CLI (e.g. `python hello.py runserver` / `shell` / `db`).
    manager.run()
| 32.052632 | 94 | 0.683087 | 718 | 0.168426 | 0 | 0 | 1,256 | 0.294628 | 0 | 0 | 866 | 0.203143 |
934c2f4590fdde4bb9eb55db3bd4e1d99170a2a3 | 2,799 | py | Python | tap/tests/test_result.py | cans/tappy-pkg | d147b67fc0219e08a1d64a2cd3cd0c844fbc5ce5 | [
"BSD-2-Clause"
] | null | null | null | tap/tests/test_result.py | cans/tappy-pkg | d147b67fc0219e08a1d64a2cd3cd0c844fbc5ce5 | [
"BSD-2-Clause"
] | null | null | null | tap/tests/test_result.py | cans/tappy-pkg | d147b67fc0219e08a1d64a2cd3cd0c844fbc5ce5 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2015, Matt Layman
import os
import unittest
from tap.runner import TAPTestResult
class FakeTestCase(unittest.TestCase):
    """A do-nothing TestCase fixture: running or invoking it is a no-op."""
    def runTest(self):
        """No-op test body."""
        pass
    def __call__(self, result):
        """Swallow invocation so nothing is recorded on `result`."""
        pass
class TestTAPTestResult(unittest.TestCase):
    """Tests that TAPTestResult records one tracker line per test outcome."""
    @classmethod
    def _make_one(cls):
        # Build a TAPTestResult writing to a throwaway /dev/null stream.
        # Yep, the stream is not being closed.
        stream = open(os.devnull, 'w')
        result = TAPTestResult(stream, False, 0)
        return result
    def test_has_tracker(self):
        result = self._make_one()
        self.assertTrue(result.tracker is not None)
    def test_adds_error(self):
        result = self._make_one()
        # Python 3 does some extra testing in unittest on exceptions so fake
        # the cause as if it were raised.
        ex = Exception()
        ex.__cause__ = None
        result.addError(FakeTestCase(), (None, ex, None))
        self.assertEqual(len(result.tracker._test_cases['FakeTestCase']), 1)
    def test_adds_failure(self):
        result = self._make_one()
        # Python 3 does some extra testing in unittest on exceptions so fake
        # the cause as if it were raised.
        ex = Exception()
        ex.__cause__ = None
        result.addFailure(FakeTestCase(), (None, ex, None))
        self.assertEqual(len(result.tracker._test_cases['FakeTestCase']), 1)
    def test_adds_success(self):
        result = self._make_one()
        result.addSuccess(FakeTestCase())
        self.assertEqual(len(result.tracker._test_cases['FakeTestCase']), 1)
    def test_adds_skip(self):
        result = self._make_one()
        # addSkip did not exist before Python 2.7; tolerate its absence.
        try:
            result.addSkip(FakeTestCase(), 'a reason')
            self.assertEqual(
                len(result.tracker._test_cases['FakeTestCase']), 1)
        except AttributeError:
            self.assertTrue(True, 'Python 2.6 does not support skip.')
    def test_adds_expected_failure(self):
        result = self._make_one()
        # An expected failure should be tracked as "not ok" with a directive.
        try:
            result.addExpectedFailure(FakeTestCase(), (None, None, None))
            line = result.tracker._test_cases['FakeTestCase'][0]
            self.assertEqual(line.status, 'not ok')
            self.assertEqual(line.directive, '(expected failure)')
        except AttributeError:
            self.assertTrue(
                True, 'Python 2.6 does not support expected failure.')
    def test_adds_unexpected_success(self):
        result = self._make_one()
        # An unexpected success should be tracked as "ok" with a directive.
        try:
            result.addUnexpectedSuccess(FakeTestCase())
            line = result.tracker._test_cases['FakeTestCase'][0]
            self.assertEqual(line.status, 'ok')
            self.assertEqual(line.directive, '(unexpected success)')
        except AttributeError:
            self.assertTrue(
                True, 'Python 2.6 does not support unexpected success.')
| 33.321429 | 76 | 0.629511 | 2,694 | 0.962487 | 0 | 0 | 193 | 0.068953 | 0 | 0 | 555 | 0.198285 |
934da82fb900c36fd86daeb41307bafc6afe8b8f | 390 | py | Python | 1068.py | destinationunknown/CSES | 692b84ee8d52c5266dc1825e7d5447424cb93690 | [
"MIT"
] | 2 | 2021-06-22T03:33:45.000Z | 2021-09-29T18:19:40.000Z | 1068.py | destinationunknown/CSES | 692b84ee8d52c5266dc1825e7d5447424cb93690 | [
"MIT"
] | null | null | null | 1068.py | destinationunknown/CSES | 692b84ee8d52c5266dc1825e7d5447424cb93690 | [
"MIT"
] | 2 | 2020-08-04T18:11:53.000Z | 2021-07-12T10:12:15.000Z | # Weird Algorithm
# Consider an algorithm that takes as input a positive integer n. If n is even, the algorithm divides it by two, and if n is odd, the algorithm multiplies it by three and adds one. The algorithm repeats this, until n is one.
n = int(input())
print(n, end=" ")
# Collatz iteration: halve even values, map odd n -> 3n + 1, until n is 1.
# Every visited value (including the starting n) is printed space-separated.
while n != 1:
    if n % 2 == 0:
        n = n // 2
    else:
        n = (n * 3) + 1
    print(n, end=" ")
| 26 | 224 | 0.607692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.633333 |
934dcd3bfdb0c7406bd85e5c81f4e80b7bdbab85 | 5,868 | py | Python | benchmark.py | ceshine/small-file-benchmark | 70c8e1c7400e45c7d99e7f89e77d510efa23fbfd | [
"MIT"
] | null | null | null | benchmark.py | ceshine/small-file-benchmark | 70c8e1c7400e45c7d99e7f89e77d510efa23fbfd | [
"MIT"
] | null | null | null | benchmark.py | ceshine/small-file-benchmark | 70c8e1c7400e45c7d99e7f89e77d510efa23fbfd | [
"MIT"
] | null | null | null | """Simple Benchmark of Reading Small Files From Disk
Usage:
benchmark.py (-h | --help)
benchmark.py init COUNT
benchmark.py (create|test) (flat|two_level|four_level|memmap) [--size=<size>]
Arguments:
COUNT The number of files to be created. Supports scientific notation (e.g. 3e5).
Options:
-h --help Show this screen.
  --size=<size>  The size of randomly generated arrays [default: 256].
"""
from pathlib import Path
import uuid
import time
import numpy as np
from tqdm import tqdm
from docopt import docopt
Path("cache").mkdir(exist_ok=True)
def create_filelist(n):
    """Generate n random hex file names and persist them to cache/filelist.npy."""
    names = np.array([uuid.uuid4().hex for _ in range(n)])
    np.save("cache/filelist.npy", names)
    # Sanity check: the round-tripped array has exactly n entries.
    reloaded = np.load("cache/filelist.npy")
    assert reloaded.shape[0] == n
def create_files_flat(size):
    """Write every file from the list into a single flat directory (cache/flat/)."""
    Path("cache/flat/").mkdir(exist_ok=True)
    files = np.load("cache/filelist.npy")
    np.random.seed(515)
    for name in tqdm(files):
        # Each array holds between 10 and 10 + 5*size random float32 values.
        np.save(
            f"cache/flat/{name}.npy",
            np.random.random(
                int(10 + np.random.random() * 5 * size
                )).astype(np.float32)
        )
def create_files_two_level(size):
    """Shard files into 16**3 subdirectories keyed by the last 3 hex chars of each name."""
    Path("cache/2level/").mkdir(exist_ok=True)
    files = np.load("cache/filelist.npy")
    # Pre-create all 4096 three-hex-digit bucket directories.
    for i in range(16**3):
        Path("cache/2level/%03x" % i).mkdir(exist_ok=True)
    np.random.seed(515)
    for name in tqdm(files):
        # Same random sizes as the flat layout (seeded identically).
        np.save(
            f"cache/2level/{name[-3:]}/{name}.npy",
            np.random.random(
                int(10 + np.random.random() * 5 * size
                )).astype(np.float32)
        )
def create_files_four_level(size):
    """Shard files across a 16x16x16 directory tree keyed by the name's last 3 hex chars."""
    Path("cache/4level/").mkdir(exist_ok=True)
    files = np.load("cache/filelist.npy")
    # Pre-create the full 3-deep tree of single-hex-digit directories.
    for i in range(16):
        for j in range(16):
            for k in range(16):
                Path(f"cache/4level/{i:x}/{j:x}/{k:x}/").mkdir(
                    exist_ok=True, parents=True)
    np.random.seed(515)
    for name in tqdm(files):
        # Path components are the name's last three hex chars, last char first.
        np.save(
            f"cache/4level/{name[-1]}/{name[-2]}/{name[-3]}/{name}.npy",
            np.random.random(
                int(10 + np.random.random() * 5 * size
                )).astype(np.float32)
        )
def create_files_memmap(size):
    """Store all arrays as fixed-size float32 rows of one memory-mapped file."""
    Path("cache/memmap/").mkdir(exist_ok=True)
    files = np.load("cache/filelist.npy")
    # One row of `size` float32 values per file name, written in C order.
    arr = np.memmap(
        "cache/memmap/arr.npy", mode="w+", order="C",
        dtype="float32", shape=(files.shape[0], size))
    np.random.seed(515)
    for i in tqdm(range(files.shape[0])):
        arr[i] = np.random.random(size).astype(np.float32)
    arr.flush()
def test_flat(size):
    """Benchmark random-order reads from the flat layout; prints stats and elapsed minutes."""
    print("Testing flat structure...")
    files = np.load("cache/filelist.npy")
    # # Check if the size match
    # tmp = np.load(f"cache/flat/{files[0]}.npy")
    # assert tmp.shape[0] == size
    # Fixed seed so every layout's benchmark visits files in the same order.
    np.random.seed(515)
    np.random.shuffle(files)
    means = []
    start_time = time.time()
    for name in tqdm(files):
        means.append(np.mean(
            np.load(f"cache/flat/{name}.npy")))
    print(np.max(means), np.mean(means), np.min(means))
    print(f"Took {(time.time() - start_time) / 60:.2f} Minutes")
def test_two_level(size):
    """Benchmark random-order reads from the two-level layout; prints stats and elapsed minutes."""
    print("Testing two-level structure...")
    files = np.load("cache/filelist.npy")
    # # Check if the size match
    # tmp = np.load(f"cache/2level/{files[0][-3:]}/{files[0]}.npy")
    # assert tmp.shape[0] == size
    # Fixed seed so every layout's benchmark visits files in the same order.
    np.random.seed(515)
    np.random.shuffle(files)
    means = []
    start_time = time.time()
    for name in tqdm(files):
        means.append(np.mean(
            np.load(f"cache/2level/{name[-3:]}/{name}.npy")))
    print(np.max(means), np.mean(means), np.min(means))
    print(f"Took {(time.time() - start_time) / 60:.2f} Minutes")
def test_four_level(size):
    """Benchmark random-order reads from the four-level layout; prints stats and elapsed minutes."""
    print("Testing four-level structure...")
    files = np.load("cache/filelist.npy")
    # # Check if the size match
    # tmp = np.load(
    #     f"cache/4level/{files[0][-1]}/{files[0][-2]}"
    #     f"/{files[0][-3]}/{files[0]}.npy")
    # assert tmp.shape[0] == size
    # Fixed seed so every layout's benchmark visits files in the same order.
    np.random.seed(515)
    np.random.shuffle(files)
    means = []
    start_time = time.time()
    for name in tqdm(files):
        # Path components mirror create_files_four_level: last char first.
        means.append(np.mean(
            np.load(
                f"cache/4level/{name[-1]}/"
                f"{name[-2]}/{name[-3]}/{name}.npy"
            )))
    print(np.max(means), np.mean(means), np.min(means))
    print(f"Took {(time.time() - start_time) / 60:.2f} Minutes")
def test_memmap(size):
    """Benchmark random-order row reads from the single memory-mapped file."""
    files = np.load("cache/filelist.npy")
    means = []
    start_time = time.time()
    # Re-open the memmap read-only with the same shape it was written with.
    arr = np.memmap(
        "cache/memmap/arr.npy", mode="r", order="C", dtype="float32",
        shape=(files.shape[0], size))
    idx = np.arange(files.shape[0])
    # Fixed seed so every layout's benchmark visits rows in the same order.
    np.random.seed(515)
    np.random.shuffle(idx)
    for i in tqdm(idx):
        means.append(np.mean(arr[i]))
    print(np.max(means), np.mean(means), np.min(means))
    print(f"Took {(time.time() - start_time) / 60:.2f} Minutes")
if __name__ == "__main__":
    # Parse the CLI according to the module docstring's usage patterns.
    arguments = docopt(__doc__)
    print(arguments)
    if arguments["init"]:
        # COUNT may be scientific notation (e.g. 3e5), hence eval() then int().
        # SECURITY NOTE(review): eval() on a CLI argument executes arbitrary
        # expressions; float(...) would be safer.
        create_filelist(int(eval(arguments["COUNT"])))
    elif arguments["create"]:
        # NOTE(review): the usage string writes "[default:256]" without a
        # space, which docopt may not parse as a default, leaving
        # arguments["--size"] as None — TODO confirm.
        if arguments["flat"]:
            create_files_flat(int(arguments["--size"]))
        elif arguments["two_level"]:
            create_files_two_level(int(arguments["--size"]))
        elif arguments["four_level"]:
            create_files_four_level(int(arguments["--size"]))
        else:
            create_files_memmap(int(arguments["--size"]))
    elif arguments["test"]:
        if arguments["flat"]:
            test_flat(int(arguments["--size"]))
        elif arguments["two_level"]:
            test_two_level(int(arguments["--size"]))
        elif arguments["four_level"]:
            test_four_level(int(arguments["--size"]))
        else:
            test_memmap(int(arguments["--size"]))
| 30.092308 | 89 | 0.579925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,903 | 0.324301 |
934e7112faa4303ecb5d8b5fd5fd635de5c0afd2 | 568 | py | Python | openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | 87 | 2021-05-07T08:40:46.000Z | 2022-03-19T00:36:25.000Z | openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | 1,019 | 2021-04-26T06:22:56.000Z | 2022-03-31T16:30:43.000Z | openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | 33 | 2021-04-29T12:35:54.000Z | 2022-03-25T14:48:42.000Z | from pyblish import api
import openpype.api as pype
class IntegrateVersionUpWorkfile(api.ContextPlugin):
"""Save as new workfile version"""
order = api.IntegratorOrder + 10.1
label = "Version-up Workfile"
hosts = ["hiero"]
optional = True
active = True
def process(self, context):
project = context.data["activeProject"]
path = context.data.get("currentFile")
new_path = pype.version_up(path)
if project:
project.saveAs(new_path)
self.log.info("Project workfile was versioned up")
| 23.666667 | 58 | 0.653169 | 513 | 0.903169 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.22007 |
93504f36933a876f5106f9d86389b60f787974d5 | 1,321 | py | Python | bots/oauthbot.py | Git-Good-Team/zoomapi | 7fa0f318b9753dc742c46da4d33248bf4a5fadf3 | [
"Apache-2.0"
] | null | null | null | bots/oauthbot.py | Git-Good-Team/zoomapi | 7fa0f318b9753dc742c46da4d33248bf4a5fadf3 | [
"Apache-2.0"
] | 2 | 2020-04-04T00:30:29.000Z | 2020-04-04T00:31:51.000Z | bots/oauthbot.py | Git-Good-Team/zoomapi | 7fa0f318b9753dc742c46da4d33248bf4a5fadf3 | [
"Apache-2.0"
] | null | null | null | import sys, os
filename = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(1, filename)
from zoomapi import OAuthZoomClient
import json
from configparser import ConfigParser
from pyngrok import ngrok
# Read OAuth credentials and settings from the bot config file.
parser = ConfigParser()
parser.read("bots/bot.ini")
client_id = parser.get("OAuth", "client_id")
client_secret = parser.get("OAuth", "client_secret")
port = parser.getint("OAuth", "port", fallback=4001)
browser_path = parser.get("OAuth", "browser_path")
# SECURITY NOTE(review): this prints the OAuth client secret to stdout.
print(f'id: {client_id} secret: {client_secret} browser: {browser_path}')
# Expose the local OAuth callback port through an ngrok HTTP tunnel.
redirect_url = ngrok.connect(port, "http")
print("Redirect URL is", redirect_url)
client = OAuthZoomClient(client_id, client_secret, port, redirect_url, browser_path)
user_response = client.user.get(id='me')
user = json.loads(user_response.content)
print(user)
print ('---')
print(json.loads(client.meeting.list(user_id="me").content))
# NOTE(review): this first list() call's response is discarded — the request
# is made twice.
client.chat_channels.list()
channels = json.loads(client.chat_channels.list().content)["channels"]
print(channels)
# Find the channel named "test" and remember its id for the message loop.
for c in channels:
    print(c)
    if "test" in c.values():
        print("Found channel test", c["id"])
        # NOTE(review): chained assignment leaves a stray `to_channel` name,
        # the loop never breaks, and `cid` stays undefined (NameError later)
        # if no "test" channel exists — TODO confirm/handle.
        cid = to_channel=c["id"]
stop = False
while not stop:
message = input("Enter message: ")
print(client.chat_messages.post(to_channel=cid, message=message))
if message == "stop":
stop = True | 31.452381 | 84 | 0.72218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.197578 |
9352862472804182f6ff78dfa5acae79e4f0c5ce | 476 | py | Python | gist_set.py | devnoname120/gist-alfred | 5e918566254b6891241be3ad6e2f6c77959052ff | [
"MIT"
] | 113 | 2015-06-24T19:36:22.000Z | 2022-03-05T05:15:49.000Z | gist_set.py | devnoname120/gist-alfred | 5e918566254b6891241be3ad6e2f6c77959052ff | [
"MIT"
] | 18 | 2016-05-03T08:21:54.000Z | 2022-03-26T18:24:21.000Z | gist_set.py | devnoname120/gist-alfred | 5e918566254b6891241be3ad6e2f6c77959052ff | [
"MIT"
] | 13 | 2015-07-02T03:16:04.000Z | 2022-03-15T10:42:38.000Z | #!/usr/bin/python
# encoding: utf-8
from collections import Counter
from gist import create_workflow
from pprint import pprint as pp
import sys
import workflow
from workflow import Workflow, web
from workflow.background import run_in_background, is_running
def main(wf):
    """Emit a single Alfred result that lets the user set the API token.

    The first CLI argument is passed straight through as the item's arg.
    """
    query = wf.args[0]
    wf.add_item(u"Set token", arg=query, valid=True, icon="icons/token.png")
    wf.send_feedback()
# Script entry point: build the workflow object, run `main` under it, and
# propagate the workflow's exit status to the shell.
if __name__ == '__main__':
    wf = create_workflow()
    sys.exit(wf.run(main))
| 21.636364 | 74 | 0.735294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.153361 |
9357d0df25d16453f003333896fffe2c6ffc814d | 2,699 | py | Python | class_12/strategies/fixed_trade_price_strategy.py | taoranalex/course_codes | 75f76e61f560fb51e7db7bb3edbefaeaf01ead7e | [
"MIT"
] | 121 | 2020-12-12T02:21:21.000Z | 2022-03-29T06:43:42.000Z | class_12/strategies/fixed_trade_price_strategy.py | taoranalex/course_codes | 75f76e61f560fb51e7db7bb3edbefaeaf01ead7e | [
"MIT"
] | null | null | null | class_12/strategies/fixed_trade_price_strategy.py | taoranalex/course_codes | 75f76e61f560fb51e7db7bb3edbefaeaf01ead7e | [
"MIT"
] | 80 | 2020-12-20T09:41:46.000Z | 2022-03-22T10:00:16.000Z | from howtrader.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager
)
from howtrader.trader.constant import Interval
from datetime import datetime
from howtrader.app.cta_strategy.engine import CtaEngine, EngineType
import pandas_ta as ta
import pandas as pd
class FixedTradPriceStrategy(CtaTemplate):
    """
    Price-triggered dollar-cost-averaging strategy: buy a fixed amount of
    quote currency whenever the 4-hour close drops by ``price_change_pct``
    relative to the previous 4-hour close.
    """
    author = "51bitquant"
    fixed_trade_money = 1000  # quote-currency amount to spend on each buy
    price_change_pct = 0.05  # fractional 4h price drop that triggers a buy
    parameters = ['fixed_trade_money', 'price_change_pct']
    def __init__(self, cta_engine: CtaEngine, strategy_name, vt_symbol, setting):
        """Wire up the 4-hour bar aggregator and the rolling bar buffer."""
        super().__init__(cta_engine, strategy_name, vt_symbol, setting)
        self.bg_4hour = BarGenerator(self.on_bar, 4, self.on_4hour_bar, Interval.HOUR)
        self.am = ArrayManager(size=100)  # rolling window keeping the latest 100 bars
    def on_init(self):
        """
        Callback when strategy is inited.
        """
        self.write_log("策略初始化")
        self.load_bar(1)  # days of history to preload (1 = one day of bars)
    def on_start(self):
        """
        Callback when strategy is started.
        """
        self.write_log(f"我的策略启动")
        self.put_event()
    def on_stop(self):
        """
        Callback when strategy is stopped.
        """
        self.write_log("策略停止")
        self.put_event()
    def on_tick(self, tick: TickData):
        # Tick-level data is unused; decisions are bar-driven.
        pass
    def on_bar(self, bar: BarData):
        """
        Callback of new bar data update.
        """
        self.bg_4hour.update_bar(bar)  # aggregate incoming bars into 4-hour bars
        self.put_event()
    def on_4hour_bar(self, bar: BarData):
        """
        Fired once per completed 4-hour bar; holds the buy logic.
        """
        self.cancel_all()  # cancel all outstanding orders
        self.am.update_bar(bar)  # push the newest bar into the rolling window
        # Indicator computation and order placement happen below.
        if not self.am.inited:
            return
        # close_array is ordered oldest -> newest, e.g. [0,1,2,3,4,5,6]
        last_close_price = self.am.close_array[-2]  # previous 4h bar's close
        current_close_price = bar.close_price  # current close (== close_array[-1])
        # Buy when the 4h price dropped by at least price_change_pct.
        if (last_close_price - current_close_price)/last_close_price >= self.price_change_pct:
            price = bar.close_price * 1.001
            self.buy(price, self.fixed_trade_money/price)
        self.put_event()
    def on_order(self, order: OrderData):
        """
        Order callback: invoked whenever an order's status changes.
        """
        self.put_event()
    def on_trade(self, trade: TradeData):
        """
        Trade (fill) callback.
        """
        self.put_event()  # refresh the UI
    def on_stop_order(self, stop_order: StopOrder):
        """
        Stop-order callback for monitoring stop orders; unused here.
        """
        pass
| 24.761468 | 94 | 0.600963 | 2,785 | 0.885533 | 0 | 0 | 0 | 0 | 0 | 0 | 1,170 | 0.372019 |
9358a1bd65251378efa84d7f25d1fc162739f57e | 102 | py | Python | 3429.py | ssd352/quera-solutions | 7c7b572a26c3c1648f23528bcc661dec18b44943 | [
"MIT"
] | 1 | 2020-03-16T21:13:14.000Z | 2020-03-16T21:13:14.000Z | 3429.py | ssd352/quera-solutions | 7c7b572a26c3c1648f23528bcc661dec18b44943 | [
"MIT"
] | null | null | null | 3429.py | ssd352/quera-solutions | 7c7b572a26c3c1648f23528bcc661dec18b44943 | [
"MIT"
] | 2 | 2020-03-27T18:40:40.000Z | 2020-07-30T14:59:55.000Z | T = int(input())
# Classify the state of water at temperature T (read from stdin above).
if T < 0:
    state = 'Ice'
elif T > 100:
    state = 'Steam'
else:
    state = 'Water'
print(state)
| 12.75 | 18 | 0.519608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.186275 |
9358d7fabdaa2250dc44f570309b7f9d32d9b186 | 159 | py | Python | settings_template.py | WHOIGit/wip-comms-ifcb-imagedb | fa0fd9743e74d483e89cbd249519c8f70a1859d9 | [
"MIT"
] | null | null | null | settings_template.py | WHOIGit/wip-comms-ifcb-imagedb | fa0fd9743e74d483e89cbd249519c8f70a1859d9 | [
"MIT"
] | 1 | 2018-11-01T20:16:28.000Z | 2018-11-01T20:21:29.000Z | settings_template.py | WHOIGit/wip-comms-ifcb-imagedb | fa0fd9743e74d483e89cbd249519c8f70a1859d9 | [
"MIT"
# Connection parameters for the IFCB PostgreSQL database; `host` points at a
# local Unix-socket directory rather than a TCP host.
PSQL_CONNECTION_PARAMS = {
    'dbname': 'ifcb',
    'user': '******',  # placeholder -- fill in before use
    'password': '******',  # placeholder -- fill in before use
    'host': '/var/run/postgresql/'
}
# Root directory holding IFCB data files.
DATA_DIR = '/mnt/ifcb'
| 17.666667 | 34 | 0.496855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.534591 |
935a6e37fb1d18808927d59317499615e6e1d729 | 15,702 | py | Python | src/trainer.py | tpimentelms/neural-transducer | 6c99b0919aa84559b2c3aa970651f7d9696131e8 | [
"MIT"
] | null | null | null | src/trainer.py | tpimentelms/neural-transducer | 6c99b0919aa84559b2c3aa970651f7d9696131e8 | [
"MIT"
] | null | null | null | src/trainer.py | tpimentelms/neural-transducer | 6c99b0919aa84559b2c3aa970651f7d9696131e8 | [
"MIT"
] | null | null | null | import argparse
import glob
import os
import random
import re
from dataclasses import dataclass
from functools import partial
from math import ceil
from typing import List, Optional
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tqdm import tqdm
import util
# Disable tqdm's monitor thread and preset a compact bar format; the name
# `tqdm` is deliberately rebound to the pre-configured partial.
tqdm.monitor_interval = 0
tqdm = partial(tqdm, bar_format="{l_bar}{r_bar}")
# Dataset-split identifiers used throughout the trainer.
TRAIN = "train"
DEV = "dev"
TEST = "test"
class Optimizer(util.NamedEnum):
    """Optimizer choices accepted by the --optimizer CLI flag."""
    sgd = "sgd"
    adadelta = "adadelta"
    adam = "adam"
    amsgrad = "amsgrad"
class Scheduler(util.NamedEnum):
    """Learning-rate scheduler choices accepted by the --scheduler CLI flag."""
    reducewhenstuck = "reducewhenstuck"
    warmupinvsqr = "warmupinvsqr"
def setup_seed(seed):
    """Seed every RNG source (random, numpy, torch, and CUDA) for reproducibility."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
@dataclass
class Evaluation:
    """Record of one saved checkpoint: its path, dev loss, and eval metrics."""
    filepath: str  # path of the serialized model file
    devloss: float  # dev-set loss the checkpoint achieved
    evaluation_result: Optional[List[util.Eval]]  # per-metric eval results, if any
class BaseTrainer(object):
    """Generic training harness: CLI parsing, data/model hooks, the
    train/evaluate loop, LR scheduling, early stopping, checkpointing and
    checkpoint cleanup.

    Subclasses implement load_data, build_model, setup_evalutator, evaluate,
    decode and select_model.
    """
    def __init__(self):
        super().__init__()
        self.parser = argparse.ArgumentParser()
        self.set_args()
        self.params = self.get_params()
        util.maybe_mkdir(self.params.model)
        # The log file lives next to the model dump prefix.
        self.logger = util.get_logger(
            self.params.model + ".log", log_level=self.params.loglevel
        )
        for key, value in vars(self.params).items():
            self.logger.info("command line argument: %s - %r", key, value)
        setup_seed(self.params.seed)
        self.data = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = None
        self.optimizer = None
        self.min_lr = 0
        self.scheduler = None
        self.evaluator = None
        self.global_steps = 0
        self.last_devloss = float("inf")
        # Checkpoints saved so far, in epoch order.
        self.models: List[Evaluation] = list()
    def set_args(self):
        """
        Register all command-line flags on self.parser.
        """
        # fmt: off
        parser = self.parser
        parser.add_argument('--seed', default=0, type=int)
        parser.add_argument('--train', required=True, type=str, nargs='+')
        parser.add_argument('--dev', required=True, type=str, nargs='+')
        parser.add_argument('--test', default=None, type=str, nargs='+')
        parser.add_argument('--model', required=True, help='dump model filename')
        parser.add_argument('--load', default='', help='load model and continue training; with `smart`, recover training automatically')
        parser.add_argument('--bs', default=20, type=int, help='training batch size')
        parser.add_argument('--epochs', default=20, type=int, help='maximum training epochs')
        parser.add_argument('--max_steps', default=0, type=int, help='maximum training steps')
        parser.add_argument('--warmup_steps', default=4000, type=int, help='number of warm up steps')
        parser.add_argument('--total_eval', default=-1, type=int, help='total number of evaluation')
        parser.add_argument('--optimizer', default=Optimizer.adam, type=Optimizer, choices=list(Optimizer))
        parser.add_argument('--scheduler', default=Scheduler.reducewhenstuck, type=Scheduler, choices=list(Scheduler))
        parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
        parser.add_argument('--min_lr', default=1e-5, type=float, help='minimum learning rate')
        parser.add_argument('--momentum', default=0.9, type=float, help='momentum of SGD')
        parser.add_argument('--beta1', default=0.9, type=float, help='beta1 of Adam')
        parser.add_argument('--beta2', default=0.999, type=float, help='beta2 of Adam')
        parser.add_argument('--estop', default=1e-8, type=float, help='early stopping criterion')
        parser.add_argument('--cooldown', default=0, type=int, help='cooldown of `ReduceLROnPlateau`')
        parser.add_argument('--patience', default=0, type=int, help='patience of `ReduceLROnPlateau`')
        parser.add_argument('--discount_factor', default=0.5, type=float, help='discount factor of `ReduceLROnPlateau`')
        parser.add_argument('--max_norm', default=0, type=float, help='gradient clipping max norm')
        parser.add_argument('--gpuid', default=[], nargs='+', type=int, help='choose which GPU to use')
        parser.add_argument('--loglevel', default='info', choices=['info', 'debug'])
        parser.add_argument('--saveall', default=False, action='store_true', help='keep all models')
        parser.add_argument('--shuffle', default=False, action='store_true', help='shuffle the data')
        parser.add_argument('--cleanup_anyway', default=False, action='store_true', help='cleanup anyway')
        # fmt: on
    def get_params(self):
        """Parse and return the command-line arguments."""
        return self.parser.parse_args()
    def checklist_before_run(self):
        """Assert that every setup hook was called before training starts."""
        assert self.data is not None, "call load_data before run"
        assert self.model is not None, "call build_model before run"
        assert self.optimizer is not None, "call setup_training before run"
        assert self.scheduler is not None, "call setup_scheduler before run"
        assert self.evaluator is not None, "call setup_evalutator before run"
    def load_data(self, dataset, train, dev, test):
        """Load train/dev/test data; must be provided by the subclass."""
        raise NotImplementedError
    def build_model(self):
        """Construct self.model; must be provided by the subclass."""
        raise NotImplementedError
    def load_model(self, model):
        """Load a serialized model from `model` and return its epoch number.

        The epoch is recovered from the filename's trailing `_<epoch>` part.
        """
        assert self.model is None
        self.logger.info("load model in %s", model)
        self.model = torch.load(model, map_location=self.device)
        self.model = self.model.to(self.device)
        epoch = int(model.split("_")[-1])
        return epoch
    def smart_load_model(self, model_prefix):
        """Recover from checkpoints named
        `<prefix>.nll_<loss>.<desc>_<res>....epoch_<n>`: rebuild self.models
        in epoch order and load the most recent checkpoint.
        """
        assert self.model is None
        models = []
        for model in glob.glob(f"{model_prefix}.nll*"):
            # Pull the `name_number` chunks out of the filename suffix.
            res = re.findall(r"\w*_\d+\.?\d*", model[len(model_prefix) :])
            loss_ = res[0].split("_")
            evals_ = res[1:-1]
            epoch_ = res[-1].split("_")
            assert loss_[0] == "nll" and epoch_[0] == "epoch"
            loss, epoch = float(loss_[1]), int(epoch_[1])
            evals = []
            for ev in evals_:
                ev = ev.split("_")
                evals.append(util.Eval(ev[0], ev[0], float(ev[1])))
            models.append((epoch, Evaluation(model, loss, evals)))
        self.models = [x[1] for x in sorted(models)]
        return self.load_model(self.models[-1].filepath)
    def setup_training(self):
        """Build the optimizer and LR scheduler selected on the command line."""
        assert self.model is not None
        params = self.params
        if params.optimizer == Optimizer.sgd:
            self.optimizer = torch.optim.SGD(
                self.model.parameters(), params.lr, momentum=params.momentum
            )
        elif params.optimizer == Optimizer.adadelta:
            self.optimizer = torch.optim.Adadelta(self.model.parameters(), params.lr)
        elif params.optimizer == Optimizer.adam:
            self.optimizer = torch.optim.Adam(
                self.model.parameters(), params.lr, betas=(params.beta1, params.beta2)
            )
        elif params.optimizer == Optimizer.amsgrad:
            self.optimizer = torch.optim.Adam(
                self.model.parameters(),
                params.lr,
                betas=(params.beta1, params.beta2),
                amsgrad=True,
            )
        else:
            raise ValueError
        self.min_lr = params.min_lr
        if params.scheduler == Scheduler.reducewhenstuck:
            self.scheduler = ReduceLROnPlateau(
                self.optimizer,
                "min",
                patience=params.patience,
                cooldown=params.cooldown,
                factor=params.discount_factor,
                min_lr=params.min_lr,
            )
        elif params.scheduler == Scheduler.warmupinvsqr:
            self.scheduler = util.WarmupInverseSquareRootSchedule(
                self.optimizer, params.warmup_steps
            )
        else:
            raise ValueError
    def save_training(self, model_fp):
        """Dump optimizer and scheduler state to `<model_fp>.progress`."""
        save_objs = (self.optimizer.state_dict(), self.scheduler.state_dict())
        torch.save(save_objs, f"{model_fp}.progress")
    def load_training(self, model_fp):
        """Restore optimizer/scheduler state from `<model_fp>.progress` if present."""
        assert self.model is not None
        if os.path.isfile(f"{model_fp}.progress"):
            optimizer_state, scheduler_state = torch.load(f"{model_fp}.progress")
            self.optimizer.load_state_dict(optimizer_state)
            self.scheduler.load_state_dict(scheduler_state)
        else:
            self.logger.warning("cannot find optimizer & scheduler file")
    def setup_evalutator(self):
        """Build self.evaluator; must be provided by the subclass."""
        raise NotImplementedError
    def get_lr(self):
        """Return the current learning rate regardless of scheduler type."""
        if isinstance(self.scheduler, ReduceLROnPlateau):
            return self.optimizer.param_groups[0]["lr"]
        try:
            return self.scheduler.get_last_lr()[0]
        except AttributeError:
            # Older torch releases expose get_lr() instead of get_last_lr().
            return self.scheduler.get_lr()[0]
    def train(self, epoch_idx, batch_size, max_norm):
        """Run one training epoch; returns the running-average training loss.

        Non-plateau schedulers are stepped once per batch; ReduceLROnPlateau
        is stepped on dev loss in update_lr_and_stop_early instead.
        """
        logger, model = self.logger, self.model
        logger.info("At %d-th epoch with lr %f.", epoch_idx, self.get_lr())
        model.train()
        sampler, nb_batch = self.iterate_batch(TRAIN, batch_size)
        losses, cnt = 0, 0
        for batch in tqdm(sampler(batch_size), total=nb_batch):
            loss = model.get_loss(batch)
            self.optimizer.zero_grad()
            loss.backward()
            if max_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
            logger.debug(
                "loss %f with total grad norm %f",
                loss,
                util.grad_norm(model.parameters()),
            )
            self.optimizer.step()
            if not isinstance(self.scheduler, ReduceLROnPlateau):
                self.scheduler.step()
            self.global_steps += 1
            losses += loss.item()
            cnt += 1
        loss = losses / cnt
        self.logger.info(f"Running average train loss is {loss} at epoch {epoch_idx}")
        return loss
    def iterate_batch(self, mode, batch_size):
        """Return (batch sampler, number of batches) for the given split."""
        if mode == TRAIN:
            return (self.data.train_batch_sample, ceil(self.data.nb_train / batch_size))
        elif mode == DEV:
            return (self.data.dev_batch_sample, ceil(self.data.nb_dev / batch_size))
        elif mode == TEST:
            return (self.data.test_batch_sample, ceil(self.data.nb_test / batch_size))
        else:
            raise ValueError(f"wrong mode: {mode}")
    def calc_loss(self, mode, batch_size, epoch_idx) -> float:
        """Compute and log the average loss over the given split."""
        self.model.eval()
        sampler, nb_batch = self.iterate_batch(mode, batch_size)
        loss, cnt = 0.0, 0
        for batch in tqdm(sampler(batch_size), total=nb_batch):
            loss += self.model.get_loss(batch).item()
            cnt += 1
        loss = loss / cnt
        self.logger.info(f"Average {mode} loss is {loss} at epoch {epoch_idx}")
        return loss
    def iterate_instance(self, mode):
        """Return (instance sampler, number of instances) for the given split."""
        if mode == TRAIN:
            return self.data.train_sample, self.data.nb_train
        elif mode == DEV:
            return self.data.dev_sample, self.data.nb_dev
        elif mode == TEST:
            return self.data.test_sample, self.data.nb_test
        else:
            raise ValueError(f"wrong mode: {mode}")
    def evaluate(self, mode, epoch_idx, decode_fn) -> List[util.Eval]:
        """Compute evaluation metrics for a split; provided by the subclass."""
        raise NotImplementedError
    def decode(self, mode, write_fp, decode_fn):
        """Write decoded outputs for a split; provided by the subclass."""
        raise NotImplementedError
    def update_lr_and_stop_early(self, epoch_idx, devloss, estop):
        """Step ReduceLROnPlateau on dev loss and decide on early stopping.

        Returns True (stop) only when the dev-loss improvement falls below
        `estop` while the LR is already pinned at min_lr; other schedulers
        never trigger early stopping here.
        """
        stop_early = True
        if isinstance(self.scheduler, ReduceLROnPlateau):
            prev_lr = self.get_lr()
            self.scheduler.step(devloss)
            curr_lr = self.get_lr()
            if (
                self.last_devloss - devloss
            ) < estop and prev_lr == curr_lr == self.min_lr:
                self.logger.info(
                    "Early stopping triggered with epoch %d (previous dev loss: %f, current: %f)",
                    epoch_idx,
                    self.last_devloss,
                    devloss,
                )
                stop_status = stop_early
            else:
                stop_status = not stop_early
            self.last_devloss = devloss
        else:
            stop_status = not stop_early
        return stop_status
    def save_model(
        self, epoch_idx, devloss: float, eval_res: List[util.Eval], model_fp
    ):
        """Serialize the model, encoding loss/metrics/epoch in the filename."""
        eval_tag = "".join(["{}_{}.".format(e.desc, e.res) for e in eval_res])
        fp = f"{model_fp}.nll_{devloss:.4f}.{eval_tag}epoch_{epoch_idx}"
        torch.save(self.model, fp)
        self.models.append(Evaluation(fp, devloss, eval_res))
    def select_model(self):
        """Pick (best checkpoint, checkpoints to keep); provided by the subclass."""
        raise NotImplementedError
    def reload_and_test(self, model_fp, best_fp, bs, decode_fn):
        """Reload the best checkpoint, then report loss/metrics and write
        decoded outputs for dev (and test, when a test file is configured)."""
        self.model = None
        self.logger.info(f"loading {best_fp} for testing")
        self.load_model(best_fp)
        self.calc_loss(DEV, bs, -1)
        self.logger.info("decoding dev set")
        self.decode(DEV, f"{model_fp}.decode", decode_fn)
        results = self.evaluate(DEV, -1, decode_fn)
        if results:
            results = " ".join([f"{r.desc} {r.res}" for r in results])
            self.logger.info(f'DEV {model_fp.split("/")[-1]} {results}')
        if self.data.test_file is not None:
            self.calc_loss(TEST, bs, -1)
            self.logger.info("decoding test set")
            self.decode(TEST, f"{model_fp}.decode", decode_fn)
            results = self.evaluate(TEST, -1, decode_fn)
            if results:
                results = " ".join([f"{r.desc} {r.res}" for r in results])
                self.logger.info(f'TEST {model_fp.split("/")[-1]} {results}')
    def cleanup(self, saveall, save_fps, model_fp):
        """Delete all checkpoints except `save_fps` (unless saveall) and the
        training-progress file."""
        if not saveall:
            for model in self.models:
                if model.filepath in save_fps:
                    continue
                os.remove(model.filepath)
        os.remove(f"{model_fp}.progress")
    def run(self, start_epoch, decode_fn=None):
        """
        Main training loop: train for up to max_steps/epochs, periodically
        evaluate on dev, checkpoint, early-stop, then reload and test the
        best model and clean up stale checkpoints.
        """
        self.checklist_before_run()
        finish = False
        params = self.params
        steps_per_epoch = ceil(self.data.nb_train / params.bs)
        # --max_steps (when given) overrides --epochs.
        if params.max_steps > 0:
            max_epochs = ceil(params.max_steps / steps_per_epoch)
        else:
            max_epochs = params.epochs
            params.max_steps = max_epochs * steps_per_epoch
        self.logger.info(
            f"maximum training {params.max_steps} steps ({max_epochs} epochs)"
        )
        if params.total_eval > 0:
            eval_every = max(max_epochs // params.total_eval, 1)
        else:
            eval_every = 1
        self.logger.info(f"evaluate every {eval_every} epochs")
        for epoch_idx in range(start_epoch, max_epochs):
            self.train(epoch_idx, params.bs, params.max_norm)
            # Only evaluate/checkpoint on eval epochs (and the last epoch).
            if not (
                epoch_idx
                and (epoch_idx % eval_every == 0 or epoch_idx + 1 == max_epochs)
            ):
                continue
            with torch.no_grad():
                devloss = self.calc_loss(DEV, params.bs, epoch_idx)
                eval_res = self.evaluate(DEV, epoch_idx, decode_fn)
            if self.update_lr_and_stop_early(epoch_idx, devloss, params.estop):
                finish = True
                break
            self.save_model(epoch_idx, devloss, eval_res, params.model)
            self.save_training(params.model)
        if finish or params.cleanup_anyway:
            best_fp, save_fps = self.select_model()
            with torch.no_grad():
                self.reload_and_test(params.model, best_fp, params.bs, decode_fn)
            self.cleanup(params.saveall, save_fps, params.model)
| 39.852792 | 136 | 0.606483 | 15,079 | 0.960324 | 0 | 0 | 114 | 0.00726 | 0 | 0 | 2,265 | 0.144249 |
935c2fa61554e13fc7833d382f5ef594996fe9fc | 780 | py | Python | tools/opencv.py | michaelpdu/pytorch-CycleGAN-and-pix2pix | 7e7aa3fed935644f92c0e15f7de80ce0971bf510 | [
"BSD-3-Clause"
] | null | null | null | tools/opencv.py | michaelpdu/pytorch-CycleGAN-and-pix2pix | 7e7aa3fed935644f92c0e15f7de80ce0971bf510 | [
"BSD-3-Clause"
] | null | null | null | tools/opencv.py | michaelpdu/pytorch-CycleGAN-and-pix2pix | 7e7aa3fed935644f92c0e15f7de80ce0971bf510 | [
"BSD-3-Clause"
] | null | null | null | import cv2
import argparse
import numpy as np
def gray2bgr565(input_file, output_file):
img = np.fromfile(input_file, dtype=np.uint16)
img = img.reshape(480, 640)
# img = cv2.imread(input_file, cv2.IMREAD_ANYDEPTH)
ratio = np.amax(img) / 256
img8 = (img / ratio).astype('uint8')
img8 = cv2.cvtColor(img8, cv2.COLOR_GRAY2RGB)
cv2.imwrite(output_file, img8)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Command Usages of ImageHelper')
parser.add_argument("-i", "--input", type=str, help="input image dir")
parser.add_argument("-o", "--output", type=str, help="output image dir")
args = parser.parse_args()
if args.input:
gray2bgr565(args.input, args.output)
else:
parser.print_help() | 33.913043 | 81 | 0.680769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.20641 |
935c7d03b817d9b2ac9b204299f3a0bbc0e80536 | 2,354 | py | Python | UMS/views.py | rawheel/Django-User-Management-System | e5b14d2422fe5c5ea0359ded24ad229af1510050 | [
"MIT"
] | 2 | 2021-03-18T06:27:05.000Z | 2021-03-18T10:21:41.000Z | UMS/views.py | rawheel/Django-User-Management-System | e5b14d2422fe5c5ea0359ded24ad229af1510050 | [
"MIT"
] | null | null | null | UMS/views.py | rawheel/Django-User-Management-System | e5b14d2422fe5c5ea0359ded24ad229af1510050 | [
"MIT"
] | null | null | null | from django.shortcuts import render,redirect
from .forms import UserForm,RoleForm,RightsForm
from .models import UserTable,UserRole,UserRights
def show_users(request):
    """List all users (GET) or show one user's role and rights (POST).

    GET renders a dropdown of every user name; POST resolves the selected
    user's full name, role, and the rights attached to that role.
    """
    if request.method == "GET":
        users = list(UserTable.objects.values_list('user_name', flat=True).order_by('id'))
        return render(request, "UMS/show_users.html", {'users_list': users})
    # POST: resolve the selected user, their role, and the role's rights.
    value = request.POST['drop1']
    user = UserTable.objects.get(user_name=value)
    role = UserRole.objects.get(id=user.role_id)
    role_name = role.role_name
    # Defaults cover roles with no rights rows: the original only assigned
    # these inside the loop, so an empty queryset left them undefined and
    # the render below raised NameError (HTTP 500).
    rights = UserRights.objects.none()
    rights_name = 'No Rights Assigned!'
    rights_details = '-'
    try:
        rights = UserRights.objects.filter(role=user.role)
        for right in rights:
            # With several rights only the last one is surfaced in the summary
            # fields; the full queryset is still passed to the template.
            rights_name = right.rights_name
            rights_details = right.rights_details
    except Exception as e:  # best-effort: keep the defaults on DB errors
        print(e)
    full_name = f'{user.first_name} {user.last_name}'
    return render(request, "UMS/showdata.html", {'full_name': full_name, 'role_name': role_name, 'rights_name': rights_name, 'rights_details': rights_details, 'rights': rights})
def users_form(request):
    """Render the user-creation form (GET) or persist a submitted user (POST)."""
    if request.method != "GET":
        form = UserForm(request.POST)
        if not form.is_valid():
            print(form.errors)
            print("invalid")
        else:
            form.save()
        return redirect('/users')
    return render(request, "UMS/users_form.html", {'form': UserForm()})
def roles_form(request):
    """Render the role-creation form (GET) or persist a submitted role (POST)."""
    if request.method == "GET":
        form = RoleForm()
        return render(request, "UMS/roles_form.html", {'form': form})
    form = RoleForm(request.POST)
    if form.is_valid():
        form.save()
    else:
        # Surface validation failures like the sibling form views do,
        # instead of silently redirecting and dropping the submission.
        print(form.errors)
    return redirect('/roles')
def rights_form(request):
    """Render the rights-creation form (GET) or persist submitted rights (POST)."""
    if request.method == "GET":
        return render(request, "UMS/rights_form.html", {'form': RightsForm()})
    form = RightsForm(request.POST)
    if form.is_valid():
        form.save()
    else:
        print(form.errors)
    return redirect('/rights')
935e4e7c33e3cd032008f714cbcd252ea376fad9 | 99 | py | Python | game/entities/ship.py | alucardzom/pyxeltron | 314e5e4801c412c9a68cc49fa3c8977bf8f6366d | [
"MIT"
] | 1 | 2021-11-11T03:04:07.000Z | 2021-11-11T03:04:07.000Z | game/entities/ship.py | truizsanchez/pyxeltron | a219b7ef250517fee15ab39ac63b82f2752e875e | [
"MIT"
] | null | null | null | game/entities/ship.py | truizsanchez/pyxeltron | a219b7ef250517fee15ab39ac63b82f2752e875e | [
"MIT"
] | 1 | 2021-08-15T17:47:54.000Z | 2021-08-15T17:47:54.000Z | from engine.entities.base import BaseEntity
class Ship(BaseEntity):
    """Player ship entity with a fixed 8x8 bounding box."""
    WIDTH = 8  # bounding-box width -- presumably pixels; confirm against renderer
    HEIGHT = 8  # bounding-box height -- presumably pixels; confirm against renderer
| 14.142857 | 43 | 0.707071 | 52 | 0.525253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
935e6905b97081210e7bed7902277443d7f5464d | 3,201 | py | Python | tunacell/plotting/defs.py | HuggyHugMe/tunacell | 5a7a7a58bbb557098d6638e896aa784ecc37d639 | [
"MIT"
] | null | null | null | tunacell/plotting/defs.py | HuggyHugMe/tunacell | 5a7a7a58bbb557098d6638e896aa784ecc37d639 | [
"MIT"
] | 3 | 2017-08-10T11:19:01.000Z | 2019-08-11T11:11:00.000Z | tunacell/plotting/defs.py | HuggyHugMe/tunacell | 5a7a7a58bbb557098d6638e896aa784ecc37d639 | [
"MIT"
] | 5 | 2017-08-08T22:31:24.000Z | 2021-08-06T04:08:08.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
tunacell package
============
plotting/defs.py module
~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Default line-color cycle used by the plotting routines.
DEFAULT_COLORS = ('red', 'blue', 'purple', 'green', 'yellowgreen', 'cyan',
                  'magenta',
                  'indigo', 'darkorange', 'pink', 'yellow')
colors = DEFAULT_COLORS
# plotting parameters: per-observable axis bounds ('bottom'/'top'), tick
# spacing ('delta'), and an optional display 'unit' (LaTeX-formatted).
params = {'length': {
              'bottom': 1.,
              'top': 8.,
              'delta': 2.,
              'unit': '$\mu$m'
              },
          'dot_length': {
              'bottom': 1e-2,
              'top': 1e-1,
              'delta': 3e-2,
              'unit': '$\mu$m/hr'
              },
          'dotlog_length': {
              'bottom': 0.5,
              'top': 2.5,
              'delta': 0.5,
              'unit': 'dbs/hr'
              },
          'width': {
              'bottom': .5,
              'top': 1.5,
              'delta': .2,
              'unit': '$\mu$m'
              },
          'fluo': {
              'bottom': 1e5,
              'top': 2e6,
              'delta': 5e5,
              'unit': 'A.U.'
              },
          'dot_fluo': {
              'bottom': 1e2,
              'top': 5e4,
              'delta': 1e4,
              'unit': 'A.U./hr'
              },
          'dotlog_fluo': {
              'bottom': 0.1,
              'top': 3,
              'delta': 0.5,
              'unit': 'dbs/hr'
              },
          'concentration': {
              'bottom': 2e5,
              'top': 5e5,
              'delta': 1e5,
              },
          'volume': {
              'bottom': 0.,
              'top': 4.,
              'delta': 1.,
              'unit': '$\mu$m$^3$'
              },
          'area': {
              'bottom': 1.,
              'top': 8.,
              'delta': 2.,
              'unit': '$\mu$m$^2$'
              },
          'dotlog_area': {
              'bottom': 0.5,
              'top': 2.5,
              'delta': 0.5,
              'unit': 'dbs/hr'
              },
          'density': {
              'bottom': 1e5,
              'top': 4e5,
              'delta': 1e5
              },
          'ALratio': {
              'bottom': .1,
              'top': 1.5,
              'delta': .4,
              'unit': '$\mu$m'
              },
          'age': {
              'bottom': 0.,
              'top': 1.
              }
          }
def get_params(obs, params, *keys):
    """Return the values of *keys* from the plotting-parameter entry for *obs*."""
    entry = params[obs]
    return [entry[key] for key in keys]
935ef32a8b82613d0863e4ef3259b0459d1157b4 | 7,109 | py | Python | scripts/ilqr/iLQR.py | leoking99-BIT/Constrained_ILQR | 08346c0aa9eeb035ae6e3d6643ac9c119cb893d2 | [
"Apache-2.0"
] | 42 | 2020-03-06T08:19:38.000Z | 2022-03-26T16:36:00.000Z | scripts/ilqr/iLQR.py | leoking99-BIT/Constrained_ILQR | 08346c0aa9eeb035ae6e3d6643ac9c119cb893d2 | [
"Apache-2.0"
] | 1 | 2022-01-10T12:59:04.000Z | 2022-02-01T00:43:52.000Z | scripts/ilqr/iLQR.py | mengxingshifen1218/Constrained_ILQR | 08346c0aa9eeb035ae6e3d6643ac9c119cb893d2 | [
"Apache-2.0"
] | 16 | 2020-08-17T08:32:51.000Z | 2022-02-10T14:49:00.000Z | import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import pdb
import sys
from ilqr.vehicle_model import Model
from ilqr.local_planner import LocalPlanner
from ilqr.constraints import Constraints
class iLQR():
    """Iterative LQR trajectory optimizer for an MPC-style driving loop.

    State vectors appear to be [x, y, velocity, yaw] and controls
    [acceleration, yaw-rate-like steering input] -- inferred from how the
    A/B matrices and filter_control use the rows; confirm against Model.
    """
    def __init__(self, args, obstacle_bb, verbose=False):
        """Set up planner, vehicle model, constraints, and the nominal controls."""
        self.args = args
        self.Ts = args.timestep
        self.N = args.horizon
        self.tol = args.tol
        self.obstacle_bb = obstacle_bb
        self.verbose = verbose
        self.global_plan = None
        self.local_planner = LocalPlanner(args)
        self.vehicle_model = Model(args)
        self.constraints = Constraints(args, obstacle_bb)
        # initial nominal trajectory: zero controls except a constant first row
        self.control_seq = np.zeros((self.args.num_ctrls, self.args.horizon))
        self.control_seq[0, :] = np.ones((self.args.horizon)) * 0.5
        self.debug_flag = 0
        # Levenberg-Marquardt regularization growth factor and cap.
        self.lamb_factor = 10
        self.max_lamb = 1000
        # self.fig, (self.ax1, self.ax2, self.ax3) = plt.subplots(1,3, num=0, figsize=(20, 5))
    def set_global_plan(self, global_plan):
        """Store the global plan and hand it to the local planner."""
        self.global_plan = global_plan
        self.local_planner.set_global_planner(self.global_plan)
    def get_nominal_trajectory(self, X_0, U):
        """Roll out the dynamics from X_0 under U; returns (num_states, horizon+1) states."""
        X = np.zeros((self.args.num_states, self.args.horizon+1))
        X[:, 0] = X_0
        for i in range(self.args.horizon):
            X[:, i+1] = self.vehicle_model.forward_simulate(X[:, i], U[:, i])
        return X
    def forward_pass(self, X, U, k, K):
        """Roll out a new (X, U) applying feedforward k and feedback gains K."""
        X_new = np.zeros((self.args.num_states, self.args.horizon+1))
        X_new[:, 0] = X[:, 0]
        U_new = np.zeros((self.args.num_ctrls, self.args.horizon))
        # Do a forward rollout and get states at all control points
        for i in range(self.args.horizon):
            U_new[:, i] = U[:, i] + k[:, i] + K[:, :, i] @ (X_new[:, i] - X[:, i])
            X_new[:, i+1] = self.vehicle_model.forward_simulate(X_new[:, i], U_new[:, i])
        return X_new, U_new
    def backward_pass(self, X, U, poly_coeff, x_local_plan, npc_traj, lamb):
        """Backward Riccati-style recursion: returns feedforward k and gains K.

        `lamb` is the Levenberg-Marquardt regularizer added to Q_uu's
        (clamped non-negative) eigenvalues before inversion.
        """
        # Find control sequence that minimizes Q-value function
        # Get derivatives of Q-function wrt to state and control
        l_x, l_xx, l_u, l_uu, l_ux = self.constraints.get_cost_derivatives(X[:, 1:], U, poly_coeff, x_local_plan, npc_traj)
        df_dx = self.vehicle_model.get_A_matrix(X[2, 1:], X[3, 1:], U[0,:])
        df_du = self.vehicle_model.get_B_matrix(X[3, 1:])
        # Value function at final timestep is known
        V_x = l_x[:,-1]
        V_xx = l_xx[:,:,-1]
        # Allocate space for feedforward and feeback term
        k = np.zeros((self.args.num_ctrls, self.args.horizon))
        K = np.zeros((self.args.num_ctrls, self.args.num_states, self.args.horizon))
        # Run a backwards pass from N-1 control step
        for i in range(self.args.horizon-1,-1,-1):
            Q_x = l_x[:,i] + df_dx[:,:,i].T @ V_x
            Q_u = l_u[:,i] + df_du[:,:,i].T @ V_x
            Q_xx = l_xx[:,:,i] + df_dx[:,:,i].T @ V_xx @ df_dx[:,:,i]
            Q_ux = l_ux[:,:,i] + df_du[:,:,i].T @ V_xx @ df_dx[:,:,i]
            Q_uu = l_uu[:,:,i] + df_du[:,:,i].T @ V_xx @ df_du[:,:,i]
            # Q_uu_inv = np.linalg.pinv(Q_uu)
            # Regularized inverse of Q_uu: clamp negative eigenvalues to zero,
            # then add lamb before inverting (keeps the step well-conditioned).
            Q_uu_evals, Q_uu_evecs = np.linalg.eig(Q_uu)
            Q_uu_evals[Q_uu_evals < 0] = 0.0
            Q_uu_evals += lamb
            Q_uu_inv = np.dot(Q_uu_evecs,np.dot(np.diag(1.0/Q_uu_evals), Q_uu_evecs.T))
            # Calculate feedforward and feedback terms
            k[:,i] = -Q_uu_inv @ Q_u
            K[:,:,i] = -Q_uu_inv @ Q_ux
            # Update value function for next time step
            V_x = Q_x - K[:,:,i].T @ Q_uu @ k[:,i]
            V_xx = Q_xx - K[:,:,i].T @ Q_uu @ K[:,:,i]
        return k, K
    def run_step(self, ego_state, npc_traj):
        """One MPC step: refresh the local plan for the current ego state,
        optimize the control sequence, and return (sampled trajectory,
        reference trajectory, controls)."""
        assert self.global_plan is not None, "Set a global plan in iLQR before starting run_step"
        self.local_planner.set_ego_state(ego_state)
        ref_traj, poly_coeff = self.local_planner.get_local_plan()
        # Initial state: position, then what appear to be velocity and yaw
        # taken from ego_state -- confirm against the caller's state layout.
        X_0 = np.array([ego_state[0][0], ego_state[0][1], ego_state[1][0], ego_state[2][2]])
        # self.control_seq[:, :-1] = self.control_seq[:, 1:]
        # self.control_seq[:, -1] = np.zeros((self.args.num_ctrls))
        X, U = self.get_optimal_control_seq(X_0, self.control_seq, poly_coeff, ref_traj[:, 0], npc_traj)
        # Downsample the optimized states to ~10 (x, y) waypoints.
        traj = X[:2, ::int(self.args.horizon/10)].T
        self.control_seq = U
        # self.plot(U, X, ref_traj)
        return traj, ref_traj, U #self.filter_control(U, X[2,:])
    def get_optimal_control_seq(self, X_0, U, poly_coeff, x_local_plan, npc_traj):
        """Alternate backward/forward passes with LM regularization until the
        cost converges (within args.tol), lamb exceeds max_lamb, or max_iters."""
        X = self.get_nominal_trajectory(X_0, U)
        J_old = sys.float_info.max
        lamb = 1 # Regularization parameter
        # Run iLQR for max iterations
        for itr in range(self.args.max_iters):
            k, K = self.backward_pass(X, U, poly_coeff, x_local_plan, npc_traj, lamb)
            # Get control values at control points and new states again by a forward rollout
            X_new, U_new = self.forward_pass(X, U, k, K)
            J_new = self.constraints.get_total_cost(X, U, poly_coeff, x_local_plan, npc_traj)
            # Accept the step and relax regularization only when cost improves.
            if J_new < J_old:
                X = X_new
                U = U_new
                lamb /= self.lamb_factor
                if (abs(J_old - J_new) < self.args.tol):
                    print("Tolerance reached")
                    break
            else:
                lamb *= self.lamb_factor
                if lamb > self.max_lamb:
                    break
            J_old = J_new
            # print(J_new)
        return X, U
    def filter_control(self, U, velocity):
        """Convert the second control row into steering angles via
        atan2(wheelbase * u1, v)."""
        U[1] = np.arctan2(self.args.wheelbase*U[1],velocity[:-1])
        return U
    def plot(self, control, X, ref_traj):
        """Debug plots: controls, position trajectory vs reference, velocity/yaw."""
        self.ax1.clear()
        self.ax1.plot(np.arange(len(control[0])), control[0,:], color='g', label='Acc')
        self.ax1.plot(np.arange(len(control[0])), control[1,:], color='b', label='Yaw Rate')
        self.ax1.set_ylabel('Values')
        self.ax1.set_xlabel('Time')
        self.ax1.set_title('Controls',fontsize=18)
        # self.ax1.xlim(0, len(control[0]))
        # self.ax1.ylim(-6, 6)
        # self.ax1.axis('equal')
        self.ax1.legend()
        self.ax1.grid()
        self.ax2.clear()
        self.ax2.plot(ref_traj[:, 0], ref_traj[:, 1], color='r', label='Ref Traj')
        self.ax2.plot(X[0, :], X[1, :], color='g', label='Real Traj')
        self.ax2.set_ylabel('y')
        self.ax2.set_xlabel('x')
        self.ax2.set_title('Position Trajectory',fontsize=18)
        self.ax2.legend()
        self.ax2.grid()
        # plt.legend()
        self.ax3.clear()
        self.ax3.plot(np.arange(len(X[0])), X[2, :], color='r', label='Velocity')
        self.ax3.plot(np.arange(len(X[0])), X[3, :], color='g', label='Yaw')
        self.ax3.set_ylabel('Values')
        self.ax3.set_xlabel('Time')
        self.ax3.set_title('Traj',fontsize=18)
        self.ax3.grid()
        self.ax3.legend()
        plt.pause(0.001)
| 40.163842 | 124 | 0.573639 | 6,860 | 0.964974 | 0 | 0 | 0 | 0 | 0 | 0 | 1,162 | 0.163455 |
935fa4a7ed04d049f63c5a737cdba5df5fb7e88c | 25 | py | Python | src/engines/train/__init__.py | cr3ux53c/DenseNet-Tensorflow2 | 208143bf4086c407e524e01cd945fd3b0741b48d | [
"MIT"
] | 60 | 2020-07-08T02:39:06.000Z | 2022-03-28T14:26:34.000Z | src/engines/train/__init__.py | cr3ux53c/DenseNet-Tensorflow2 | 208143bf4086c407e524e01cd945fd3b0741b48d | [
"MIT"
] | 28 | 2019-08-13T22:20:46.000Z | 2020-02-17T19:27:32.000Z | src/engines/train/__init__.py | cr3ux53c/DenseNet-Tensorflow2 | 208143bf4086c407e524e01cd945fd3b0741b48d | [
"MIT"
] | 18 | 2020-08-26T02:06:32.000Z | 2022-03-22T03:04:40.000Z | from .train import train
| 12.5 | 24 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9361e059240f0fd25cbcad6c1466201786c8b48a | 164 | py | Python | genie_core/services/KeyBoardService.py | JereMIbq1995/genie-core | f87b6de56749630a46200298f7021047e854c8a3 | [
"MIT"
] | null | null | null | genie_core/services/KeyBoardService.py | JereMIbq1995/genie-core | f87b6de56749630a46200298f7021047e854c8a3 | [
"MIT"
] | null | null | null | genie_core/services/KeyBoardService.py | JereMIbq1995/genie-core | f87b6de56749630a46200298f7021047e854c8a3 | [
"MIT"
] | null | null | null |
class KeyBoardService():
    """Abstract keyboard-input service; concrete backends override the queries."""
    def __init__(self):
        pass
    def is_key_pressed(self, *keys):
        """Report whether the given keys are currently pressed (stub)."""
        pass
    def is_key_released(self, *key):
        """Report whether the given key was released (stub)."""
        pass
93625b79b6238b332e1234a333a23eb72571d1f2 | 4,034 | py | Python | tordatahub/tests/create_topics.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/tests/create_topics.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/tests/create_topics.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import traceback
from tordatahub import DataHub
from tordatahub.utils import Configer
from tordatahub.models import Project, Topic, RecordType, FieldType, RecordSchema, TupleRecord, CursorType
from tordatahub.errors import DatahubException, ObjectAlreadyExistException
# read the access credentials and target names from the local config file
configer = Configer('tordatahub.ini')
access_id = configer.get('tordatahub', 'access_id', '')
access_key = configer.get('tordatahub', 'access_key', '')
endpoint = configer.get('tordatahub', 'endpoint', '')
project_name = configer.get('tordatahub', 'project_name', 'meter_project_test')
topic_name = configer.get('tordatahub', 'topic_name', 'meter_topic_test')
# echo the effective configuration (NOTE: this script uses Python 2 syntax)
print "======================================="
print "access_id: %s" % access_id
print "access_key: %s" % access_key
print "endpoint: %s" % endpoint
print "project_name: %s" % project_name
print "topic_name: %s" % topic_name
print "=======================================\n\n"
# credentials are mandatory: abort early if any of them is missing
if not access_id or not access_key or not endpoint:
    print "access_id and access_key and endpoint must be set!"
    sys.exit(-1)
dh = DataHub(access_id, access_key, endpoint)
try:
    # create 9 test projects (meter_project_test_1 .. _9) ...
    for pi in range(1,10):
        project_name = "meter_project_test_%d" % pi
        project = Project(name=project_name, comment="meter project test")
        try:
            dh.create_project(project)
            print "create project %s success!" % project_name
            print "=======================================\n\n"
        except ObjectAlreadyExistException, e:
            print "project %s already exist!" % project_name
        # ... each with 99 tuple topics (meter_topic_test_<pi>_1 .. _99)
        for ti in range(1,100):
            topic_name = "meter_topic_test_%d_%d" %(pi, ti)
            topic = Topic(name=topic_name)
            topic.project_name = project_name
            topic.shard_count = 20
            topic.life_cycle = 7
            topic.record_type = RecordType.TUPLE
            topic.record_schema = RecordSchema.from_lists(['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field'], [FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP])
            try:
                dh.create_topic(topic)
                print "create topic %s success!" % topic_name
                # block until all shards of the new topic are in ready state
                dh.wait_shards_ready(project_name, topic_name)
                print "shards all ready!!!"
                shards = dh.list_shards(project_name, topic_name)
                # write one sample tuple record into every shard of the topic
                for shard in shards:
                    record = TupleRecord(schema=topic.record_schema, values=[1, 'yc1', 10.01, True, 1455869335000000])
                    record.shard_id = shard.shard_id
                    record.put_attribute('AK', '47')
                    records = []
                    records.append(record)
                    failed_indexs = dh.put_records(project_name, topic_name, records)
                    print "put record to project:%s topic:%s failed_index:%s" %(project_name, topic_name, failed_indexs)
            except ObjectAlreadyExistException, e:
                print "topic %s already exist!" % topic_name
                print "=======================================\n\n"
except Exception, e:
    print traceback.format_exc()
    sys.exit(-1)
| 45.840909 | 231 | 0.645513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,660 | 0.410282 |
9364fa85414e75b8108245e512d4d8a8d1ad59d1 | 494 | py | Python | config.py | didim99/FlaskLearning | 66d9de3729d372ec548ffbaaaff1d50797467361 | [
"MIT"
] | null | null | null | config.py | didim99/FlaskLearning | 66d9de3729d372ec548ffbaaaff1d50797467361 | [
"MIT"
] | null | null | null | config.py | didim99/FlaskLearning | 66d9de3729d372ec548ffbaaaff1d50797467361 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
# resolve the directory of this file and load environment overrides from a local .env
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))


class Config(object):
    """Application configuration, populated from environment variables
    (with insecure development fallbacks)."""
    # session-signing key; falls back to a well-known default for development
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    # parameters for the VK OAuth authorization request
    VKAPI = {
        'v': '5.122',
        'client_id': '7386546',
        'redirect_uri': 'https://didim.eclabs.ru/other/tstu/vkapi/verify.php',
        'state': os.environ.get('VKAPI_STATE') or 'you-will-never-guess',
        'response_type': 'code'
    }
| 27.444444 | 78 | 0.643725 | 352 | 0.712551 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.404858 |
93699c5951eeec7f3f3c9e322a78230d739bef03 | 38,153 | py | Python | index_builder/topic_model.py | Klamann/search-index-builder | 86c0b48a70871085966a91e75cd97c6862f8e988 | [
"Apache-2.0"
] | null | null | null | index_builder/topic_model.py | Klamann/search-index-builder | 86c0b48a70871085966a91e75cd97c6862f8e988 | [
"Apache-2.0"
] | null | null | null | index_builder/topic_model.py | Klamann/search-index-builder | 86c0b48a70871085966a91e75cd97c6862f8e988 | [
"Apache-2.0"
] | null | null | null | import argparse
import itertools
import json
import logging
import os
import pickle
import time
import warnings
from collections import Counter, defaultdict
from typing import Dict, Any, List, Iterable, Tuple, Set
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
import langdetect
import spacy
from gensim import corpora
from gensim.corpora import IndexedCorpus
from gensim.models import HdpModel, LdaMulticore
from gensim.models.basemodel import BaseTopicModel
from langdetect.lang_detect_exception import LangDetectException
from langdetect.language import Language
from spacy.tokens.doc import Doc
from spacy.tokens.token import Token
import text_processing
import util
from data import JsonLinesCorpus, Topic, Document, DocumentCollection
from util import ProgressLog
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('topic-models')
logger.setLevel(logging.INFO)
#
# goal: go from plaintext PDFs + optional metadata file (result of parser) to id-topic-mapping (input for es index)
# add optional pre-classification (id-class mapping) as first layer of the hierarchical topic model
#
# start: need corpus-id-mapping and metadata-by-doc-id.
# first layer: split by category and write multiple corpora, each with own id-mapping
# subsequent layers: split by assigned topic, add id-mapping, store topics in central id-topic-dict
# side-effect: build topic-tree, store relations in Topic objects (get parents and children, root topic is "main")
#
# so, the id-category-map is a one-time thingy that we don't need to preserve at all. Just write everything
# into the topic tree and document-topic-mapping immediately
#
# steps:
# - calculate topic model from document collection
# - classify documents using this model, store topic labels in document objects
# - create one new model per topic with different hyperparameters and train it with the sub-corpus consisting only of
# the documents in this topic
# - recur
#
# issues:
# - need to build a new mmcorpus and a corpusindex-docid-mapping for each model
#
# data structure: LayeredTopicModel
# - recursive structure, initialize for every subsequent layer
# - the build script requires lots of state and temporary files
# -> maybe have a separate builder, that spits out the final model...
# - the final model consists of multiple topic models + metadata in a single archive
#
# topic model visualization: https://github.com/bmabey/pyLDAvis
#
class TopicModel:
def __init__(self, file_pdf_text: str = None, file_corpus_input: str = None,
file_metadata: str = None, file_output_prefix: str = None, abstracts_only=False,
language_filter: str = None, model: str = "hdp", batch_size=100, n_threads=None,
topic_layers: List[int] = None, topic_limit_per_layer: List[int] = None,
category_layer=False, min_docs_per_topic: int = None, token_min_count=1,
dict_size_limit=10000, document_limit: int = None):
"""
:param file_pdf_text: path to the file containing the parsed PDFs (output of pdf_parser)
:param file_corpus_input: path to the file containing the tokens of the parsed pdfs
(optional, preferred over file_pdf_text)
:param file_metadata: path to the metadata file (output of arxiv_crawler. required,
if the category layer should be used)
:param file_output_prefix: all output files, including temporary files, will be prefixed
with this string. all results will be stored under this prefix aswell.
:param abstracts_only: use only title and abstract for the topic model instead of the
full document text
:param language_filter: filter by the specified language code. the spacy parser we use
currenlty only supports english text, so 'en' is a reasonable value here
(though not a requirement)
:param model: specify the model to use. supported models: "hdp", "lda"
:param batch_size: the batch size of the spacy parser
:param n_threads: the number of threads to use on parallelizable tasks (e.g. spacy)
:param topic_layers: how many topics are to be calculated on each nested topic layer
:param topic_limit_per_layer: how many of those topics should have a fixed limit during
classification (i.e. each document can be only part of up to N topics instead of as
many as the topic model yields)
:param category_layer: use the categories extracted from metadata as the first layer
:param min_docs_per_topic: how many documents are required for each sub-topic to add
(e.g. min_docs = 100, we have 1000 documents, this limits the number of sub-topics to 10)
:param token_min_count: lowest allowed token count for words that may appear in the dictionary
:param dict_size_limit: the total size limit of the dictionary (take the N most frequent terms)
:param document_limit: just process the first N documents (useful for testing)
"""
super().__init__()
# file paths
self.file_pdf_text = file_pdf_text
self.file_corpus_input = file_corpus_input
self.file_metadata = file_metadata
self.file_output_prefix = file_output_prefix
# derived paths
self.file_tasklog = file_output_prefix + '-progress.log'
self.file_corpus_plain = file_corpus_input or file_output_prefix + '-corpus-plain.json.bz2'
self.file_corpus = file_output_prefix + '-corpus.json'
self.file_dict = file_output_prefix + '-lemma.dict.bz2'
self.file_ids = file_output_prefix + '-ids.json'
self.file_docs = file_output_prefix + '-docs.json'
self.file_model = file_output_prefix + '-hdp.pkl.bz2'
self.file_topics = file_output_prefix + '-topics.json.bz2'
# application config
self.abstracts_only = abstracts_only
self.language_filter = language_filter
self.model = model
self.batch_size = batch_size
self.n_threads = n_threads or max(2, int(os.cpu_count() / 2))
self.topic_layers = topic_layers or [10]
self.topic_limit_per_layer = topic_limit_per_layer or [0] * len(topic_layers)
self.category_layer = category_layer
self.min_docs_per_topic = min_docs_per_topic
self.token_min_count = token_min_count
self.dict_size_limit = dict_size_limit
self.document_limit = document_limit
# integrity checks
if not abstracts_only and not file_pdf_text and not file_corpus_input:
raise ValueError("At least one of the parameters 'file_pdf_text' or 'file_token_input' "
"is required, if 'abstracts_only' is not enabled.")
if (category_layer or abstracts_only) and not file_metadata:
raise ValueError("The parameter 'file_metadata' is required, if 'category_layer' "
"or 'abstracts_only' is True.")
if not file_output_prefix:
raise ValueError("The output path must not be empty.")
    def build(self, force=False):
        """
        Run the whole pipeline: token counts + dictionary, reduced corpus,
        optional category layer from metadata, and the nested topic model.
        Steps already recorded in the progress log are skipped unless force=True.

        :param force: re-run all steps, even if they are marked as finished
        """
        # evaluate progress information (no need to do long-running tasks twice)
        progress = ProgressLog(self.file_tasklog)
        if progress.finished:
            logger.info("skipping {} tasks that have already been finished".format(len(progress.finished)))
        # unify declarations
        if isinstance(self.topic_layers, int):
            self.topic_layers = [self.topic_layers]
        # build the corpus (if required) and vocabulary
        if force or 'token_dict' not in progress:
            self.stream_token_dict()
            progress.add('token_dict', "finished calculating the token counts and the global dictionary for all documents")
        # create a reduced version of the corpus based on the provided dictionary
        if force or 'reduced_corpus' not in progress:
            self.stream_reduced_corpus()
            progress.add('reduced_corpus', "")
        # build the category layer (if specified)
        if self.category_layer and (force or 'metadata' not in progress):
            self.stream_metadata()
            progress.add('metadata', "finished extracting categories from document metadata")
        # build the nested topic model and classify documents
        if force or 'topic_model' not in progress:
            self.stream_nested_topic_model()
            progress.add('topic_model', "")
        logger.info("build completed. Classification results have been stored in `{}`".format(self.file_topics))
    def stream_nested_topic_model(self):
        """
        Build the layered topic model: starting from either the metadata
        categories or a single root topic, train one model per (topic, layer),
        classify the documents of that topic, and recurse into the resulting
        sub-topics on the next layer. All topics and document assignments are
        written to `self.file_topics` as a DocumentCollection.
        """
        # initialize data structures
        root_topic = Topic('root', layer=0)
        current_topics = None  # type: List[Topic]
        documents = None  # type: Dict[str, Document]
        dictionary = self.load_dictionary()
        if self.category_layer:
            logger.info("building first topic layer from document metadata...")
            current_topics = self.topics_from_metadata(root_topic)
            documents = self.docs_from_metadata(current_topics)
        else:
            current_topics = [root_topic]
            documents = self.docs_from_ids()
        # build topic model and classify documents
        logger.info("building topic models and classifying documents...")
        for idx, (num_topics, topic_limit) in enumerate(zip(self.topic_layers, self.topic_limit_per_layer)):
            logger.info("Processing layer {} of {}, with {} sub-topics per parent topic{}"
                        .format(idx+1, len(self.topic_layers), num_topics, " (max. {} topics per doc)"
                                .format(topic_limit) if topic_limit else ""))
            # TODO add option to remove temporary data immediately
            # collect topics for the next iteration
            next_topics = []  # type: List[Topic]
            # go through the documents of each topic
            for topic in current_topics:
                logger.info("Processing documents in topic '{}'...".format(topic.topic_id))
                # load the last corpus that was created for this topic's parent
                corpus = self.load_corpus_for_topic(topic.parent if topic != root_topic else topic)
                # reduce the corpus so it only contains the documents we need
                sub_corpus = self.corpus2corpus(corpus, documents, topic) if topic != root_topic else corpus
                if sub_corpus:  # only continue, if there are actually documents with this topic
                    # limit the number of sub-topics, if necessary (ensures a
                    # minimum number of documents per sub-topic)
                    num_topics_adjusted = min(int(len(sub_corpus) / self.min_docs_per_topic), num_topics) \
                        if self.min_docs_per_topic else num_topics
                    if num_topics_adjusted <= 3:
                        logger.info("skipping topic {} (too few documents: {})".format(topic.topic_id, len(sub_corpus)))
                    else:
                        # build the topic model
                        self.stream_topic_model(topic, dictionary, sub_corpus, num_topics_adjusted)
                        # classify documents using the topic model
                        sub_topics = self.stream_classify_documents(topic, sub_corpus, documents, topic_limit=topic_limit)
                        # save the sub-topics for the next layer
                        next_topics.extend(sub_topics)
                        logger.info("All {} documents in topic '{}' have been classified".format(len(sub_corpus), topic.topic_id))
                else:
                    logger.warning("there are no documents in topic '{}'. Hint: parent topic '{}' has {} documents"
                                   .format(topic.topic_id, topic.parent.topic_id if topic.parent else "root", len(corpus)))
            # select the topics for the next iteration
            current_topics = next_topics
        logger.info("all {} documents have been classified. storing results...".format(len(documents)))
        topics = {topic.topic_id: topic for topic in root_topic._collect_topics()}
        collection = DocumentCollection(topics, documents)
        util.json_write(collection.to_dict(), self.file_topics, pretty=False)
    def stream_token_dict(self):
        """
        make a single run over the file containing all documents as plaintext.
        Parse all documents using spacy, store the token counts for each document
        and build the global token dict
        """
        if self.file_corpus_input:
            # a pre-tokenized corpus was provided: only the dictionary is built
            logger.info("reading corpus from '{}'".format(self.file_corpus_plain))
            corpus = JsonLinesCorpus(self.file_corpus_input)
            return self.store_gensim_dict(corpus)
        else:
            # choose the text source: title+abstract from metadata, or full PDF text
            if self.abstracts_only:
                logger.info("reading abstracts from '{}'".format(self.file_metadata))
                documents = util.json_read_lines(self.file_metadata, self.get_title_and_abstract)
            else:
                logger.info("reading documents from '{}'".format(self.file_pdf_text))
                documents = util.json_read_lines(self.file_pdf_text, self.combine_pages)
            # limit document count (if configured)
            documents_limited = (next(documents) for i in range(self.document_limit)) if self.document_limit else documents
            # filter by document language (if configured)
            documents_filtered = self.filter_by_lang(documents_limited, self.language_filter) if self.language_filter else documents_limited
            # parse documents using spacy
            documents_tokens = self.spacy_parse(documents_filtered, batch_size=self.batch_size, n_threads=self.n_threads)
            # stream intermediate result to disk (in case data does not fit in RAM, which it won't if you're serious about this stuff)
            return self.store_tokens_and_gensim_dict(documents_tokens)
    def stream_reduced_corpus(self):
        """
        Convert the plain-token corpus into a compact corpus whose tokens are
        dictionary IDs (words not in the filtered dictionary are dropped).
        If the input corpus is already in reduced format, just reuse it.
        """
        corpus = JsonLinesCorpus(self.file_corpus_plain)
        if corpus.has_plain_tokens():
            logger.info("building a reduced version of corpus '{}'".format(self.file_corpus_plain))
            dictionary = self.load_dictionary()
            corpus.convert_tokens_to_ids(self.file_corpus, id2word=dictionary.id2token)
        else:
            # corpus is already in reduced format. continue...
            self.file_corpus = self.file_corpus_plain
    def stream_metadata(self):
        """
        Read the metadata file and attach the top-level category sets to the
        matching Document objects; the resulting documents are persisted to
        `self.file_docs`.
        """
        # get the IDs of all documents we need
        documents = self.docs_from_ids()
        # read the metadata file and extract all categories for the documents we want
        logger.info("reading metadata from " + self.file_metadata)
        metadata = util.json_read_lines(self.file_metadata)  # type: List[Dict[str,Any]]
        category_count = Counter()
        for meta_dict in metadata:
            # the document id is the last component of the OAI identifier
            doc_id = meta_dict['header']['identifier'].split(':')[-1]
            # match doc ids
            if doc_id in documents:
                doc = documents[doc_id]
                categories = meta_dict['header']['setSpecs']
                # keep only the top-level category (before the first ':'), deduplicated
                categories_clean = sorted(set(c.split(':')[0] for c in categories))
                doc.categories = categories_clean
                for cat in categories_clean:
                    category_count[cat] += 1
        # integrity check
        for doc in documents.values():
            if doc.categories is None:
                logger.warning("there was no metadata entry for document '{}'".format(doc.doc_id))
        # reading finished. print stats and write to file
        logger.info("categories for {} documents have been read: {}".format(len(documents), category_count.items()))
        util.json_write(Document.store_documents(documents.values()), self.file_docs, pretty=False)
    def stream_topic_model(self, topic: Topic, dictionary: corpora.Dictionary = None,
                           corpus: IndexedCorpus = None, num_topics=20, max_topics_per_doc=5):
        """
        Train a topic model (HDP or LDA, depending on `self.model`) for the
        documents of the given topic and pickle it to the topic's model path.

        :param topic: the parent topic whose documents are to be modeled
        :param dictionary: the token dictionary (loaded from disk if omitted)
        :param corpus: the (sub-)corpus to train on (full corpus if omitted)
        :param num_topics: the number of topics to generate
        :param max_topics_per_doc: HDP only: max. number of topics per document
        :raises ValueError: if `self.model` is neither "hdp" nor "lda"
        """
        # load dictionary and corpus, if necessary
        if not dictionary:
            dictionary = self.load_dictionary()
            logger.warning("the default dictionary was loaded from file. "
                           "You should keep an instance in memory instead of calling this in a loop...")
        if not corpus:
            corpus = JsonLinesCorpus(self.file_corpus)
            logger.warning("the default corpus was loaded from file. You should provide a "
                           "reduced corpus to increase performance (see corpus2corpus)")
        # build the model
        logger.info("building a topic model with {} topics for {} documents in topic '{}'"
                    .format(num_topics, len(corpus), topic.topic_id))
        t0 = time.time()
        if self.model == "lda":
            model = LdaMulticore(corpus, id2word=dictionary.id2token, num_topics=num_topics,
                                 passes=2, iterations=50, chunksize=2000, workers=self.n_threads)
        elif self.model == "hdp":
            # T = overall topic limit, K = max topics per document
            model = HdpModel(corpus, id2word=dictionary.id2token, T=num_topics, K=max_topics_per_doc)
        else:
            raise ValueError("Unknown model identifier '{}'".format(self.model))
        t1 = time.time()
        # serialize
        logger.info("building the model took {:.1f} s. Serializing model...".format(t1-t0))
        output_path = self._get_model_path(topic)
        with util.open_by_ext(output_path, 'wb') as fp:
            pickle.dump(model, fp, protocol=4)
        logger.info("model dump finished, took {:.1f} s".format(time.time()-t1))
    def stream_classify_documents(self, parent_topic: Topic, corpus: JsonLinesCorpus,
                                  documents: Dict[str, Document], topic_limit=0) -> List[Topic]:
        """
        Assign sub-topics of `parent_topic` to every document in `corpus`,
        using the previously trained and serialized model of the parent topic.

        :param parent_topic: the topic whose stored model is used for classification
        :param corpus: the corpus containing the documents of the parent topic
        :param documents: all known documents, indexed by document id
        :param topic_limit: if > 0, keep only the N highest scoring topics per document
        :return: the newly created sub-topics of `parent_topic`
        """
        # load the actual topic model
        model = self.load_model(self._get_model_path(parent_topic))  # type: HdpModel
        # build Topic objects from model
        topics = {}
        try:
            for i in itertools.count():
                topic_id = "{}-{}".format(parent_topic.topic_id, i)
                # show_topic has different signatures for HDP and LDA models
                show_topic_kwargs = {}
                if self.model == "hdp":
                    show_topic_kwargs = {'num_words': 10, 'formatted': False}
                elif self.model == "lda":
                    show_topic_kwargs = {'topn': 10}
                topic_terms = [(term, round(score, 5)) for term, score in model.show_topic(i, **show_topic_kwargs)]
                topic = parent_topic.add_child(topic_id, topic_terms)
                topics[i] = topic
        except IndexError:
            pass  # most pythonic way to interrupt iteration, if # of elements is unknown...
        # calculate the topics for each document
        logger.info("classifying {} documents from topic '{}' into {} new categories"
                    .format(len(corpus), parent_topic.topic_id, len(topics)))
        t = time.time()
        for i, doc_dict in enumerate(corpus.iter_all()):
            if not doc_dict['id'] or doc_dict['id'] not in documents:
                logger.warning("Document '{}' at corpus index {} (topic: {}) was not found "
                               "in the document index and will be skipped"
                               .format(doc_dict['id'], parent_topic.topic_id, i))
                continue
            doc_id = doc_dict['id']
            tokens = doc_dict['tokens']
            document = documents[doc_id]
            assert document.topics is None or parent_topic in document.topics, \
                "tried to classify a document which is not part of the current topic"
            # topics of this document, ordered by score descending
            doc_topics = sorted(model[tokens], key=lambda x: x[1], reverse=True)  # type: List[Tuple[str, float]]
            for topic_idx, score in (doc_topics[:topic_limit] if topic_limit else doc_topics):
                # ignore topics with a negligible score
                if score > 0.10:
                    document.add_topic(topics[topic_idx], round(score, 5))
            if (i+1) % 10000 == 0:
                t1 = time.time()
                logger.info("{}/{} documents have been classified ({:.2f} doc/min)"
                            .format(i+1, len(corpus), self.batch_size*60/(t1-t)))
                t = t1
        return list(topics.values())
    def corpus2corpus(self, corpus: JsonLinesCorpus, documents: Dict[str, Document], topic: Topic) -> JsonLinesCorpus:
        """
        get a subset of a corpus. It will include all documents that contain
        the specified topic.
        Writes the reduced corpus to a new file whose name is derived from the topic ID
        :param corpus: the source corpus
        :param documents: the document definition (contains document topics)
        :param topic: filter all documents in the corpus by this topic
        :return: a new corpus containing only the filtered documents
        """
        logger.info("creating a subset of corpus '{}' for topic '{}'".format(corpus.fname, topic.topic_id))
        # specify the filter function
        def doc_filter(doc_dict: Dict[str, Any]) -> bool:
            """
            :return: True, iff this document has the specified topic
            """
            doc = documents[doc_dict['id']]
            return doc.topics and topic in doc.topics
        # build the new corpus
        corpus_path = self._get_corpus_path(topic)
        return corpus.subset(corpus_path, doc_filter)
def test_model(self, fin_corpus: str, fin_model: str):
model = self.load_model(fin_model)
model.print_topics(num_topics=-1, num_words=10)
corpus = JsonLinesCorpus(fin_corpus)
for tokens in corpus:
topics = model[tokens]
print("dominant topics in https://arxiv.org/abs/{}".format(tokens))
for topic, score in sorted(topics, key=lambda x: x[1], reverse=True):
print("topic {} @ {:.3f}: {}".format(topic, score, model.print_topic(topic)))
def test_document_topics(self):
# get best matching documents + URLs per topic
topic_model = DocumentCollection.from_dict(util.json_read(self.file_topics))
docs_by_first_topic = defaultdict(list)
# group documents by first topic
for id, doc in topic_model.documents.items():
if doc.topics:
topic, score = doc.topics[0]
docs_by_first_topic[topic].append((id, score))
else:
logger.warning("document {} has no topics".format(doc.doc_id))
# sort by score descending
for doc_list in docs_by_first_topic.values():
doc_list.sort(key=lambda x: x[1], reverse=True)
# print highest scoring documents for each topic
for topic in topic_model.topics.values():
print("Topic {}: {}".format(topic.topic_id, topic.tokens))
for doc_id, score in docs_by_first_topic[topic.topic_id][:10]:
print("paper https://arxiv.org/abs/{} with score {}".format(doc_id.replace('-', '/'), score))
def docs_from_ids(self) -> Dict[str, Document]:
return {doc_id: Document(doc_id) for doc_id in util.json_read(self.file_ids)}
    def docs_from_metadata(self, topics: List[Topic]) -> Dict[str, Document]:
        """
        Restore the Document objects (incl. their categories) from the docs
        file and, if the category layer is enabled, assign each document one
        topic per category (with a fixed score of 1.0).

        :param topics: the first-layer topics (one per category)
        """
        # restore documents
        topic_dict = {t.topic_id: t for t in topics}
        documents = Document.restore_documents(util.json_read(self.file_docs), topic_dict)
        # add topics to documents (one for each category)
        if self.category_layer:
            for doc in documents.values():
                if doc.categories:
                    for category in doc.categories:
                        doc.add_topic(topic_dict[category], 1.0)
                else:
                    logger.warning("Document {} has no categories!".format(doc.doc_id))
        return documents
    def topics_from_metadata(self, parent_topic: Topic) -> List[Topic]:
        """
        Create one child topic of `parent_topic` for every distinct document
        category found in the docs file.
        """
        # note: some papers do not have categories (especially very old ones)
        categories = (doc_dict['categories'] for doc_dict in util.json_read(self.file_docs) if doc_dict['categories'])
        topic_ids = sorted(set(util.flatten(categories, generator=True)))
        topics = [parent_topic.add_child(topic_id) for topic_id in topic_ids]
        return topics
    def load_dictionary(self) -> corpora.Dictionary:
        """Load the token dictionary from disk, with id2token initialized."""
        dictionary = corpora.Dictionary.load(self.file_dict)
        dictionary[0]  # forces id2token to be calculated. Probably a bug in gensim...
        return dictionary
def load_corpus_for_topic(self, topic: Topic) -> JsonLinesCorpus:
corpus_path = self._get_corpus_path(topic)
if os.path.isfile(corpus_path):
# load the corpus for this topic (if available)
return JsonLinesCorpus(self._get_corpus_path(topic))
else:
if topic.parent:
# ok, try again with this topic's parent
return self.load_corpus_for_topic(topic.parent)
else:
# no parent left? then use the root corpus
return JsonLinesCorpus(self.file_corpus)
def _get_topic_file_prefix(self, topic: Topic) -> str:
"""
get a file prefix based on the output path of this instance and the topic id
"""
return "{}-topic-{}".format(self.file_output_prefix, topic.topic_id)
def _get_model_path(self, topic: Topic) -> str:
"""
get the path of the model associated with this topic
"""
return self._get_topic_file_prefix(topic) + '-model.pkl.bz2'
    def _get_corpus_path(self, topic: Topic) -> str:
        """
        get the path of the corpus associated with this topic
        """
        return self._get_topic_file_prefix(topic) + '-corpus.json'
    @staticmethod
    def load_model(file_model: str) -> BaseTopicModel:
        """
        Unpickle a previously stored topic model.
        :param file_model: path to the pickled (and possibly compressed) model
        """
        logger.debug("loading model from file '{}'...".format(file_model))
        with util.open_by_ext(file_model, 'rb') as fp:
            return pickle.load(fp)
@staticmethod
def filter_by_lang(documents: Iterable[Dict[str, Any]], lang_code: str, threshold=0.8,
broken_codes=['cy', 'ca', 'pt']) -> Iterable[Dict[str, Any]]:
logger.info("will only accept documents in language '{}'".format(lang_code))
counter = Counter()
for i, entry in enumerate(documents):
id = entry['id']
doc = entry['text']
if not doc:
logger.debug("empty document at index %s", i)
continue
sample = doc[5000:6000] if len(doc) >= 6000 else doc[:1000]
try:
langs = langdetect.detect_langs(sample) # type: List[Language]
lang = langs[0].lang
proba = langs[0].prob
if lang != lang_code or proba < threshold:
logger.debug("language: {}, {:.3f}, {}, \"{}\"".format(lang, proba, id, sample[:100].replace('\n', '\\n')))
if proba < threshold or lang in broken_codes:
counter['_failed'] += 1
else:
counter[lang] += 1
else:
counter[lang] += 1
yield entry
except LangDetectException:
logger.warning("language detection failed on document {} (sample: {})".format(id, sample[:1000]), exc_info=1)
logger.info("Results of language detection: {}".format(str(counter.most_common())))
@classmethod
def spacy_parse(cls, documents: Iterable[Dict[str, Any]], batch_size=10, n_threads=1) -> Iterable[Dict[str, Any]]:
logger.debug("loading spacy model...")
t = time.time()
nlp = spacy.load('en', parser=False)
logger.info("loading spacy model took {:.2f}s. Processing documents using spacy...".format(time.time() - t))
# preserve document IDs
gen1, gen2 = itertools.tee(documents)
ids = (x['id'] for x in gen1)
texts = (x['text'] for x in gen2)
docs = nlp.pipe(texts)
# start the actual work and join the results with the IDs again
t = time.time()
count = 0
docs = nlp.pipe(texts, batch_size=batch_size, n_threads=n_threads)
for id, doc in zip(ids, docs): # type: Tuple[str, Doc]
count += 1
if count % batch_size == 0:
t1 = time.time()
logger.info("a total of {} documents has been processed, took {:.2f}s ({:.2f} doc/min, {} thread(s))"
.format(count, t1-t, batch_size*60/(t1-t), n_threads))
t = t1
# skip undesired tokens
tokens = cls.filter_tokens(doc)
lemmata = [token.lemma_ for token in tokens]
yield {'id': id, 'tokens': lemmata}
@staticmethod
def filter_tokens(document: Doc) -> List[Token]:
"""
conditions are
- length > 1
- first character is alpha
- no space or punctuation
- consists of few strange characters
:param document:
:return:
"""
pos_filter = ['SPACE', 'PUNCT']
return [token for token in document if
len(token) > 1 and
token.string[0].isalpha() and
token.pos_ not in pos_filter and
(token.is_alpha or text_processing.has_valid_chars(token.string))]
    def store_tokens_and_gensim_dict(self, documents: Iterable[Dict[str, Any]]):
        """
        process token stream to build dictionary in memory and dump tokens as one json per line to file.
        afterwards, serialize the entire dictionary.

        :param documents: dicts with the keys 'id' and 'tokens' (a token list)
        :return: the set of document ids and the filtered dictionary
        """
        logger.info("building the dictionary and storing the corpus...")
        dictionary = corpora.Dictionary()
        doc_ids = set()
        with util.open_by_ext(self.file_corpus_plain, 'wt', encoding='utf-8') as fp:
            for entry in documents:
                doc_id = entry['id']
                tokens = entry['tokens']  # type: List[str]
                token_counts = Counter(tokens)
                doc_ids.add(doc_id)
                # one json document per line (jsonlines format)
                result = {'id': doc_id, 'tokens': token_counts}
                fp.write(json.dumps(result, separators=None, indent=None, ensure_ascii=False))
                fp.write('\n')
                dictionary.doc2bow(tokens, allow_update=True)
        # store the document IDs
        util.json_write(sorted(doc_ids), self.file_ids)
        # store the dictionary (drop very rare and very common terms, keep the N most frequent)
        dictionary.filter_extremes(no_below=self.token_min_count, no_above=0.2, keep_n=self.dict_size_limit)
        dictionary.compactify()
        dictionary.save(self.file_dict, pickle_protocol=4)
        return doc_ids, dictionary
    def store_gensim_dict(self, corpus: JsonLinesCorpus) -> Tuple[Set[str], corpora.Dictionary]:
        """
        process token stream to build dictionary in memory, then serialize the entire dictionary.
        also stores document IDs in a separate file.

        :param corpus: a corpus whose documents carry plain token counts
        :return: the set of document ids and the filtered dictionary
        """
        logger.info("building the dictionary...")
        dictionary = corpora.Dictionary()
        doc_ids = set()
        for i, doc in enumerate(corpus.iter_all()):
            doc_id = doc['id']
            doc_ids.add(doc_id)
            token_counts = doc['tokens']  # type: Dict[str, int]
            # unfortunately, dictionary.doc2bow() does not accept (token,count) tuples
            # therefore we expand the dictionary to a token list again... (yes, this is stupid)
            tokens = util.flatten([token] * count for token, count in token_counts.items())
            dictionary.doc2bow(tokens, allow_update=True)
            if (i+1) % 50000 == 0:
                logger.info("{} documents have been read so far".format(i+1))
        # store the document IDs
        util.json_write(sorted(doc_ids), self.file_ids)
        # store the dictionary (drop very rare and very common terms, keep the N most frequent)
        dictionary.filter_extremes(no_below=self.token_min_count, no_above=0.2, keep_n=self.dict_size_limit)
        dictionary.compactify()
        dictionary.save(self.file_dict, pickle_protocol=4)
        return doc_ids, dictionary
    @staticmethod
    def combine_pages(entry: Dict[str, Any], **kwargs) -> Dict[str, Any]:
        """
        Merge the pages of a parsed PDF into a single cleaned text.
        :return: a dict with the keys 'id' and 'text'
        """
        # document IDs might be broken, if they were extracted from file names...
        doc_id = text_processing.fix_file_based_id(entry['id'])
        raw_text = "\n".join(entry['pages'])
        clean_text = text_processing.clean_parsed_text(raw_text)
        return {'id': doc_id, 'text': clean_text}
@staticmethod
def get_title_and_abstract(entry: Dict[str, Any], **kwargs) -> Dict[str, Any]:
full_id = entry['header']['identifier']
short_id = full_id[(full_id.rfind(':') + 1):]
title = text_processing.strip_all_whitespace(entry['title'][0])
abstract = text_processing.strip_all_whitespace(entry['description'][0])
return {'id': short_id, 'text': (title + "\n\n" + abstract) }
def topic_stats(topic_file: str):
    """Print a frequency table of every topic assigned in *topic_file*."""
    print("gathering stats for topics in", topic_file)
    collection = DocumentCollection.from_dict(util.json_read(topic_file))
    # documents without topics contribute nothing (doc.topics may be None)
    all_topics = util.flatten((doc.topics or [] for doc in collection.documents.values()), generator=True)
    for topic, count in Counter(all_topics).most_common():
        print("{}: {} ({})".format(topic.topic_id, count, topic.tokens))
def parse_args() -> argparse.Namespace:
    """Parse the command line and normalize list-valued options.

    Comma-separated values of --layers and --limit-classification are
    converted into lists of ints.

    Returns:
        argparse.Namespace with all parsed options.
    Raises:
        ValueError: if --layers and --limit-classification have different lengths.
    """
    parser = argparse.ArgumentParser(description='Build a nested topic model and classify documents')
    parser.add_argument('-p', '--input-pdfs', metavar='FILE', type=str,
                        help='path to the file containing the parsed PDFs (output of pdf_parser)')
    parser.add_argument('-t', '--input-tokens', metavar='FILE', type=str,
                        help='path to the file containing the tokens of the parsed pdfs '
                             '(optional, alternative to --input-pdfs)')
    parser.add_argument('-m', '--input-meta', metavar='FILE', type=str,
                        help='path to the metadata file (output of arxiv_crawler. '
                             'required, if the category layer should be used)')
    parser.add_argument('-o', '--output-prefix', metavar='PATH', type=str, required=True,
                        help='all output files, including temporary files, will be prefixed '
                             'with this string. all results will be stored under this '
                             'prefix as well.')
    parser.add_argument('-a', '--abstracts-only', action='store_true',
                        help="build topic models based on a paper's abstract only "
                             "(do not use the entire document text)")
    parser.add_argument('-T', '--topic-model', metavar='MODEL', type=str, default="lda",
                        help='the topic model to use. Options: "lda" (default), "hdp"')
    parser.add_argument('-l', '--layers', metavar='LAYERS', type=str, default="10",
                        help='how many nested topic layers are to be used? Example: "10,7,4"')
    parser.add_argument('-c', '--limit-classification', metavar='LIMITS', type=str,
                        help='limits the number of topics that each document can be assigned '
                             'to at each layer during classification. One number per layer, '
                             '0 stands for unlimited. Must have same length as -l. '
                             'Example: "1,2,0"')
    parser.add_argument('-M', '--min-docs-per-topic', metavar='N', type=int,
                        help='require at least N documents per topic on each layer. '
                             'Can reduce the allowed topic count at each layer (but never increase). '
                             'Interrupts the build for a topic, if less than 3*N documents remain '
                             '(a topic model with just two topics does not seem useful)')
    parser.add_argument('-f', '--lang-filter', metavar='LANG', type=str, default="en",
                        help='filter by the specified language code. Defaults to "en" '
                             '(because we can currently only parse english text)')
    parser.add_argument('-v', '--vocab-size', metavar='N', type=int,
                        help='limit the size of the vocabulary, if specified')
    parser.add_argument('-d', '--doc-limit', metavar='N', type=int,
                        help='just process the first N documents (useful for testing)')
    args = parser.parse_args()
    # process list input & convert data types
    if isinstance(args.layers, str):
        args.layers = [int(s.strip()) for s in args.layers.split(",")]
    if isinstance(args.limit_classification, str):
        args.limit_classification = [int(s.strip()) for s in args.limit_classification.split(",")]
    # both lists describe the same layers, so their lengths must agree
    if args.limit_classification and len(args.layers) != len(args.limit_classification):
        raise ValueError("the arguments --layers and --limit-classification must have the "
                         "same length! layers: {}, limits: {}"
                         .format(str(args.layers), str(args.limit_classification)))
    return args
# example args:
# topic_model.py -t "tokens.json.bz2" -m "meta.json.bz2" -o "./data/test" -l "5,5" -v 10000
if __name__ == "__main__":
    # wire the parsed CLI options into a TopicModel and run the full build
    args = parse_args()
    topic_model = TopicModel(file_pdf_text=args.input_pdfs, file_corpus_input=args.input_tokens,
                             file_metadata=args.input_meta, file_output_prefix=args.output_prefix,
                             abstracts_only=args.abstracts_only, model=args.topic_model,
                             language_filter=args.lang_filter, batch_size=500, n_threads=None,
                             topic_layers=args.layers, topic_limit_per_layer=args.limit_classification,
                             # the category layer is only usable when metadata was supplied
                             category_layer=(args.input_meta is not None),
                             min_docs_per_topic=args.min_docs_per_topic,
                             token_min_count=5, dict_size_limit=args.vocab_size,
                             document_limit=args.doc_limit)
    topic_model.build()
| 53.964639 | 140 | 0.627919 | 30,472 | 0.798679 | 2,762 | 0.072393 | 4,434 | 0.116216 | 0 | 0 | 13,555 | 0.35528 |
936b0f95e7f3ca1f6eafc51508f66b72158c5fa4 | 1,247 | py | Python | python/misc/switcharoo.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | python/misc/switcharoo.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | 2 | 2022-03-10T03:49:14.000Z | 2022-03-14T00:49:54.000Z | python/misc/switcharoo.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Switcharoo.
Create a function that takes a string and returns a new string with its
first and last characters swapped, except under three conditions:
If the length of the string is less than two, return "Incompatible.".
If the argument is not a string, return "Incompatible.".
If the first and last characters are the same, return "Two's a pair.".
Source:
https://edabit.com/challenge/tnKZCAkdnZpiuDiWA
"""
def flip_end_chars(txt):
    """Swap the first and last characters of *txt*.

    Returns "Incompatible." for non-strings or strings shorter than two
    characters, and "Two's a pair." when the end characters already match.
    """
    if not isinstance(txt, str) or len(txt) < 2:
        return "Incompatible."
    head, tail = txt[0], txt[-1]
    if head == tail:
        return "Two's a pair."
    return tail + txt[1:-1] + head
def main():
    """Exercise flip_end_chars against the documented edge cases."""
    cases = [
        ("Cat, dog, and mouse.", ".at, dog, and mouseC"),
        ("Anna, Banana", "anna, BananA"),
        ("[]", "]["),
        ("", "Incompatible."),
        ([1, 2, 3], "Incompatible."),
        ("dfdkf49824fdfdfjhd", "Two's a pair."),
        ("#343473847#", "Two's a pair."),
    ]
    for given, expected in cases:
        assert flip_end_chars(given) == expected
    print('Passed.')
main()
| 31.175 | 75 | 0.655172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 726 | 0.582197 |
936be3218f3e880a40b9ff8cc02572fc2dea5387 | 2,212 | py | Python | external/unbound/libunbound/python/examples/async-lookup.py | simplixcurrency/simplix | dd313f6fe5a42cf508b19aea3f49cb8ba6b5dbf1 | [
"BSD-3-Clause"
] | 1,751 | 2016-11-03T18:25:34.000Z | 2022-03-30T17:43:26.000Z | external/unbound/libunbound/python/examples/async-lookup.py | simplixcurrency/simplix | dd313f6fe5a42cf508b19aea3f49cb8ba6b5dbf1 | [
"BSD-3-Clause"
] | 603 | 2017-03-03T19:51:58.000Z | 2022-03-31T12:56:58.000Z | external/unbound/libunbound/python/examples/async-lookup.py | simplixcurrency/simplix | dd313f6fe5a42cf508b19aea3f49cb8ba6b5dbf1 | [
"BSD-3-Clause"
] | 296 | 2016-11-14T07:00:11.000Z | 2022-03-29T00:56:58.000Z | #!/usr/bin/python
'''
async-lookup.py : This example shows how to use asynchronous lookups
Authors: Zdenek Vasicek (vasicek AT fit.vutbr.cz)
Marek Vavrusa (xvavru00 AT stud.fit.vutbr.cz)
Copyright (c) 2008. All rights reserved.
This software is open source.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import print_function
import unbound
import time
# resolver context configured from the system's nameserver list
ctx = unbound.ub_ctx()
ctx.resolvconf("/etc/resolv.conf")
def call_back(my_data, status, result):
    """Async-resolve callback: report the result and flag completion in *my_data*."""
    print("Call_back:", sorted(my_data))
    if status == 0 and result.havedata:
        print("Result:", sorted(result.data.address_list))
    # signal the polling loop in the main thread that we are finished
    my_data['done_flag'] = True
# shared state handed to the callback; 'done_flag' flips when resolution finishes
my_data = {'done_flag':False,'arbitrary':"object"}
# fire off a non-blocking A-record lookup; call_back runs when it completes
status, async_id = ctx.resolve_async("www.nic.cz", my_data, call_back, unbound.RR_TYPE_A, unbound.RR_CLASS_IN)
# poll the context until the callback signals completion or an error occurs
while (status == 0) and (not my_data['done_flag']):
    status = ctx.process()
    time.sleep(0.1)
if (status != 0):
    print("Resolve error:", unbound.ub_strerror(status))
| 38.137931 | 110 | 0.758137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,642 | 0.742315 |
936be74b5161c4ea335dcc47745288106a326b76 | 244 | py | Python | 1/one.py | TheFrederick-git/adventofcode2021 | a320f3bba2655afab1aad8bf2520ccb705b2fd1e | [
"MIT"
] | null | null | null | 1/one.py | TheFrederick-git/adventofcode2021 | a320f3bba2655afab1aad8bf2520ccb705b2fd1e | [
"MIT"
] | null | null | null | 1/one.py | TheFrederick-git/adventofcode2021 | a320f3bba2655afab1aad8bf2520ccb705b2fd1e | [
"MIT"
] | null | null | null | """1/1 adventofcode"""
# read the puzzle input: one integer reading per line
with open("input.txt", "r", encoding="UTF-8") as i_file:
    data = list(map(int, i_file.read().splitlines()))
# mark each reading as 'i' (increased) or 'd' (decreased) vs. its predecessor
values = ["i" if data[i] > data[i - 1] else "d" for i in range(1, len(data))]
# answer: how many readings increased
print(values.count("i"))
| 34.857143 | 78 | 0.598361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.213115 |
936c4abf26bcf5aae9aaddd431b1ecdde9736568 | 3,345 | py | Python | fcsgg/modeling/backbone/resnet.py | liuhengyue/fcsgg | 826c6e194270461a66ca5d048cb67f1ccf7ef387 | [
"MIT"
] | 9 | 2022-01-17T03:27:46.000Z | 2022-03-26T09:35:59.000Z | fcsgg/modeling/backbone/resnet.py | liuhengyue/fcsgg | 826c6e194270461a66ca5d048cb67f1ccf7ef387 | [
"MIT"
] | 3 | 2022-01-26T03:28:18.000Z | 2022-02-03T04:19:29.000Z | fcsgg/modeling/backbone/resnet.py | liuhengyue/fcsgg | 826c6e194270461a66ca5d048cb67f1ccf7ef387 | [
"MIT"
] | null | null | null | """
Simple ResNet FPN that only outputs p2.
Modified from https://github.com/HRNet/Higher-HRNet-Human-Pose-Estimation/blob/master/lib/models/pose_higher_hrnet.py
"""
__author__ = "Hengyue Liu"
__copyright__ = "Copyright (c) 2021 Futurewei Inc."
__credits__ = []
__license__ = "MIT License"
__version__ = "0.1"
__maintainer__ = "Hengyue Liu"
__email__ = "onehothenry@gmail.com"
import torch.nn.functional as F
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import BACKBONE_REGISTRY, build_resnet_backbone, FPN
class SingleFPN(FPN):
    """
    An FPN variant that exposes exactly one pyramid level (default "p2").

    The parent FPN builds the full lateral/top-down pathway; this subclass
    restricts the advertised outputs to a single level and drops unused
    per-level output convolutions. No top block is used.
    """
    def __init__(
        self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum", level="p2"
    ):
        # build the regular multi-level FPN first, then prune it down to `level`
        super(SingleFPN, self).__init__(bottom_up, in_features, out_channels,
                                        norm=norm, top_block=top_block, fuse_type=fuse_type)
        assert level in self._out_features, "{} is not in the out_features list of FPN.".format(level)
        self.level = level
        # advertise only the chosen level's stride/channels to downstream code
        self._out_feature_strides = {self.level: self._out_feature_strides[self.level]}
        self._out_features = [self.level]
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        # NOTE(review): assumes the parent FPN registers attributes named
        # output_convs / fpn_output2..4 and keeps fpn_output5 (used in forward)
        # -- confirm against the installed detectron2 version.
        del self.output_convs
        del self.fpn_output2, self.fpn_output3, self.fpn_output4
    def forward(self, x):
        """
        Args:
            x: input image tensor fed to the bottom-up network.
        Returns:
            dict[str->Tensor]:
                a single-entry mapping {level_name: feature map} for the
                level selected in __init__.
        """
        # Reverse feature maps into top-down order (from low to high resolution)
        bottom_up_features = self.bottom_up(x)
        x = [bottom_up_features[f] for f in self.in_features[::-1]]
        prev_features = self.lateral_convs[0](x[0])
        for features, lateral_conv in zip(
                x[1:], self.lateral_convs[1:]):
            # upsample the coarser map and fuse it with the lateral projection
            top_down_features = F.interpolate(prev_features, scale_factor=2, mode="nearest")
            lateral_features = lateral_conv(features)
            prev_features = lateral_features + top_down_features
            if self._fuse_type == "avg":
                prev_features /= 2
        prev_features = self.fpn_output5(prev_features)
        return {self._out_features[0]: prev_features}
@BACKBONE_REGISTRY.register()
def build_resnet_fpn_p2_backbone(cfg, input_shape: ShapeSpec):
    """
    Build a ResNet bottom-up network wrapped in a SingleFPN, which exposes
    only one pyramid level.

    Args:
        cfg: a detectron2 CfgNode
        input_shape: channel/stride description of the network input
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_resnet_backbone(cfg, input_shape)
    in_features = cfg.MODEL.FPN.IN_FEATURES
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    backbone = SingleFPN(
        bottom_up=bottom_up,
        in_features=in_features,
        out_channels=out_channels,
        norm=cfg.MODEL.FPN.NORM,
        # no extra top block: only the single selected level is produced
        top_block=None,
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
    return backbone
936c5a70ec50a4830edce31139219ee55e7967af | 198 | py | Python | musicdb/__init__.py | ieuan-jones/musicdb | e894614cb70881eca810d97f7362a68398c3c3b0 | [
"BSD-3-Clause"
] | null | null | null | musicdb/__init__.py | ieuan-jones/musicdb | e894614cb70881eca810d97f7362a68398c3c3b0 | [
"BSD-3-Clause"
] | null | null | null | musicdb/__init__.py | ieuan-jones/musicdb | e894614cb70881eca810d97f7362a68398c3c3b0 | [
"BSD-3-Clause"
] | null | null | null | import os
from flask import Flask
def create_app():
app = Flask(__name__, instance_relative_config=True)
from . import catalogue
app.register_blueprint(catalogue.bp)
return app
| 16.5 | 56 | 0.732323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
936f55fdfaa40d9286b2780e20a668166b6fc40f | 273 | py | Python | backfill/save_to_gcs.py | grollins/quandl-gcp-pipeline | 1b6c247fb3ad1ee33bd3cf363d119c124b8e26b2 | [
"MIT"
] | null | null | null | backfill/save_to_gcs.py | grollins/quandl-gcp-pipeline | 1b6c247fb3ad1ee33bd3cf363d119c124b8e26b2 | [
"MIT"
] | null | null | null | backfill/save_to_gcs.py | grollins/quandl-gcp-pipeline | 1b6c247fb3ad1ee33bd3cf363d119c124b8e26b2 | [
"MIT"
] | null | null | null | from google.cloud import storage
# connect to Google Cloud Storage and select the target bucket
GCS_CLIENT = storage.Client()
GCS_BUCKET = GCS_CLIENT.get_bucket('senpai-io.appspot.com')
# destination object path inside the bucket
path = 'quandl-stage/backfill_data_jan2015_mar2018.csv'
blob = GCS_BUCKET.blob(path)
# upload the local backfill CSV to the bucket
blob.upload_from_filename(filename='data_jan2015_mar2018.csv')
| 30.333333 | 62 | 0.81685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.355311 |
93700816a23a94674ae0ee1adf228796c45d26ba | 740 | py | Python | CMS/test/mocks/search_mocks.py | office-for-students/wagtail-CMS | 98789c279edf48f2bbedb5415437da3317f0e12b | [
"MIT"
] | 4 | 2019-06-04T07:18:44.000Z | 2020-06-15T22:27:36.000Z | CMS/test/mocks/search_mocks.py | office-for-students/wagtail-CMS | 98789c279edf48f2bbedb5415437da3317f0e12b | [
"MIT"
] | 38 | 2019-05-09T13:14:56.000Z | 2022-03-12T00:54:57.000Z | CMS/test/mocks/search_mocks.py | office-for-students/wagtail-CMS | 98789c279edf48f2bbedb5415437da3317f0e12b | [
"MIT"
] | 3 | 2019-09-26T14:32:36.000Z | 2021-05-06T15:48:01.000Z | import json
from requests.models import Response
from http import HTTPStatus
from CMS.test.mocks.search_mocks_content import content
class SearchMocks:
    """Canned `requests.Response` objects for exercising search-result handling."""

    @classmethod
    def get_search_response_content(cls):
        """Return the fixture payload used for a successful search."""
        return content

    @classmethod
    def get_successful_search_response(cls):
        """A 200 response whose body is the JSON-encoded fixture content."""
        return cls._build_response(HTTPStatus.OK, cls.get_search_response_content())

    @classmethod
    def get_unsuccessful_search_response(cls):
        """A 500 response with a JSON-encoded null body."""
        return cls._build_response(HTTPStatus.INTERNAL_SERVER_ERROR, None)

    @staticmethod
    def _build_response(status, payload):
        # Response offers no public way to set a body, so assign _content directly.
        response = Response()
        response.status_code = status
        response._content = json.dumps(payload).encode('utf-8')
        return response
| 27.407407 | 89 | 0.714865 | 604 | 0.816216 | 0 | 0 | 563 | 0.760811 | 0 | 0 | 14 | 0.018919 |
9370464ed328cc5d44014a45d60a36057d95bbda | 1,818 | py | Python | code/hw2/performance.py | edrebin/NLP-Course | 004af059e4a48b9086dc122d32c864799f1f16f1 | [
"Apache-2.0"
] | 9 | 2021-08-29T15:23:09.000Z | 2022-01-09T20:13:39.000Z | code/hw2/performance.py | edrebin/NLP-Course | 004af059e4a48b9086dc122d32c864799f1f16f1 | [
"Apache-2.0"
] | 1 | 2022-03-08T11:05:29.000Z | 2022-03-08T13:48:32.000Z | code/hw2/performance.py | edrebin/NLP-Course | 004af059e4a48b9086dc122d32c864799f1f16f1 | [
"Apache-2.0"
] | 2 | 2021-12-12T22:11:44.000Z | 2022-01-26T03:55:56.000Z | import numpy as np
from spacy.pipeline.sentencizer import Sentencizer
from glob import glob
from spacy.lang.en import English
def metrics(a, b):
    """Return (accuracy, recall, precision, f1) for true labels *a* vs. predictions *b*."""
    # sklearn is imported inside the function (only needed when scores are computed)
    from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score
    scorers = (accuracy_score, recall_score, precision_score, f1_score)
    return tuple(score(a, b) for score in scorers)
def performance(colgate=None):
    """Score a sentence-boundary detector against hand-marked reference files.

    For every ``marked-*.txt`` file the corresponding unmarked file is
    segmented with *colgate* (default: spaCy's Sentencizer) and compared
    token-by-token. In the marked files, '#' appears to be the manual
    sentence-boundary marker -- TODO confirm.

    Returns:
        list of (test_file, (accuracy, recall, precision, f1), n_sentences).
    """
    colgate = colgate if colgate is not None else Sentencizer()
    nlp = English()
    output = []
    for test in glob("marked-*.txt"):
        # the unmarked input shares the marked file's name minus the prefix
        input = test.replace("marked-", "")
        txt = open(input).read()
        tokens = nlp(open(test).read())
        hy_tokens = colgate(nlp(txt))
        # tokenizations must align one-to-one for the label comparison below
        assert len(tokens) == len(hy_tokens)
        y = [False] * len(tokens)
        seen_period = False
        for i, tok in enumerate(tokens):
            is_in_punct_chars = tok.text in Sentencizer.default_punct_chars
            # the first real token after a '#' marker starts a new sentence
            if seen_period and not tok.is_punct and not is_in_punct_chars and not tok.is_space:
                y[i] = True
                seen_period = False
            elif tok.is_punct and tok.text == "#":
                seen_period = True
        y = np.array(y, dtype=bool)
        # the very first token always opens a sentence
        y[0] = True
        hy = np.array([x.is_sent_start for x in hy_tokens])
        _ = metrics(y, hy)
        output.append((test, _, y.sum()))
    return output
if __name__ == "__main__":
    from hw2 import ColgateSBD
    from glob import glob
    from spacy.lang.en import English
    # evaluate the custom sentence-boundary detector first...
    output = performance(ColgateSBD())
    for input, perf, n_sent in output:
        print("Input:", input, perf, "Number of sentences:", n_sent)
    print("*" * 5, "Sentencizer", "*" * 5)
    # ...then spaCy's built-in Sentencizer as a baseline
    output = performance()
    for input, perf, n_sent in output:
        print("Input:", input, perf, "Number of sentences:", n_sent)
| 31.344828 | 95 | 0.608361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.064356 |
93706b196b7d6d2ef7011340df5c93edb90333c5 | 6,893 | py | Python | phonebook02/contact.py | pgThiago/saving-in-txt-python | dda28fb3c8033eb16650e630a579bc66e475465f | [
"MIT"
] | 1 | 2020-08-15T23:22:44.000Z | 2020-08-15T23:22:44.000Z | phonebook02/contact.py | pgThiago/saving-in-txt-python | dda28fb3c8033eb16650e630a579bc66e475465f | [
"MIT"
] | null | null | null | phonebook02/contact.py | pgThiago/saving-in-txt-python | dda28fb3c8033eb16650e630a579bc66e475465f | [
"MIT"
] | null | null | null | from os import path
from operator import itemgetter
from time import sleep
class Contact:
    """A flat-file phone book backed by 'contacts.txt'.

    Each line of the file stores one contact as
    ``NAME=%$#=PHONE=%$#=BIRTHDAY``. Methods take the caller's working
    list/dict (``lis``/``di``) and keep them in sync with the file.

    Bug fixes vs. the original:
    * delete/change/search inspected only the FIRST contact (the loop
      returned False from its else branch on the first non-match);
    * add_to_dictionary ignored its parameters and read stale self.* state;
    * several file handles were never closed (now all use ``with``).
    """

    def check_if_txt_exists(self):
        '''Return True if contacts.txt exists; otherwise create it and return False.'''
        if path.exists('contacts.txt'):
            return True
        # create an empty store so later reads do not fail
        with open('contacts.txt', 'w'):
            pass
        return False

    def is_empty(self):
        '''Return True if contacts.txt contains no lines.'''
        with open('contacts.txt', 'r') as file:
            return not file.readlines()

    @staticmethod
    def _unique_sorted(lis):
        '''Return lis sorted by name with duplicate entries removed.'''
        final_list = []
        for item in sorted(lis, key=itemgetter('Name')):
            if item not in final_list:
                final_list.append(item)
        return final_list

    def add_to_list(self, lis, di):
        '''Append a copy of the dict di to the contact list lis.'''
        lis.append(di.copy())

    def add_to_dictionary(self, di, lis, name, phone, birthday):
        '''Fill di with the given contact fields and append it to lis.'''
        # use the parameters (the original read self.* and ignored them)
        di["Name"] = name
        di["Phone"] = phone
        di["Birthday"] = birthday.strip('\n')
        self.add_to_list(lis, di)

    def your_contacts(self, lis, di):
        '''Pretty-print every contact stored in the txt file.'''
        self.get_datas_from_txt(lis, di)
        final_list = self._unique_sorted(lis)
        if len(final_list) == 1:
            one_contact = f'{len(final_list)} CONTACT'
            print(f'{one_contact:>74}')
        else:
            lot_of_contacts = f'{len(final_list)} CONTACTS'
            print(f'{lot_of_contacts:>75}')
        sleep(0.3)
        yc = 'YOUR CONTACTS LIST'
        print(f'{yc:>83}')
        print('=-=' * 50)
        for person in final_list:
            sleep(0.3)
            txt = f'Name: {person["Name"]:<50} Phone: {person["Phone"]:<50} Birthday: {person["Birthday"]:>10}'
            print(f'{txt:^150}')
        print('=-=' * 50)

    def get_datas_from_txt(self, lis, di):
        '''Load every contact line from the txt file into lis (via di).'''
        with open('contacts.txt', 'r') as file:
            for line in file.readlines():
                # fields are joined with the '=%$#=' sentinel
                fields = line.strip('\n').split('=%$#=')
                self.name = fields[0]
                self.phone = fields[1]
                self.birthday = fields[2].strip('\n')
                self.add_to_dictionary(di, lis, self.name, self.phone, self.birthday)

    def send_to_txt(self, lis):
        '''Write the deduplicated, name-sorted contact list back to the txt file.'''
        final_list = self._unique_sorted(lis)
        with open('contacts.txt', 'w') as file:
            for person in final_list:
                string = (f'{person["Name"]}=%$#={person["Phone"]}=%$#={person["Birthday"]}').strip()
                file.write(f'{string}\n')

    # add, delete, change and search methods
    # ===================================================================================== #
    def add(self, lis, di):
        '''Prompt for a new contact and append it to the txt file.'''
        self.get_datas_from_txt(lis, di)
        self.name = input('{:>70}'.format('Name: ')).upper().strip()
        self.phone = input('{:>71}'.format('Phone: '))
        self.birthday = input('{:>105}'.format('Birthday (dd/mm/yyyy) please add "/ /" : ')).strip('\n')
        self.add_to_dictionary(di, lis, self.name, self.phone, self.birthday)
        self.send_to_txt(lis)

    def delete(self, lis, di, phone_that_will_be_deleted):
        '''Return True if a contact with the given phone number exists.'''
        self.get_datas_from_txt(lis, di)
        # check ALL contacts (the original returned after the first one)
        return any(con["Phone"] == phone_that_will_be_deleted for con in lis)

    def del_contact(self, lis, di, phone_that_will_be_deleted):
        '''Remove every contact with the given phone number from the txt file.'''
        self.get_datas_from_txt(lis, di)
        if self.delete(lis, di, phone_that_will_be_deleted):
            # keep only the non-matching entries and persist them
            lis[:] = [con for con in lis if con["Phone"] != phone_that_will_be_deleted]
            self.send_to_txt(lis)

    def change(self, lis, di, ch_phone_number, ch_to_change):
        '''Return True if a contact with phone number ch_phone_number exists.

        ch_to_change (the contact's name) is kept for interface
        compatibility but is not needed for the lookup.
        '''
        self.get_datas_from_txt(lis, di)
        return any(con["Phone"] == ch_phone_number for con in lis)

    def add_to_dic_after_calling_change_function(self, lis, di, ch_phone_number, ch_to_change):
        '''Replace the contact matching ch_phone_number with freshly prompted data.'''
        if self.change(lis, di, ch_phone_number, ch_to_change):
            # drop the old entry, then prompt once for its replacement
            lis[:] = [con for con in lis if con["Phone"] != ch_phone_number]
            newName = input('{:>74}'.format('New name: ')).upper().strip()
            newPhone = input('{:>75}'.format('New phone: ')).strip()
            newBirthday = input('{:>105}'.format('Birthday (dd/mm/yyyy) please add "/ /" : ')).strip('\n')
            di["Name"] = newName
            di["Phone"] = newPhone
            di["Birthday"] = newBirthday
            self.add_to_list(lis, di)
            self.send_to_txt(lis)

    def search(self, lis, di, sear):
        '''Return True if a contact named sear exists in the txt file.'''
        self.get_datas_from_txt(lis, di)
        return any(con["Name"] == sear for con in self._unique_sorted(lis))

    def show_wanted_contact(self, lis, di, sear):
        '''Print every stored contact whose name equals sear.'''
        if self.search(lis, di, sear):
            for con in self._unique_sorted(lis):
                if con["Name"] == sear:
                    fo = f'Here it is ====>>>> | Name: {con["Name"]} Phone: {con["Phone"]} Birthday: {con["Birthday"]} |'
                    print(f'{fo:>75}')
| 38.943503 | 123 | 0.526041 | 6,815 | 0.988684 | 0 | 0 | 0 | 0 | 0 | 0 | 1,868 | 0.271 |
93717d5c39265a03944d975e47f65b92a746680b | 84 | py | Python | src/vpnchooser/__init__.py | cbrand/vpnchooser | 0982baeb14eff1e0b7ef5dbc8f11b3a7213f341c | [
"MIT"
] | null | null | null | src/vpnchooser/__init__.py | cbrand/vpnchooser | 0982baeb14eff1e0b7ef5dbc8f11b3a7213f341c | [
"MIT"
] | null | null | null | src/vpnchooser/__init__.py | cbrand/vpnchooser | 0982baeb14eff1e0b7ef5dbc8f11b3a7213f341c | [
"MIT"
] | 1 | 2016-05-31T16:14:37.000Z | 2016-05-31T16:14:37.000Z | # -*- encoding: utf-8 -*-
from .applicaton import app, api
from . import resources
| 16.8 | 32 | 0.678571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.297619 |
9371f372d231442d071f230b9b470e2409d71503 | 3,157 | py | Python | scripts/pughpore/passagetime-simple.py | jhwnkim/nanopores | 98b3dbb5d36464fbdc03f59d224d38e4255324ce | [
"MIT"
] | 8 | 2016-09-07T01:59:31.000Z | 2021-03-06T12:14:31.000Z | scripts/pughpore/passagetime-simple.py | jhwnkim/nanopores | 98b3dbb5d36464fbdc03f59d224d38e4255324ce | [
"MIT"
] | null | null | null | scripts/pughpore/passagetime-simple.py | jhwnkim/nanopores | 98b3dbb5d36464fbdc03f59d224d38e4255324ce | [
"MIT"
] | 4 | 2017-12-06T17:43:01.000Z | 2020-05-01T05:41:14.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# (c) 2017 Gregor Mitscha-Baude
# TODO: obtain rD from actual simulation
from nanopores import fields, kT, eta, qq, savefigs
from numpy import exp, pi, sqrt, linspace, diff, array, dot
L = 46e-9 # length of pore
r = 2.0779e-9 # radius of protein trypsin
V = 0.08 # applied potential
E = V/L # electric field
rD = 0.2
#D = rD* kT/(6.*pi*eta*r) # diffusion constant (Stokes)
# load translocation events without binding
name = "events3_nobind_new"
fields.set_dir_mega()
data = fields.get_fields(name)
# take only events that translocated
data.pop("b1")
data.pop("b2")
data, _ = fields._subset(data, data["ood"], lambda x: x==0)
data, times = fields._sorted(data, data["t"])
print "mean"
D = array(data["Dzavg"]).mean() * 1e-9
F = -array(data["Fzavg"]).mean()
v = D/kT * F # electrophoretic velocity
print "D = ", D, "F = ", F, "v = ", v
print "at x = (0,0,0)"
D = 6.8e-12
F = 1.5e-11
v = D/kT * F # electrophoretic velocity
print "D = ", D, "F = ", F, "v = ", v
def mean(lst):
return sum(lst)/float(len(lst))
def maximum_likelihood(times, n=10):
times = 1e-3 * array(times)
T = mean(times)
Tinv = mean([1./t for t in times])
def amean(v):
return mean([1./(1. + L/(v*t)) for t in times])
def fix(v):
a = amean(v)
factor = (sqrt((a-.5)**2 + T*Tinv*a*(1-a)) - (a-.5))/(1-a)
print a
#print factor
return L/T * factor
v = L*sqrt(Tinv/T) # this initial guess is accurate to 1e-7!!
for i in range(n):
v0 = v
#print "i = %d: v = %s" % (i, v)
v = fix(v)
print "i = %d: dv = %s" % (i, abs(v-v0))
D = v**2/2.*T - v*L + L**2/2.*Tinv
return v, D
v, D = maximum_likelihood(times)
print "maximum likelihood"
print "D = ", D, "F = ", v*kT/D, "v = ", v
# simple 1D model from Talaga2009
def p(t, timescale=1.):
# timescale: 1 -> s, 1e-3 -> ms etc
t *= timescale
return exp(-(L - t*v)**2/(4.*t*D)) * (L + t*v) / (4.*t * sqrt(pi*t*D))
def pp(times, timescale=1.):
return array([p(t, timescale) for t in times])
def integrate(t, pt):
pt = array(pt)
dt = diff(t)
values = 0.5*(pt[:-1] + pt[1:])
return dot(values, dt)
def integrate_hist(hist):
n, bins, _ = hist
dt = diff(bins)
return dot(n, dt)
# scale times
scale = 1e-6 # microseconds
times = [t*1e-3/scale for t in times]
from matplotlib import pyplot as plt
t = linspace(1e-9/scale, 8e-6/scale, 500)
hist = plt.hist(times, bins=30, color="#aaaaff", linewidth=0.5,
weights=[1./500.]*len(times),
label="BD simulations")
pt = pp(t, scale) * integrate_hist(hist) * scale
plt.plot(t, pt, "-", color="g", linewidth=3, label="FPT model")
plt.legend(loc="upper right", frameon=False)
plt.xlabel(u"dwell time [µs]")
plt.ylabel(u"rel. frequency")
print "integral", integrate_hist(hist), "==", integrate(t, pt)
#plt.figure()
#plt.hist(data["Fzavg"], bins=30, color="#aaaaff", linewidth=0.5)
#
#plt.figure()
#plt.hist(data["Dzavg"], bins=30, color="#aaaaff", linewidth=0.5)
from folders import FIGDIR
savefigs("current-nobind-hist", FIGDIR + "/rw")
| 25.877049 | 74 | 0.596136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 991 | 0.313806 |
9372415bb53340040af72936577c024343d4574e | 15,605 | py | Python | IMU_algorithms/record_data.py | nesl/UnderwaterSensorTag | c0bf499b4bd7588d74bb2a0228f077d1fc38e7f5 | [
"BSD-3-Clause"
] | null | null | null | IMU_algorithms/record_data.py | nesl/UnderwaterSensorTag | c0bf499b4bd7588d74bb2a0228f077d1fc38e7f5 | [
"BSD-3-Clause"
] | null | null | null | IMU_algorithms/record_data.py | nesl/UnderwaterSensorTag | c0bf499b4bd7588d74bb2a0228f077d1fc38e7f5 | [
"BSD-3-Clause"
] | 1 | 2019-03-01T00:48:59.000Z | 2019-03-01T00:48:59.000Z | from modules.mpulib import computeheading, attitudefromCompassGravity, RP_calculate, MadgwickQuaternionUpdate, Euler2Quat, quaternion_to_euler_angle, MPU9250_computeEuler
import socket, traceback
import csv
import struct
import sys, time, string, pygame
import pygame
import pygame.draw
import pygame.time
import numpy as np
from math import sin, cos, acos
from modules.euclid import Vector3, Quaternion
from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen
import math
# from pygame.locals import *
# from ponycube import *
from modules.madgwickahrs import *
import modules.quaternion
from modules.quaternion import QuaternionClass
from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler
from math import atan2, atan
from numpy.linalg import inv
from numpy import linalg as LA
# import euclid
import serial
# open the IMU's USB serial link (macOS device path -- adjust per machine)
ser = serial.Serial('/dev/tty.usbmodem14411')
ser.baudrate = 115200
ser.timeout = 3
prev_time = 0
# every raw serial line is mirrored into this log file for offline analysis
filename = open('/Users/eunsunlee/Documents/NESL/UnderwaterSensorTag/IMU_algorithms/optitrack/imu_movement.txt','w')
# earlier magnetometer calibration values (kept for reference)
# offset_mx = 77.345
# offset_my = -13.725
# offset_mz = -71.64
# scale_mx = 1.1
# scale_my = 1.13
# scale_mz = 0.827
# LAB
# magnetometer offsets/scales applied to the raw uT readings in the main loop
offset_mx = 71.12
offset_my = -30.385
offset_mz = -66.24
scale_mx = 1.210645853980839
scale_my = 1.1778152745972439
scale_mz = 0.7547368963031613
# nominal sample period in seconds (10 Hz) -- TODO confirm against sensor rate
dt = 1/10
# set True to render the live orientation cubes with pygame
visualIMU = False
# optional live 3-D visualization: five cubes, one per orientation estimate
if visualIMU:
    pygame.init()
    screen = Screen(1600,400,scale=1.5)
    # one cube per algorithm slot, laid out left to right
    cube1 = Cube(40,30,60)
    cube2 = Cube(40,30,60)
    cube3 = Cube(40,30,60)
    cube4 = Cube(40,30,60)
    cube5 = Cube(40,30,60)
    # start all orientations at the identity quaternion
    q1 = Quaternion(1,0,0,0)
    q2 = Quaternion(1,0,0,0)
    q3 = Quaternion(1,0,0,0)
    q4 = Quaternion(1,0,0,0)
    q5 = Quaternion(1,0,0,0)
    # screen-space positions of the five cubes
    p1 = Vector3(-400,0,0)
    p2 = Vector3(-200,0,0)
    p3 = Vector3(0,0,0)
    p4 = Vector3(200,0,0)
    p5 = Vector3(400,0,0)
    incr = Quaternion(0.96,0.01,0.01,0).normalized()
    # draw the initial (identity) pose of every cube
    cube1.erase(screen)
    cube1.draw(screen,q1,p1)
    cube2.erase(screen)
    cube2.draw(screen,q2,p2)
    cube3.erase(screen)
    cube3.draw(screen,q3,p3)
    cube4.erase(screen)
    cube4.draw(screen,q4,p4)
    cube5.erase(screen)
    cube5.draw(screen,q5,p5)
# Madgwick
# two Madgwick AHRS filter instances (second one's usage is not visible in this excerpt)
Imupredict = MadgwickAHRS();
Imupredict2 = MadgwickAHRS();
# A3
# state for the A3/MUSE orientation algorithms: last gyro sample,
# similarity windows, and sample accumulators
omega0 = [0,0,0]
similaritywindowA3 = 0
Sc = []
Sg = []
C = []
G = []
Eg = 0
quatA3 = QuaternionClass(1, 0, 0, 0)
quatMuseAlg = QuaternionClass(1, 0, 0, 0)
similaritywindowMUSE = 0
initial = 0
update = 0
# accelerometer sample buffers
Ax = []
Ay = []
Az = []
# filter gain parameter -- consumed outside this excerpt, TODO confirm
beta = 0.80
# latest fused orientation estimate, starts at identity
quat = QuaternionClass(1,0,0,0)
# 1 Hz - 1000
# 10 Hz - 100
while True:
reading = ser.readline()
print(reading, file = filename)
# print(reading)
sp = str(reading).split(',')
# print(sp)
time = float(sp[0][2:].strip())
# reads in g so multiply by 9.8
ax = float(sp[1].strip())
ay = float(sp[2].strip())
az = float(sp[3].strip())
ax = ax*9.8
ay = ay*9.8
az = az*9.8
gx = float(sp[4].strip())*math.pi/180 #rad/s
gy = float(sp[5].strip())*math.pi/180 #rad/s
gz = float(sp[6].strip())*math.pi/180 #rad/s
#uT
mx = float(sp[7].strip())
my = float(sp[8].strip())
mz = float(sp[9].strip())
mx = mx - offset_mx
my = my - offset_my
mz = mz - offset_mz
mx = mx*scale_mx
my = my*scale_my
mz = mz*scale_mz
qw = float(sp[10].strip())
qx = float(sp[11].strip())
qy = float(sp[12].strip())
qz = float(sp[13].strip())
pitch = float(sp[14].strip())
roll = float(sp[15].strip())
yaw = float(sp[16].strip())
dq = QuaternionClass(0,0,-1,0)
# print("yaw, pitch, roll: ", yaw, pitch, roll)
heading = float(sp[17].split('\\r')[0].strip())
# print("heading: ", heading)
# print(computeheading(mx,my))
# print(yaw, pitch, roll)
accel = [ax, ay, az]
gyro = [gx, gy, gz]
mag = [mx, my, mz]
# print(accel)
a333 = 0
# yawAM, pitchAM, rollAM, quatAM = AccMagOrientation(accel, mag)
# print("ypr: ", yaw, pitch, roll)
# print("ypr: ", yawAM, pitchAM, rollAM)
# print("heading: ", heading)
# print(headingM)
# time_diff = 60
if visualIMU: #quaternion from imu
# yellow area facing straight if imu hold with usbside facing me
# print("yaw: ", yaw)
# q1w = float(sp[10].strip())
# q1x = float(sp[11].strip())
# q1z = -float(sp[12].strip())
# q1y = float(sp[13].split('\\r')[0].strip())
# quatMDP = QuaternionClass(q1w, q1x, q1y, q1z)
# rollMDP, pitchMDP, yawMDP = QuatToEuler(quatMDP)
# print("yawMDP: ", yawMDP)
# quat = QuaternionClass(qw, qx, qy, -qz) *dq
q1.w = quat[0]
q1.x = quat[1]
q1.z = quat[3]
q1.y = quat[2]
q1 = q1.normalized()
cube1.erase(screen)
cube1.draw(screen,q1,p1)
# print("yaw: ", yaw )
if visualIMU: # Madgwick Algorithm
Imupredict.samplePeriod = 0.025#0.1
Imupredict.update(gyro,accel,mag)
quatMad = Imupredict.quaternion
quatMad = qnormalized(quatMad)
Imupredict.quaternion = quatMad
#quatMad = quatNormalized(quatMad)
yawMad, pitchMad, rollMad = QuatToEuler(quatMad)
# print("yawMad: ", yawMad*180/math.pi)
quat = QuaternionClass(quatMad[0], quatMad[1], quatMad[3], quatMad[2])
q2.w = quat[0]
q2.x = quat[1]
q2.z = quat[3]
q2.y = quat[2]
q2 = q2.normalized()
cube2.erase(screen)
cube2.draw(screen,q2,p2)
if False:
# quat = MadgwickQuaternionUpdate(ax, ay, az, gx, gy, gz, mx, my, mz, quat)
# q5.w = quat[0]
# q5.x = quat[1]
# q5.z = -quat[2]
# q5.y = quat[3]
# q5 = q5.normalized()
# cube5.erase(screen)
# cube5.draw(screen,q5,p5)
yawT, pitchT, rollT, quatT = androidAccMag2Euler(accel, mag)
if yawT > 0:
yawT = 360 - yawT*180/math.pi
else:
yawT = -yawT*180/math.pi
# print("yaw: ",yawT)
q5.w = quatT[0]
q5.x = quatT[1]
q5.z = -quatT[2]
q5.y = quatT[3]
q5 = q5.normalized()
cube5.erase(screen)
cube5.draw(screen,q5,p5)
# Imupredict2.samplePeriod = 0.1
# Imupredict2.update_imu(gyro,accel)
# quatMad2 = Imupredict2.quaternion
# quatMad2 = qnormalized(quatMad)
# Imupredict2.quaternion = quatMad2
# q5.w = quatMad2[0]
# q5.x = quatMad2[1]
# q5.z = -quatMad2[2]
# q5.y = quatMad2[3]
# q5 = q5.normalized()
# cube5.erase(screen)
# cube5.draw(screen,q5,p5)
# https://stackoverflow.com/questions/32372847/android-algorithms-for-sensormanager-getrotationmatrix-and-sensormanager-getori/35390001#35390001
if visualIMU: #a3
q_a3 = 0
omega1 = [gx, gy, gz]
quatG = IntegrationRK4(omega0, omega1, quatA3, dt)
yawG, pitchG, rollG = QuatToEuler(quatG)
if yawG < 0:
yawG = -yawG*180/math.pi
else:
yawG = 360 - yawG*180/math.pi
# # print(yawG, pitchG, rollG)
omega0 = omega1
# # # A3 Algorithm - accelerometer, magnetometer calibration
# yawAM, pitchAM, rollAM, quatAM = AccMag2Euler(accel, mag)
yawAM, pitchAM, rollAM, quatAM = androidAccMag2Euler(accel, mag)
# # print(yawAM, pitchAM, rollAM)
# # # TODO: Update quaternion if w < 240 degree, a < 2g
w = max(abs(np.array(gyro)))*180/math.pi
a = max(abs(np.array(accel)))
# # # if w < 240 and a < 2*9.8:
# # # print("stable")
# # # else:
# # # print("moving")
# # headingM = headingfromMag(mag)
headingM = computeheading(mx, my)
# print("headingM:" , headingM)
# print("heading: ", headingM)
# print("yawG: ", yawG*180/math.pi)
# # print(headingM)
if similaritywindowA3 > 1:
# print("similaritywindow")
# calculate pc and pg
pc = 1/(2**np.var(np.subtract(Sc,C)))
pg = 1/(2**np.var(np.subtract(Sg,G)))
# print(pc)
# print(pg)
if pc > 0.2 and pg > 0.2:
print("change?")
# TODO: if Ec < Eg, then update quaternion
E1 = -32.14*pc + 19.93
E2 = -12.86*pg + 11.57
Ec = max(E1, E2)
Eg = (Eg + 0.0003*w*dt + 0.001*a*dt)*1000
#print(Ec)
#print(Eg)
if Ec < Eg*1000:
# print(a333)
a333 = a333 + 1
print("A3 reset ")
q_a3 = 1
#quatA3 = quatAM
# # quat = quatAM
# reset values
similaritywindowA3 = 0
C = []
Sc = []
Sg = []
G = []
Eg = 0
else:
# #TODO: update Eg
Eg = Eg + 0.0003*w*dt + 0.001*a*dt
C.append(yawAM)
Sc.append(yawG)
Sg.append(rollG)
G.append(rollAM)
similaritywindowA3 = similaritywindowA3 + dt
if q_a3:
quatA3 = quatAM #QuaternionClass(quatAM[0], quatAM[1], quatAM[2], quatAM[3])
# print("quatAM", quatAM)
else:
quatA3 = quatG
# print("quatG", quatG[0], quatG[1], quatG[2], quatG[3])
# print("quatA3", quatA3[0], quatA3[1], quatA3)
yawA3, pitchA3, rollA3 = QuatToEuler(quatA3)
# print("yawA3: ", yawA3*180/math.pi)
quatA3_temp = QuaternionClass(quatA3[0], quatA3[1], quatA3[3], -quatA3[2])
# quatA3 = quatA3_temp
q3.w = quatA3_temp[0]
q3.x = quatA3_temp[1]
q3.y = quatA3_temp[2]
q3.z = quatA3_temp[3]
q3 = q3.normalized()
cube3.erase(screen)
cube3.draw(screen,q3,p3)
if visualIMU: # MUSE
# # # Initial yaw, pitch, roll from Accelerometer and Magnetometer
#yawAM, pitchAM, rollAM, quatAM = AccMag2Euler(accel, mag)
yawAM, pitchAM, rollAM, quatAM = androidAccMag2Euler(accel, mag)
omega1 = [gx, gy, gz]
quatG = IntegrationRK4(omega0, omega1, quatMuseAlg, dt)
yawG, pitchG, rollG = QuatToEuler(quatG)
omega0 = omega1
headingM = computeheading(mx, my)
# headingM = headingfromMag(mag)
if initial < 30:
quatMuseAlg = quatAM
print("initial")
# O: orientation rotMat from quat
# O-1 : inverse of the rot Mat
# Calculate Ng = O*NL- Equation (1)
N_L = np.mat([[mx],[my],[mz]])
# print("N_L")
# print(N_L)
O = QuatToRotMat(quatAM)
N_G = O*N_L
# print("N_G")
# print(N_G)
initial = initial + 1
else:
quatMuseAlg = quatAM
# print("similaritywindow: ", similaritywindowMUSE)
if similaritywindowMUSE > 1:
# print("Ax: ", Ax)
# print("Ay: ", Ay)
# print("Az: ", Az)
aAx = abs(np.array(Ax))
aAy = abs(np.array(Ay))
aAz = abs(np.array(Az))
# print("Ax: ", aAx)
# print("Ay: ", aAy)
# print("Az: ", aAz)
agAx = aAx - 9.8
agAy = aAy - 9.8
agAz = aAz - 9.8
# print("agAx: ", agAx)
# print("agAy: ", agAy)
# print("agAz: ", agAz)
aagAx = abs(agAx)
aagAy = abs(agAy)
aagAz = abs(agAz)
# print("aagAx: ", aagAx)
# print("aagAy: ", aagAy)
# print("aagAz: ", aagAz)
x_max = max(aagAx)
y_max = max(aagAy)
z_max = max(aagAz)
# Ax = abs(abs(np.array(Ax))-9.8)
# Ay = abs(abs(np.array(Ax))-9.8)
# Az = abs(abs(np.array(Az))-9.8)
# # print(Az)
# # x_max = max([abs(max(Ax)), abs(min(Ax))])
# # y_max = max([abs(max(Ay)), abs(min(Ay))])
# # z_max = max([abs(max(Az)), abs(min(Az))])
# x_max = max(Ax)
# y_max = max(Ay)
# z_max = max(Az)
# print("x: ", x_max)
# print("y: ", y_max)
# print("z: ", z_max)
xyz_min = min([x_max, y_max, z_max])
# print(xyz_min)
# acceleration roughly measures 9.8m/s2
if xyz_min < 1:
print("yes, update quat with AM")
Oa = QuatToRotMat(quatAM)
Og = QuatToRotMat(quatG)
Ocomp = np.mat(Oa)*(1-beta) + np.mat(Og)*beta
# print("Oa")
# print(Oa)
# print("Og")
# print(Og)
# print("Ocomp")
# print(Ocomp)
quatComp = RotMatToQuat(np.array(np.mat(Ocomp)))
quatMuseAlg = quatComp
update = 1
# Update 3D magnetic vector estimation
N_L = np.mat([[mx],[my],[mz]])
# print("N_L")
# print(N_L)
O = QuatToRotMat(quatAM)
N_G = O*N_L
# reset values
similaritywindowMUSE = 0
Ax = []
Ay = []
Az = []
else:
Ax.append(ax)
Ay.append(ay)
Az.append(az)
similaritywindowMUSE = similaritywindowMUSE + dt
if update == 0:
O_hat = QuatToRotMat(quatG)
Oinv_hat = inv(O_hat)
N_L_hat = Oinv_hat * N_G
# print("N_L_hat")
# print(N_L_hat)
N_L = np.mat([[mx],[my],[mz]])
# print("N_L")
# print(N_L)
N_L_hat = np.array([np.array(N_L_hat)[0][0], np.array(N_L_hat)[1][0], np.array(N_L_hat)[2][0]])
N_L = np.array([mx, my, mz])
RotAxis = np.cross(N_L_hat, N_L)
RotAxis = RotAxis/LA.norm(RotAxis)
# print("RotAxis")
# print(RotAxis/LA.norm(RotAxis))
alpha = 0.01
RotAngle = angle_between(N_L_hat, N_L)
alphaRotAngle = alpha* RotAngle
deltaRotMat = AxisAngleToRotMat(RotAxis, alphaRotAngle)
Onew_hat = np.array(np.mat(inv(deltaRotMat))*np.mat(O_hat))
quatMUSE = RotMatToQuat(Onew_hat)
quatMUSE = quatNormalized(quatMUSE)
quatMuseAlg = QuaternionClass(quatMUSE[0], quatMUSE[1], quatMUSE[2], quatMUSE[3])
#print("update quat with MUSE")
update = 0
yawMUSE, pitchMUSE, rollMUSE = QuatToEuler(quatMuseAlg)
# print("yawMUSE: ", yawMUSE*180/math.pi)
q4.w = quatMuseAlg[0]
q4.x = quatMuseAlg[1]
q4.y = quatMuseAlg[3]
q4.z = -quatMuseAlg[2]
q4 = q4.normalized()
cube4.erase(screen)
cube4.draw(screen,q4,p4)
if visualIMU:
# quatDMP = QuaternionClass(qw, qx, qy, qz)
# yawDMP, pitchDMP, rollDMP = MPU9250_computeEuler(qw, qx, qy, qz)
# print("yprDMP: ", yawDMP, pitchDMP, rollDMP)
# # print("ypr: ", yaw, pitch, roll)
# quatDMP1 = Euler2Quat(yawDMP, pitchDMP, rollDMP)
# quatDMP = qnormalized(quatDMP)
# print("quatDMP: " , quatDMP[0], quatDMP[1], quatDMP[2], quatDMP[3])
# yawDMP, pitchDMP, rollDMP = quaternion_to_euler_angle(quatDMP[0], quatDMP[1], quatDMP[2], quatDMP[3])
# quatDMP1 = Euler2Quat(yawDMP, pitchDMP, rollDMP)
# quatDMP1 = qnormalized(quatDMP1)
# print("quatDMP1: ", quatDMP1[0], quatDMP1[1], quatDMP1[2], quatDMP1[3])
# print("ypr: ", yawDMP*180/math.pi)
# if yaw - 180 > 0 :
# yaw -= 360
# yaw *= math.pi/180
# if roll - 180 > 0 :
# roll -= 360
# roll *= math.pi/180
# if pitch - 180 > 0 :
# pitch -= 360
# pitch *= math.pi/180
# quatDMP = Euler2Quat(yaw, pitch, roll)
# quatDMP = qnormalized(quatDMP)
# q5.w = quatDMP1[0]
# q5.x = quatDMP1[1]
# q5.y = quatDMP1[3]
# q5.z = -quatDMP1[2]
# yawES = math.atan2(mx,my)
# rollES, pitchES = RP_calculate(accel)
# rollES = rollES
# yawES *= 180/math.pi
# if yawES < 0 :
# yawES += 360.0
# rollES *= 180/math.pi
# if rollES < 0 :
# rollES += 360.0
# pitchES *= 180/math.pi
# if pitchES < 0 :
# pitchES += 360.0
# print("yaw, yawES: ", yaw, yawES)
# print("roll, rollES: ", roll, rollES)
# print("pitch, pitchES: ", pitch, pitchES)
# rollES = rollES * 180/math.pi
# if rollES < 0:
# rollES = 360 + rollES
# rollES = (360 - rollES*180/math.pi)
# rollES = rollES * math.pi/180
# yawES = yawES*math.pi/180
# rollES = rollES*math.pi/180
# print("yawES: ", yawES)
#
# quatES = Euler2Quat(yaw*math.pi/180, pitch*math.pi/180, roll*math.pi/180)
# # quatES = Euler2Quat(yawES*math.pi/180, 0, 0)
# quatES = qnormalized(quatES)
# # print("quatES: ", quatES[0], quatES[1], quatES[2], quatES[3]) # 3 - yaw
# q5.w = quatES[0]
# q5.x = quatES[1]
# q5.z = -quatES[2]
# q5.y = quatES[3]
q5 = q5.normalized()
cube5.erase(screen)
cube5.draw(screen,q5,p5)
if visualIMU:
pygame.display.flip()
pygame.time.delay(0)
event = pygame.event.poll()
if event.type == pygame.QUIT \
or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
break
# print(time)
# print(time-prev_time)
# print(ax)
# print(ay)
# print(az)
# print(gx)
# print(gy)
# print(gz)
# print(mx)
# print(my)
# print(mz)
# sp = reading.split()
# print(float(sp[0][:-1]))
# print(sp[1].split(','))
# # print(float(sp[1][:-1]))
| 20.890228 | 228 | 0.614418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,534 | 0.418712 |
9373256a577124b3122a3c2bb32ac8963415c16e | 5,229 | py | Python | rest_api/views/doc_users.py | AktanKasymaliev/django_MyDentKg_backend | ede10d33028d36035b22e1e1dec916d773fecff6 | [
"MIT"
] | null | null | null | rest_api/views/doc_users.py | AktanKasymaliev/django_MyDentKg_backend | ede10d33028d36035b22e1e1dec916d773fecff6 | [
"MIT"
] | null | null | null | rest_api/views/doc_users.py | AktanKasymaliev/django_MyDentKg_backend | ede10d33028d36035b22e1e1dec916d773fecff6 | [
"MIT"
] | null | null | null | from rest_framework import generics
from rest_framework import response
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.views import APIView
from rest_api.serializers.doc_serializers import (DoctorRegisterSerializer,
DoctorUsersSerializer, DoctorLoginSerializer, DoctorChangePasswordSerializer)
from rest_framework.response import Response
from rest_api.send_mail import password_reset_token_created, send_confirmation_email
from rest_framework import status
from doctorsUser.models import DoctorUser
from drf_yasg.utils import swagger_auto_schema
from rest_framework_simplejwt.views import TokenObtainPairView
from rest_framework_simplejwt.backends import TokenBackend
from rest_api.permissions import IsOwnerOrReadOnly
import jwt
from config.settings import SECRET_KEY
class DoctorUsersView(generics.ListAPIView):
    """Read-only listing of doctor users, optionally narrowed by name search."""

    queryset = DoctorUser.objects.all()
    serializer_class = DoctorUsersSerializer

    @swagger_auto_schema(operation_description='List doctor users (can add params(?search) for search)', tags=['Doctor User'],
                         security=[])
    def get(self, request):
        """Return the (possibly filtered) list of doctors."""
        return self.list(request)

    def get_queryset(self):
        # When a ?search= term is present, match it case-insensitively
        # against the full name; otherwise return the base queryset.
        base = super().get_queryset()
        term = self.request.query_params.get("search")
        if term:
            return base.filter(fullname__icontains=term)
        return base
class Doctor(generics.RetrieveAPIView):
    """Retrieve a single doctor user by primary key."""
    serializer_class = DoctorUsersSerializer
    queryset = DoctorUser.objects.all()
class DoctorUserRegisterView(generics.CreateAPIView):
    """Register a new doctor account and send a confirmation e-mail."""

    serializer_class = DoctorRegisterSerializer

    @swagger_auto_schema(operation_description='Registration doctor users', tags=['Doctor User'],
                         security=[])
    def post(self, request):
        """Validate the payload, persist the user, trigger the mail, 201."""
        serializer = DoctorRegisterSerializer(data=request.data)
        # raise_exception=True turns invalid input into a 400 response.
        if serializer.is_valid(raise_exception=True):
            created = serializer.save()
            if created:
                send_confirmation_email(request, created)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class DoctorLoginView(TokenObtainPairView):
    """Issue a JWT token pair (login) for doctor users."""
    serializer_class = DoctorLoginSerializer
    permission_classes = [AllowAny,]

    @swagger_auto_schema(operation_description='Login doctor users', tags=['Doctor User'],
                         security=[])
    def post(self, request, *args, **kwargs):
        # Delegates to simplejwt's token view; this override only exists to
        # attach the swagger metadata above.
        return super().post(request, *args, **kwargs)
class DoctorChangePasswordView(generics.UpdateAPIView):
    """Change the authenticated doctor's password (old password required)."""
    serializer_class = DoctorChangePasswordSerializer
    model = DoctorUser
    permission_classes = (IsOwnerOrReadOnly,)

    def update(self, request, *args, **kwargs):
        """Verify the old password, set the new one, report the outcome.

        Returns 400 when the old password is wrong or the payload is
        invalid; 200 on success.
        """
        user = self.request.user
        serializer: DoctorChangePasswordSerializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            # Reject the change when the supplied old password does not match.
            if not user.check_password(serializer.data.get("old_password")):
                return Response({"old_password": "Wrong password"}, status=status.HTTP_400_BAD_REQUEST)
            user.set_password(serializer.data.get("new_password"))
            user.save()
            return Response({
                'status': 'success',
                'code': status.HTTP_200_OK,
                'message': 'Password updated successfully',
                'data': []
            })
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class DoctorForgotPasswordView(APIView):
    """Kick off the password-reset flow by e-mailing a reset token."""
    permission_classes = [IsOwnerOrReadOnly, ]

    @swagger_auto_schema(operation_description='Reset password doctor users', tags=['Doctor User'],
                         security=[])
    def get(self, request, *args, **kwargs):
        # Sends the reset e-mail as a side effect; response body is
        # informational only.  ("sended" typo is the live API message.)
        password_reset_token_created(request)
        return response.Response("Email was sended", status=status.HTTP_200_OK)
class DoctorInfo(generics.ListAPIView):
    """Return the profile of the doctor identified by the request's JWT."""
    serializer_class = DoctorUsersSerializer
    model = DoctorUser
    permission_classes = [IsAuthenticated]

    def get(self, request, *args, **kwargs):
        """Decode the Bearer token, look the doctor up, return the profile.

        Any failure — missing or malformed Authorization header, invalid
        token, unknown user — yields a 401.  (Previously the header parsing
        and ``jwt.decode`` ran *outside* the try-block, so those failures
        surfaced as unhandled 500 errors.)
        """
        try:
            token = request.META.get('HTTP_AUTHORIZATION', " ").split(' ')[1]
            user_id = jwt.decode(token, SECRET_KEY, algorithms=["HS256"])["user_id"]
            request.user = DoctorUser.objects.get(pk=user_id)
            data = {
                "id": str(request.user.id),
                "fullname": str(request.user.fullname),
                "username": str(request.user.username),
                "email": str(request.user.email),
                "phone_number": str(request.user.phone_number),
                "license_image": str(request.user.license_image),
                "avatar": str(request.user.avatar),
                "profession": str(request.user.profession),
                "experience": str(request.user.experience),
                "price": str(request.user.price),
                "company": str(request.user.company),
                "address": str(request.user.address),
                "is_active": str(request.user.is_active),
            }
            return response.Response(data, status=200)
        except Exception:
            return response.Response("Login does not succeded", status=401)
937335c45d766563fe6c185b76d6ad2c9b3fea7a | 652 | py | Python | examples/plots/plot_quaternion_integrate.py | Mateus224/pytransform3d-1 | 26f1d39c5fa5f5c400fdabc7e58f645c7a35bee5 | [
"BSD-3-Clause"
] | null | null | null | examples/plots/plot_quaternion_integrate.py | Mateus224/pytransform3d-1 | 26f1d39c5fa5f5c400fdabc7e58f645c7a35bee5 | [
"BSD-3-Clause"
] | null | null | null | examples/plots/plot_quaternion_integrate.py | Mateus224/pytransform3d-1 | 26f1d39c5fa5f5c400fdabc7e58f645c7a35bee5 | [
"BSD-3-Clause"
] | null | null | null | """
======================
Quaternion Integration
======================
Integrate angular velocities to a sequence of quaternions.
"""
import numpy as np
import matplotlib.pyplot as plt
from pytransform3d.rotations import quaternion_integrate, matrix_from_quaternion, plot_basis

# Constant angular velocity about the axis (1,1,0)/sqrt(2), scaled by pi,
# sampled at 21 time steps.
angular_velocities = np.empty((21, 3))
angular_velocities[:, :] = np.array([np.sqrt(0.5), np.sqrt(0.5), 0.0])
angular_velocities *= np.pi

# Integrate the velocities (dt = 0.1 s) into a sequence of quaternions.
Q = quaternion_integrate(angular_velocities, dt=0.1)
ax = None
for t in range(len(Q)):
    R = matrix_from_quaternion(Q[t])
    # Spread the frames along the diagonal from (-1,-1,-1) to (1,1,1).
    p = 2 * (t / (len(Q) - 1) - 0.5) * np.ones(3)
    ax = plot_basis(ax=ax, s=0.15, R=R, p=p)
plt.show()
| 27.166667 | 92 | 0.654908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.208589 |
9373d6bbb7c1cbcacd636bf497c2f411b745ae3a | 188 | py | Python | 8kyu/Beginner Series #2 Clock.py | walkgo/codewars_tasks | 4c0ab6f0e1d2181318fc15b12dd55ef565ecd223 | [
"MIT"
] | null | null | null | 8kyu/Beginner Series #2 Clock.py | walkgo/codewars_tasks | 4c0ab6f0e1d2181318fc15b12dd55ef565ecd223 | [
"MIT"
] | null | null | null | 8kyu/Beginner Series #2 Clock.py | walkgo/codewars_tasks | 4c0ab6f0e1d2181318fc15b12dd55ef565ecd223 | [
"MIT"
] | null | null | null | def past(h, m, s):
h_ms = h * 3600000
m_ms = m * 60000
s_ms = s * 1000
return h_ms + m_ms + s_ms
# Best Practices
def past(h, m, s):
    """Return the number of milliseconds elapsed since midnight at h:m:s."""
    return 1000 * (3600 * h + 60 * m + s)
93753504d8f288b3efbdb0fba48e17b67530926a | 1,071 | py | Python | examples/experimental/gmsh_api_test.py | Karl-Eriksson/calfem-python | e9a88a85d3a73877ec99f7fbd1a296a44c3c9b22 | [
"MIT"
] | 54 | 2016-04-11T19:12:13.000Z | 2022-02-22T07:15:39.000Z | examples/experimental/gmsh_api_test.py | Karl-Eriksson/calfem-python | e9a88a85d3a73877ec99f7fbd1a296a44c3c9b22 | [
"MIT"
] | 13 | 2019-07-01T19:48:38.000Z | 2022-02-11T12:50:02.000Z | examples/experimental/gmsh_api_test.py | Karl-Eriksson/calfem-python | e9a88a85d3a73877ec99f7fbd1a296a44c3c9b22 | [
"MIT"
] | 273 | 2017-08-01T10:29:09.000Z | 2022-02-16T14:02:36.000Z | import gmsh
import sys
import numpy as np
import calfem.mesh as cfm
import calfem.vis_mpl as cfv
if __name__ == "__main__":
    gmsh.initialize(sys.argv)
    gmsh.model.add("t1")
    # Four corner points of the unit square.
    gmsh.model.geo.add_point(0.0, 0.0, 0.0)
    gmsh.model.geo.add_point(1.0, 0.0, 0.0)
    gmsh.model.geo.add_point(1.0, 1.0, 0.0)
    gmsh.model.geo.add_point(0.0, 1.0, 0.0)
    # Boundary edges connecting the points counter-clockwise.
    gmsh.model.geo.add_line(1, 2)
    gmsh.model.geo.add_line(2, 3)
    gmsh.model.geo.add_line(3, 4)
    gmsh.model.geo.add_line(4, 1)
    # Close the loop and create the plane surface to be meshed.
    gmsh.model.geo.add_curve_loop([1, 2, 3, 4], 1)
    gmsh.model.geo.add_plane_surface([1], 1)
    #gmsh.model.geo.add_surface_loop([1, 2, 3, 4])
    gmsh.model.geo.synchronize()
    #gmsh.option.setNumber("Mesh.ElementOrder", 5)
    # gmsh.option.setNumber("Mesh.HighOrderOptimize", 2)
    # Recombine triangles into quadrangles where possible.
    gmsh.option.setNumber("Mesh.RecombineAll", 1)
    #gmsh.option.setNumber('Mesh.MeshSizeMin', 0.025)
    #gmsh.option.setNumber('Mesh.MeshSizeMax', 0.025)
    gmsh.model.mesh.generate(2)
    # NOTE(review): print_entities is not defined in this file — confirm it
    # is provided elsewhere, otherwise this raises NameError at runtime.
    print_entities()
    #gmsh.write("t1.msh")
    gmsh.fltk.run()
    gmsh.finalize()
gmsh.finalize()
| 23.282609 | 56 | 0.660131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.276377 |
9376046268e91238a8435a22fb73b4a25cba7b36 | 1,495 | py | Python | bandera.py | lauralardies/recursividad | 940e4c5fb1da00a88f955624409e4dc49150717d | [
"Apache-2.0"
] | null | null | null | bandera.py | lauralardies/recursividad | 940e4c5fb1da00a88f955624409e4dc49150717d | [
"Apache-2.0"
] | null | null | null | bandera.py | lauralardies/recursividad | 940e4c5fb1da00a88f955624409e4dc49150717d | [
"Apache-2.0"
] | null | null | null | # En este problema vamos a resolver el problema de la bandera de Dijkstra.
# Tenemos una fila de fichas que cada una puede ser de un único color: roja, verde o azul. Están colocadas en un orden cualquiera
# y tenemos que ordenarlas de manera que quede, de izquierda a derecha, los colores ordenados primero en rojo, luego verde y por
# último azul. La organización se obtiene mediante intercambios sucesivos, pero el color de la ficha sólo se comprueba una vez.
def intercambiar(bandera, x, y):
    """Swap, in place, the pieces at positions x and y of the flag list."""
    bandera[x], bandera[y] = bandera[y], bandera[x]
def permutar(bandera, i, j, k):
    """Sort the flag pieces in place using Dijkstra's Dutch-flag recursion.

    ``bandera[1:]`` holds pieces "R" (red), "V" (green) or "A" (blue);
    index 0 is a sentinel.  Invariant: ``bandera[1..i]`` is red,
    ``bandera[i+1..j]`` is green, ``bandera[k+1..]`` is blue, and
    ``bandera[j+1..k]`` is still unclassified.  Each piece's colour is
    inspected exactly once.  Returns ``bandera`` (also mutated in place).
    """
    if k != j:
        ficha = bandera[j + 1]
        if ficha == "R":
            # Grow the red region: swap the next piece behind the greens.
            bandera[i + 1], bandera[j + 1] = bandera[j + 1], bandera[i + 1]
            permutar(bandera, i + 1, j + 1, k)
        elif ficha == "V":
            # Green is already in place; just advance the frontier.
            permutar(bandera, i, j + 1, k)
        elif ficha == "A":
            # Send the blue piece to the tail and shrink the unknown zone.
            bandera[j + 1], bandera[k] = bandera[k], bandera[j + 1]
            permutar(bandera, i, j, k - 1)
        else:
            print("Has introducido una ficha inválida")
            # The original called the site-injected ``exit()`` helper, which
            # is unavailable under ``python -S``; raise SystemExit directly
            # for the same effect.
            raise SystemExit
    return bandera
# Interactive driver: read n pieces from stdin and sort them with permutar.
# Index 0 holds a sentinel so the algorithm can work with 1-based positions.
bandera = [""]
n = int(input("Introduce el número de elementos que quieres en tu bandera a ordenar: "))
print("Ahora introduce las fichas aleatoriamente (ficha roja = R, ficha verde = V, ficha azul = A)")
for i in range (0, n):
    ele = input()
    # Normalize to uppercase so "r"/"v"/"a" are accepted too.
    ele = ele.capitalize()
    bandera.append(ele)
bandera_ordenada = permutar(bandera, 0, 0, len(bandera) - 1)
print("¡Esta es tu bandera ordenada!\n" + str(bandera_ordenada)) | 38.333333 | 129 | 0.650167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 711 | 0.473054 |
9376fd70f43fc6254d25d8d9de6182ad6d1f30d5 | 32 | py | Python | tests/test_placeholder.py | symonk/stashie-cli | f18eec8a8e4bcb0b947a934e25b9a9f8066335fc | [
"Apache-2.0"
] | null | null | null | tests/test_placeholder.py | symonk/stashie-cli | f18eec8a8e4bcb0b947a934e25b9a9f8066335fc | [
"Apache-2.0"
] | null | null | null | tests/test_placeholder.py | symonk/stashie-cli | f18eec8a8e4bcb0b947a934e25b9a9f8066335fc | [
"Apache-2.0"
] | null | null | null | def test_placeholder():
...
| 10.666667 | 23 | 0.59375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9378f025d68dbd651d584bdfea07d35e9c5e3bf2 | 1,860 | py | Python | src/tests/test_download.py | dschon/rcp2 | b8a666d6a108c006aa1624944172af1f77bc0383 | [
"MIT"
] | 10 | 2019-02-22T03:48:16.000Z | 2022-02-06T00:23:53.000Z | src/tests/test_download.py | dschon/rcp2 | b8a666d6a108c006aa1624944172af1f77bc0383 | [
"MIT"
] | 82 | 2019-02-19T20:28:00.000Z | 2022-03-20T16:55:45.000Z | src/tests/test_download.py | dschon/rcp2 | b8a666d6a108c006aa1624944172af1f77bc0383 | [
"MIT"
] | 30 | 2019-02-18T17:25:31.000Z | 2022-01-22T14:38:29.000Z | import pytest
import responses
from src.data import download
def declare_action(fname, action, pooch):
    """Report which action was taken for a file.

    Plugged in as a ``processor`` so the tests can observe whether
    ``src.data.download.fetch`` performed a "download" of a missing file,
    a "fetch" of an available one, or an "update" of an old one.

    Args:
        fname (str): The file name.
        action (str): "download", "fetch", or "update".
        pooch (pooch.Pooch): The caller.
    """
    # fname and pooch belong to the processor signature but are unused here.
    return action
# Shared fixture handed to download.fetch in every test below.  known_hash
# is the SHA-256 of the body "hello\n" served by the mocked endpoint.
source = {
    "fname": "data.txt",
    "url": "https://my-data-source.com/data.txt",
    "known_hash": "5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03",
    "processor": declare_action,
}
@responses.activate
def test_fetch_download(tmp_path):
    """Download a file when no local copy exists."""
    # Mock the endpoint so fetch retrieves the body matching known_hash.
    responses.add(method=responses.GET, url=source["url"], body="hello\n")
    assert download.fetch(**source, path=tmp_path) == "download"
@responses.activate
def test_fetch_fetch(tmp_path):
    """Fetch a local file when it exists."""
    responses.add(method=responses.GET, url=source["url"], body="hello\n")
    # First call populates the cache; the second should hit the local copy.
    assert download.fetch(**source, path=tmp_path) == "download"
    assert download.fetch(**source, path=tmp_path) == "fetch"
@responses.activate
def test_fetch_update(tmp_path):
    """Update a local file when it is out of date."""
    responses.add(method=responses.GET, url=source["url"], body="hello\n")
    # Pre-seed a stale file whose hash differs from known_hash.
    with open(tmp_path / "data.txt", "w") as f:
        f.write("goodbye\n")
    assert download.fetch(**source, path=tmp_path) == "update"
@responses.activate
def test_fetch_corrupt(tmp_path):
    """Raise an error for a download with an unrecognized checksum."""
    # Serve a body whose SHA-256 does not match source["known_hash"].
    responses.add(method=responses.GET, url=source["url"], body="goodbye\n")
    with pytest.raises(ValueError):
        download.fetch(**source, path=tmp_path)
| 31.525424 | 85 | 0.67957 | 0 | 0 | 0 | 0 | 1,151 | 0.618817 | 0 | 0 | 812 | 0.436559 |
937962d2ebcdc2a6b9460ca91cb7e76e7ac6b49e | 1,434 | py | Python | what.py | manastech/de-bee | 32d4084b7d765b2766a7b8b947e896bb6f81beb9 | [
"MIT"
] | 1 | 2015-07-30T07:37:20.000Z | 2015-07-30T07:37:20.000Z | what.py | manastech/de-bee | 32d4084b7d765b2766a7b8b947e896bb6f81beb9 | [
"MIT"
] | null | null | null | what.py | manastech/de-bee | 32d4084b7d765b2766a7b8b947e896bb6f81beb9 | [
"MIT"
] | null | null | null | from google.appengine.ext import webapp
from wsgiref.handlers import CGIHandler
from model import Membership
from model import Group
from model import Transaction
class WhatHandler(webapp.RequestHandler):
    """Render a paginated HTML list of transactions, newest first."""

    def get(self):
        """Show page ``p`` (1-based) with 20 transactions per page."""
        page = self.request.get('p')
        if page is None or page == '':
            page = 1
        else:
            page = int(page)
        offset = (page - 1) * 20
        # Pager links; no "Previous" link on the first page.
        if page != 1:
            self.response.out.write("<a href=\"?p=%s\">Previous</a> | " % (page - 1))
        self.response.out.write(" %s " % page)
        self.response.out.write(" | <a href=\"?p=%s\">Next</a>" % (page + 1))
        self.response.out.write("<br/><br/>")
        self.response.out.write("<ul>")
        for tr in Transaction.gql("ORDER BY date DESC LIMIT %s, %s" % (offset, 20)):
            try:
                # NOTE(review): values are interpolated without HTML escaping;
                # a reason containing markup would be injected verbatim.
                self.response.out.write("<li>In %s: %s <b>%s</b> %s ($%s due to \"%s\", %s)</li>" % (
                    tr.group.name,
                    tr.fromMember.userNick,
                    tr.type,
                    tr.toMember.userNick,
                    tr.amount,
                    tr.reason,
                    tr.date))
            except Exception:
                # Was a bare ``except:``, which also swallowed SystemExit and
                # KeyboardInterrupt; narrowed to Exception, same fallback row
                # for transactions whose referenced group was deleted.
                self.response.out.write("<li style=\"color:blue\">Group must have been deleted...</li>")
        self.response.out.write("</ul>")
def main():
application = webapp.WSGIApplication([
('/what', WhatHandler),
], debug=True)
CGIHandler().run(application) | 30.510638 | 93 | 0.535565 | 1,040 | 0.725244 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.192469 |
937c9982a5f43646604b5753ad9ccca9aeb1f6d9 | 1,191 | py | Python | semana_02/desafios/python-6/main.py | alexaldr/AceleraDev-Python | f655b31f5d672f617c43282dc91a19f761845f84 | [
"MIT"
] | null | null | null | semana_02/desafios/python-6/main.py | alexaldr/AceleraDev-Python | f655b31f5d672f617c43282dc91a19f761845f84 | [
"MIT"
] | 23 | 2021-03-19T04:59:51.000Z | 2022-02-10T15:15:12.000Z | semana_02/desafios/python-6/main.py | alexaldr/AceleraDev-Python | f655b31f5d672f617c43282dc91a19f761845f84 | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
class Department:
    """A company department identified by a name and a numeric code."""

    def __init__(self, name, code):
        self.code = code
        self.name = name
class Employee(metaclass=ABCMeta):
    """Abstract base for all employee kinds.

    Subclasses must implement ``calc_bonus``.  The department object is
    kept protected (single leading underscore, by convention) and reached
    through the ``get_departament``/``set_departament`` pair.
    """

    def __init__(self, code, name, salary, department):
        self.code = code
        self.name = name
        self.salary = salary
        self._department = department  # protected; not part of the public API

    @abstractmethod
    def calc_bonus(self):
        """Return this employee's bonus amount; subclass-specific."""

    def get_hours(self):
        """Standard daily working hours for every employee."""
        return 8

    def get_departament(self):
        """Name of the employee's department."""
        return self._department.name

    def set_departament(self, name):
        """Rename the employee's department in place."""
        self._department.name = name
class Manager(Employee):
    """Employee in the 'managers' department; bonus is 15% of salary."""

    def __init__(self, code, name, salary):
        super().__init__(code, name, salary, Department('managers', 1))

    def calc_bonus(self):
        """Bonus: 15% of the manager's base salary."""
        return 0.15 * self.salary
class Seller(Employee):
    """Employee in the 'sellers' department; bonus is 15% of sales made."""

    def __init__(self, code, name, salary):
        super().__init__(code, name, salary, Department('sellers', 2))
        self._sales = 0  # accumulated sales total

    def calc_bonus(self):
        """Bonus: 15% of the accumulated sales total."""
        return 0.15 * self._sales

    def get_sales(self):
        """Current accumulated sales total."""
        return self._sales

    def put_sales(self, value):
        """Add ``value`` to the accumulated sales total."""
        self._sales = self._sales + value
| 22.903846 | 71 | 0.622166 | 1,139 | 0.956339 | 0 | 0 | 54 | 0.04534 | 0 | 0 | 48 | 0.040302 |
937e0e4be5d04ed65cc13c12fe0380f70dc970f4 | 3,130 | py | Python | train.py | JackwithWilshere/FashionAI- | 9a7a30aa12035418c835cdb4e05641cdb7a456b3 | [
"MIT"
] | 5 | 2019-04-03T07:32:39.000Z | 2021-12-23T06:48:27.000Z | train.py | espectre/FashionAI | 9a7a30aa12035418c835cdb4e05641cdb7a456b3 | [
"MIT"
] | null | null | null | train.py | espectre/FashionAI | 9a7a30aa12035418c835cdb4e05641cdb7a456b3 | [
"MIT"
] | 4 | 2019-01-29T01:33:13.000Z | 2021-05-13T10:24:31.000Z | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import os
from torch.autograd import Variable
import argparse
import numpy as np
from torch.optim.lr_scheduler import *
from model.resnet import resnet101
from data_pre.FashionAI import fashion
# Command-line configuration for one FashionAI attribute-classification run.
parser=argparse.ArgumentParser()
parser.add_argument('--workers',type=int,default=2)
parser.add_argument('--batchSize',type=int,default=64)
parser.add_argument('--nepoch',type=int,default=11)
parser.add_argument('--lr',type=float,default=0.001)
parser.add_argument('--gpu',type=str,default='7')
parser.add_argument('--attr',type=str,default='collar_design_labels')
opt=parser.parse_args()
print(opt)
os.environ["CUDA_VISIBLE_DEVICES"]=opt.gpu
# Training augmentation: resize, random crop + horizontal flip, then
# normalize with the standard ImageNet mean/std.
transform_train=transforms.Compose([
    transforms.Resize((256,256)),
    transforms.RandomCrop((224,224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))
])
# Validation: deterministic resize + the same normalization.
transform_val=transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))
])
trainset=fashion('/home/yhf/Challenge/FashionAI/STL_FashionAI/data/2base/Annotations/sum_labels.csv',transform_train,opt.attr,train=True)
trainloader=torch.utils.data.DataLoader(trainset,batch_size=opt.batchSize,shuffle=True,num_workers=opt.workers)
valset=fashion('/home/yhf/Challenge/FashionAI/STL_FashionAI/data/2base/Annotations/sum_labels.csv',transform_val,opt.attr,train=False)
valloader=torch.utils.data.DataLoader(valset,batch_size=opt.batchSize,shuffle=False,num_workers=opt.workers)
# Number of output classes per FashionAI attribute task.
AttrNum={
    "coat_length_labels":8,
    "collar_design_labels":5,
    "lapel_design_labels":5,
    "neck_design_labels":5,
    "neckline_design_labels":10,
    "pant_length_labels":6,
    "skirt_length_labels":6,
    "sleeve_length_labels":9
}
# ImageNet-pretrained ResNet-101 with the head replaced for this attribute.
model=resnet101(pretrained=True)
model.fc=nn.Linear(2048,AttrNum[opt.attr])
model.cuda()
optimizer=torch.optim.SGD(model.parameters(),lr=opt.lr,momentum=0.9,weight_decay=5e-4)
# Decay the learning rate every 3 epochs (StepLR default gamma=0.1).
scheduler=StepLR(optimizer,step_size=3)
criterion=nn.CrossEntropyLoss()
criterion.cuda()
def train(epoch):
    """Run one training epoch over ``trainloader``.

    Uses the module-level ``model``, ``optimizer``, ``scheduler`` and
    ``criterion`` defined above; prints the loss every 20 batches.
    """
    print('\nTrain Epoch:%d' % epoch)
    scheduler.step()  # advance the LR schedule once per epoch
    model.train()
    for batch_idx, (img,label) in enumerate(trainloader):
        image=Variable(img.cuda())
        label=Variable(label.cuda())
        optimizer.zero_grad()
        out=model(image)
        loss=criterion(out,label)
        loss.backward()
        optimizer.step()
        if batch_idx%20==0:
            print("Epoch: %d [%d:%d] loss: %f" % (epoch,batch_idx,len(trainloader),loss.mean()))
def val(epoch):
    """Evaluate on ``valloader`` and print the overall top-1 accuracy."""
    print('\nTest Epoch:%d'%epoch)
    model.eval()
    total=0
    correct=0
    for batch_idx, (img,label) in enumerate(valloader):
        # volatile=True disables autograd history (legacy pre-0.4 PyTorch API).
        image=Variable(img.cuda(),volatile=True)
        label=Variable(label.cuda())
        out=model(image)
        _,predict=torch.max(out.data,1)
        total+=image.size(0)
        correct+=predict.eq(label.data).cpu().sum()
    print("Acc:%f" % ((1.0*correct)/total))
# Main loop: alternate training and validation, then checkpoint the weights.
for epoch in range(opt.nepoch):
    train(epoch)
    val(epoch)
torch.save(model.state_dict(),'ckp/model_task_%s.pth' % opt.attr)
| 32.947368 | 138 | 0.741534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.169968 |
937e540fc45ceb0533afb18ba69d5a86a510db61 | 7,363 | py | Python | Salami/Player.py | markjoshua12/game-jam-2020 | 846dd052d649a609ab7a52ac0f4dcbeb71781c3b | [
"MIT"
] | 15 | 2020-04-17T12:02:14.000Z | 2022-03-16T03:01:34.000Z | Salami/Player.py | markjoshua12/game-jam-2020 | 846dd052d649a609ab7a52ac0f4dcbeb71781c3b | [
"MIT"
] | 9 | 2020-04-25T01:57:16.000Z | 2020-04-29T11:42:34.000Z | Salami/Player.py | markjoshua12/game-jam-2020 | 846dd052d649a609ab7a52ac0f4dcbeb71781c3b | [
"MIT"
] | 55 | 2020-04-17T12:01:11.000Z | 2021-12-28T10:14:02.000Z |
import arcade
import math
import LevelGenerator
import Textures
import Sounds
from Constants import TILE_SIZE, ROOM_WIDTH, ROOM_HEIGHT
from Mob import Mob
from Projectile import Projectile
class Player(Mob):
def __init__(self, x, y, keyboard):
self.keyboard = keyboard
self.movespeed = 2.5
self.jump_height = 4
self.jumping = False
self.max_attack_speed = 12
self.curr_attack_speed = 0
self.attack_dir = 0
self.curr_jump_height = 0
self.min_jump_height = 8
self.max_jump_height = 64
self.walk_count = 0
self.walk_frame_speed = 8
self.not_mirrored = True
self.curr_dash_frame = 0
self.dash_frame_speed = 12
self.dashing = False
self.crawling = False
self.curr_crawl_frame = 0
self.crawl_frame_speed = 16
self.health = 9
self.curr_invis_frame = 0
self.invis_frame = 150
# Textures
self.idle_texture = Textures.get_texture(0, 4)
self.idle_texture_mirrored = Textures.get_texture(0, 5)
self.walking_textures = Textures.get_textures(1, 4, 4)
self.walking_textures_mirrored = Textures.get_textures(1, 5, 4)
self.dash_textures = Textures.get_textures(5, 4, 3)
self.dash_textures_mirrored = Textures.get_textures(5, 5, 3)
self.crawl_textures = Textures.get_textures(7, 4, 4)
self.crawl_textures_mirrored = Textures.get_textures(7, 5, 4)
super().__init__(self.idle_texture, x, y)
def update(self):
speed_mult = 1
if self.keyboard.is_pressed("sprint"):
speed_mult = 2
if self.keyboard.is_pressed("dash"):
if not self.dashing:
self.change_y += 2
self.dashing = True
if self.keyboard.is_pressed("l"):
pass
# self.level.reset = True
if self.keyboard.is_pressed("down"):
self.change_y -= 0.1
self.crawling = True
speed_mult *= 0.5
else:
self.crawling = False
if self.keyboard.is_pressed("attack"):
if self.curr_attack_speed == 0:
extra_y_dir = 0
if self.keyboard.is_pressed("up"):
extra_y_dir = 4
elif self.keyboard.is_pressed("down"):
extra_y_dir = -4
attack_x = (self.change_x) * 4
attack_y = (self.change_y + extra_y_dir) * 3
attack_angle = int(math.atan2(attack_y, attack_x)/math.pi*180)
card = Projectile(
Textures.SPRITESHEET[3 + int((attack_angle % 360) / 45) + 16],
self.center_x,
self.center_y,
attack_x,
attack_y)
self.level.add_entity_to_list(card, self.level.entities)
self.curr_attack_speed = self.max_attack_speed
Sounds.play(Sounds.SHOOT)
if self.curr_attack_speed > 0:
self.curr_attack_speed -= 1
if self.keyboard.is_pressed("jump"):
if self.level.physics_engine.can_jump(1):
# if self.level.engine.can_jump(self, 1):
if not self.jumping:
Sounds.play(Sounds.JUMP)
self.level.physics_engine.jump(self.jump_height)
self.jumping = True
# elif self.level.engine.can_jump(self, -1):
elif self.level.physics_engine.can_jump(-1):
self.jumping = False
self.curr_jump_height = 0
if self.curr_jump_height > self.max_jump_height:
self.jumping = False
self.curr_jump_height = 0
elif self.curr_jump_height >= self.min_jump_height:
self.jumping = False
self.curr_jump_height = 0
if self.jumping:
self.change_y = self.jump_height
self.curr_jump_height += self.jump_height
if self.keyboard.is_pressed("left"):
self.change_x = -self.movespeed * speed_mult
elif self.keyboard.is_pressed("right"):
self.change_x = self.movespeed * speed_mult
else:
if self.change_x > 1:
self.change_x -= 1
self.not_mirrored = True
elif self.change_x < -1:
self.change_x += 1
self.not_mirrored = False
else:
self.change_x = 0
if self.dashing:
if self.change_x > 0:
self.change_x = self.movespeed * speed_mult * 1.5
elif self.change_x < 0:
self.change_x = -self.movespeed * speed_mult * 1.5
self.curr_dash_frame += 1
if self.curr_dash_frame >= self.dash_frame_speed * len(self.dash_textures):
self.curr_dash_frame = 0
self.dashing = False
elif self.crawling:
self.curr_crawl_frame += 1
if self.curr_crawl_frame >= self.crawl_frame_speed * len(self.crawl_textures):
self.curr_crawl_frame = 0
else:
self.walk_count += 1
if self.walk_count >= len(self.walking_textures) * self.walk_frame_speed:
self.walk_count = 0
if self.curr_invis_frame > 0 and self.curr_invis_frame % 12 < 6:
self.texture = Textures.get_texture(15, 15)
elif self.change_x > 0:
if self.dashing:
self.texture = self.dash_textures[self.curr_dash_frame // self.dash_frame_speed]
elif self.crawling:
self.texture = self.crawl_textures[self.curr_crawl_frame // self.crawl_frame_speed]
else:
self.texture = self.walking_textures[self.walk_count // self.walk_frame_speed]
# self.player_dir = True
elif self.change_x < 0:
if self.dashing:
self.texture = self.dash_textures_mirrored[self.curr_dash_frame // self.dash_frame_speed]
elif self.crawling:
self.texture = self.crawl_textures_mirrored[self.curr_crawl_frame // self.crawl_frame_speed]
else:
self.texture = self.walking_textures_mirrored[self.walk_count // self.walk_frame_speed]
# self.player_dir = False
else:
if self.not_mirrored:
if self.crawling:
self.texture = self.crawl_textures[0]
else:
self.texture = self.idle_texture
else:
if self.crawling:
self.texture = self.crawl_textures_mirrored[0]
else:
self.texture = self.idle_texture_mirrored
super().update()
def collided(self, entity, dx, dy):
super().collided(entity, dx, dy)
def hurt(self, damage, knockback):
if damage == 0:
return
if self.curr_invis_frame <= 0:
self.health -= damage
self.change_x += knockback
self.curr_invis_frame = self.invis_frame
Sounds.play(Sounds.HURT)
if self.health <= 0:
self.level.game_over = True
self.level.game_over_timer = 180 | 34.568075 | 108 | 0.561184 | 7,170 | 0.973788 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.031101 |
938003c63346f324cd22abdc408302c529c5d8bc | 5,127 | py | Python | app/main/forms/direct_award_forms.py | pocketstefan/digitalmarketplace-buyer-frontend | f4d27f03d5f3accb29eaa61e5ec8d9e5eb60c306 | [
"MIT"
] | null | null | null | app/main/forms/direct_award_forms.py | pocketstefan/digitalmarketplace-buyer-frontend | f4d27f03d5f3accb29eaa61e5ec8d9e5eb60c306 | [
"MIT"
] | null | null | null | app/main/forms/direct_award_forms.py | pocketstefan/digitalmarketplace-buyer-frontend | f4d27f03d5f3accb29eaa61e5ec8d9e5eb60c306 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms.validators import DataRequired, Length, NumberRange, InputRequired, ValidationError
from dmutils.forms.fields import (
DMBooleanField,
DMDateField,
DMPoundsField,
DMStripWhitespaceStringField,
DMRadioField,
)
from dmutils.forms.validators import GreaterThan
from decimal import Decimal
class CreateProjectForm(FlaskForm):
save_search_selection = DMRadioField(
validators=[
InputRequired("Please choose where to save your search")
]
)
name = DMStripWhitespaceStringField(
"Name your search. A reference number or short description of what you want to buy makes a good name.",
)
def __init__(self, projects, **kwargs):
super().__init__(**kwargs)
self.save_search_selection.options = [{
"label": project["name"] or f"Untitled project {project['id']}",
"value": str(project["id"]),
} for project in projects]
self.save_search_selection.options.append({
"label": "Save a new search",
"value": "new_search",
"reveal": {
"question": self.name.label.text,
"hint": "100 characters maximum",
"name": self.name.name,
}
})
def validate_name(form, field):
if form.save_search_selection.data == "new_search":
try:
Length(min=1, max=100, message="Names must be between 1 and 100 characters")(form, field)
except ValidationError as e:
form.save_search_selection.options[-1]["reveal"]["error"] = e.args[0]
raise
class DidYouAwardAContractForm(FlaskForm):
YES = 'yes'
NO = 'no'
STILL_ASSESSING = 'still-assessing'
did_you_award_a_contract = DMRadioField(
"Did you award a contract?",
validators=[InputRequired(message="You need to answer this question.")],
options=[
{'value': YES, 'label': 'Yes'},
{'value': NO, 'label': 'No'},
{'value': STILL_ASSESSING, 'label': 'We are still assessing services'},
])
class WhichServiceWonTheContractForm(FlaskForm):
which_service_won_the_contract = DMRadioField(
"Which service won the contract?",
validators=[InputRequired(message="Please select the service that won the contract")],
)
def __init__(self, services, *args, **kwargs):
super(WhichServiceWonTheContractForm, self).__init__(*args, **kwargs)
self.which_service_won_the_contract.options = [{
"label": service["data"]["serviceName"],
"value": service["id"],
"hint": service["supplier"]["name"],
} for service in services['services']]
class TellUsAboutContractForm(FlaskForm):
INPUT_REQUIRED_MESSAGE = "You need to answer this question."
INVALID_DATE_MESSAGE = "Your answer must be a valid date."
INVALID_VALUE_MESSAGE = "Enter your value in pounds and pence using numbers and decimals only" \
", for example 9900.05 for 9900 pounds and 5 pence."
start_date = DMDateField(
"Start date",
validators=[
InputRequired(INPUT_REQUIRED_MESSAGE),
DataRequired(INVALID_DATE_MESSAGE),
],
)
end_date = DMDateField(
"End date",
validators=[
InputRequired(INPUT_REQUIRED_MESSAGE),
DataRequired(INVALID_DATE_MESSAGE),
GreaterThan("start_date", "Your end date must be later than the start date."),
],
)
value_in_pounds = DMPoundsField(
"Value",
validators=[
InputRequired(INPUT_REQUIRED_MESSAGE),
DataRequired(INVALID_VALUE_MESSAGE),
NumberRange(min=Decimal('0.01'), message=INVALID_VALUE_MESSAGE),
],
)
buying_organisation = DMStripWhitespaceStringField(
"Organisation buying the service",
hint="For example, National Audit Office or Lewisham Council",
validators=[
InputRequired(INPUT_REQUIRED_MESSAGE)
],
)
class WhyDidYouNotAwardForm(FlaskForm):
why_did_you_not_award_the_contract = DMRadioField(
"Why didn't you award a contract?",
options=[
{
"label": "The work has been cancelled",
"value": "work_cancelled",
"hint": "For example, because you no longer have the budget",
},
{
"label": "There were no suitable services",
"value": "no_suitable_services",
"hint": "The services in your search results did not meet your requirements",
},
],
validators=[InputRequired(message="Please select a reason why you didn't award a contract")]
)
class BeforeYouDownloadForm(FlaskForm):
user_understands = DMBooleanField(
"I understand that I cannot edit my search again after I export my results",
validators=[
InputRequired(message="Please confirm that you understand before you continue.")
],
)
| 33.953642 | 111 | 0.61771 | 4,755 | 0.927443 | 0 | 0 | 0 | 0 | 0 | 0 | 1,555 | 0.303296 |
938285bf3f7e333d883cb4b2288f0db4d00d5c14 | 5,093 | py | Python | dotstrings/parser.py | nickromano/dotstrings | c7d2a0771d9ba9c26902415524944aa603ad0271 | [
"MIT"
] | null | null | null | dotstrings/parser.py | nickromano/dotstrings | c7d2a0771d9ba9c26902415524944aa603ad0271 | [
"MIT"
] | null | null | null | dotstrings/parser.py | nickromano/dotstrings | c7d2a0771d9ba9c26902415524944aa603ad0271 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Utilities for dealing with .strings files"""
import re
from typing import List, Match, Optional, TextIO, Tuple, Union
from dotstrings.dot_strings_entry import DotStringsEntry
_ENTRY_REGEX = r'^"(.+)"\s?=\s?"(.*)";$'
_ENTRY_PATTERN = re.compile(_ENTRY_REGEX)
_NS_ENTRY_REGEX = r'^(NS[^ ]+)\s?=\s?"(.*)";$'
_NS_ENTRY_PATTERN = re.compile(_NS_ENTRY_REGEX)
def load(file_details: Union[TextIO, str], encoding: Optional[str] = None) -> List[DotStringsEntry]:
"""Parse the contents of a .strings file from a file pointer.
:param file_details: The file pointer or a file path
:param encoding: The encoding the file is in
:returns: A list of `DotStringEntry`s
"""
# If it's a file pointer, read in the contents and parse
if not isinstance(file_details, str):
contents = "".join(file_details.readlines())
return loads(contents)
# It must have been a path instead then, so open the file, and parse
if encoding:
encoding_list = [encoding]
else:
encoding_list = ["utf-8", "utf-16-le", "utf-16-be"]
for encoding_option in encoding_list:
try:
with open(file_details, "r", encoding=encoding_option) as strings_file:
return load(strings_file)
except UnicodeDecodeError:
pass
raise Exception(f"Could not determine encoding for file at path: {file_details}")
def loads(contents: str) -> List[DotStringsEntry]:
"""Parse the contents of a .strings file.
Note: CRLF is not supported in strings.
:param contents: The contents of a .strings file
:returns: A list of `DotStringsEntry`s"""
# Sometimes we have CRLF. It's easier to just replace now. This could, in
# theory, cause issues, but we just don't support it for now.
if "\r\n" in contents:
raise Exception("Strings contain CRLF")
contents = contents.replace("\r\n", "\n")
# Let's split so that we have a single item and it's comments together
entries = contents.split('";')
# Remove any empty entries
entries = [entry for entry in entries if len(entry.strip()) > 0]
# Add the splitter back on the end
entries = [entry + '";' for entry in entries]
parsed_entries: List[DotStringsEntry] = []
# Now we can work on parsing them one by one
for entry in entries:
parsed_entries.append(_parse_entry(entry))
return parsed_entries
def _find_entry(entry_lines: List[str]) -> Tuple[int, Optional[Match]]:
"""Search for the entry in some entry lines
:param entry_lines: The lines for an entry from a .strings file (including comments)
:returns: A tuple with the index of the match and the match itself
"""
for index, line in enumerate(entry_lines):
# Naive checks to avoid doing a regex if we don't have to
if len(line) == 0:
continue
if len(line.strip()) == 0:
continue
if line.startswith(" "):
continue
match = _ENTRY_PATTERN.match(line)
if match:
return index, match
# We didn't match so try the NS entry one
match = _NS_ENTRY_PATTERN.match(line)
if match:
return index, match
return 0, None
def _parse_entry(entry: str) -> DotStringsEntry:
"""Parse a single entry in a .strings file and its comments.
:param entry: A single entry from a .strings file
:returns: A parsed entry value
:raises Exception: If we fail to parse the entry
"""
# pylint: disable=too-many-branches
lines = entry.split("\n")
entry_index, entry_match = _find_entry(lines)
# If we didn't find it, then that's a problem
if entry_match is None:
raise Exception("Failed to find key and value in entry:\n" + entry)
# We also expect it to be the last line, so if it's not, then that's a problem too
if entry_index != len(lines) - 1:
raise Exception("Found key and value in an unexpected position in entry:\n" + entry)
# We now have the key and value
key = entry_match.group(1)
value = entry_match.group(2)
# Just the comment to go
# We already know the key and value were on the last line, so let's drop it
lines = lines[:-1]
comment = ""
in_comment = False
for line in lines:
if not in_comment and "/*" in line:
in_comment = True
if not in_comment:
continue
if line.strip().startswith("/*"):
line = line.replace("/*", "")
if line.strip().endswith("*/"):
line = line.replace("*/", "")
comment += line.strip() + "\n"
if "*/" in line:
in_comment = False
# If we didn't find any comment, set it to None
if len(comment) == 0:
return DotStringsEntry(key, value, [])
comments = comment.split("\n")
comments = [comment.strip() for comment in comments]
comments = [comment for comment in comments if len(comment) > 0]
return DotStringsEntry(key, value, comments)
# pylint: enable=too-many-branches
| 28.452514 | 100 | 0.634793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,147 | 0.421559 |
fa7502cb66cc41fddb3ababb88dbe745379e17f4 | 1,249 | py | Python | source/timeseries/single/linear.py | supercoder3000/py_tensorflow_experiments | 01aebf681df25286cf4503661148203a03309b04 | [
"MIT"
] | null | null | null | source/timeseries/single/linear.py | supercoder3000/py_tensorflow_experiments | 01aebf681df25286cf4503661148203a03309b04 | [
"MIT"
] | null | null | null | source/timeseries/single/linear.py | supercoder3000/py_tensorflow_experiments | 01aebf681df25286cf4503661148203a03309b04 | [
"MIT"
] | null | null | null | import tensorflow as tf
from data_types.training_result import TrainingResult
from data_types.training_set import TrainingSet
from timeseries.build import compile_and_fit
from timeseries.window_generator import WindowGenerator
def evaluate_linear(
training_set: TrainingSet
) -> TrainingResult:
## LINEAR
linear = tf.keras.Sequential([
tf.keras.layers.Dense(units=1)
])
single_step_window = WindowGenerator(
input_width=1,
label_width=1,
shift=1,
training_set=training_set,
label_columns=['T (degC)']
)
print('Input shape:', single_step_window.example[0].shape)
print('Output shape:', linear(single_step_window.example[0]).shape)
compile_and_fit(linear, single_step_window)
wide_window = WindowGenerator(
input_width=24,
label_width=24,
shift=1,
label_columns=['T (degC)'],
training_set=training_set
)
wide_window.plot(linear)
metric_index = linear.metrics_names.index('mean_absolute_error')
return TrainingResult(
performance=linear.evaluate(single_step_window.test, verbose=0)[metric_index],
validation_performance=linear.evaluate(single_step_window.val)[metric_index]
) | 28.386364 | 86 | 0.709367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.063251 |
fa7568af8ff47dd33c5421737ba46eddde06e111 | 3,639 | py | Python | apps/sso/utils/email.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | [
"BSD-3-Clause"
] | 3 | 2021-05-16T17:06:57.000Z | 2021-05-28T17:14:05.000Z | apps/sso/utils/email.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | [
"BSD-3-Clause"
] | null | null | null | apps/sso/utils/email.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | [
"BSD-3-Clause"
] | null | null | null | from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import get_connection
from django.core.mail.message import EmailMessage, EmailMultiAlternatives
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from sso.celery import send_mail_task
from sso.utils.translation import i18n_email_msg_and_subj
def send_mail(subject, message, recipient_list, from_email=None, html_message=None, fail_silently=False,
apply_async=None, countdown=0, bcc=None, **kwargs):
if apply_async is None:
apply_async = settings.SSO_ASYNC_EMAILS
kwargs.update({'subject': subject, 'message': message, 'from_email': from_email, 'recipient_list': recipient_list,
'html_message': html_message, 'fail_silently': fail_silently, 'bcc': bcc})
if apply_async:
return send_mail_task.apply_async(countdown=countdown, kwargs=kwargs)
else:
return send_mail_task(**kwargs)
def send_html_mail(subject, message, recipient_list, from_email, html_message, fail_silently=False, reply_to=None,
bcc=None):
msg_alternative = MIMEMultipart('alternative')
msg_html = MIMEText(html_message, _subtype='html', _charset='utf-8')
msg_text = MIMEText(message, _charset='utf-8')
msg_alternative.attach(msg_text)
msg_alternative.attach(msg_html)
msg = EmailMessage(subject, '', from_email, recipient_list, reply_to=reply_to, bcc=bcc)
msg.mixed_subtype = 'related'
msg.attach(msg_alternative)
if settings.SSO_EMAIL_LOGO:
with open(settings.SSO_EMAIL_LOGO, 'rb') as f:
email_image = MIMEImage(f.read())
email_image.add_header('Content-ID', '<{}>'.format("logo"))
email_image.add_header("Content-Disposition", "inline", filename="logo")
msg.attach(email_image)
return msg.send(fail_silently=fail_silently)
def send_text_mail(subject, message, from_email, recipient_list,
fail_silently=False, auth_user=None, auth_password=None,
connection=None, html_message=None, reply_to=None, bcc=None):
"""
extended version with reply_to
"""
connection = connection or get_connection(
username=auth_user,
password=auth_password,
fail_silently=fail_silently,
)
mail = EmailMultiAlternatives(subject, message, from_email, recipient_list, connection=connection,
reply_to=reply_to, bcc=bcc)
if html_message:
mail.attach_alternative(html_message, 'text/html')
return mail.send()
def get_email_message(user, request, reply_to_email, email_template_name, subject_template_name):
use_https = request.is_secure()
current_site = get_current_site(request)
site_name = settings.SSO_SITE_NAME
domain = current_site.domain
c = {
'user': user,
'sender': request.user,
'reply_to_email': reply_to_email,
'brand': settings.SSO_BRAND,
'email': user.primary_email(),
'username': user.username,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'protocol': use_https and 'https' or 'http',
}
# use the user language or the default language (en-us)
language = user.language if user.language else settings.LANGUAGE_CODE
return i18n_email_msg_and_subj(c, email_template_name, subject_template_name, language)
| 40.88764 | 118 | 0.710085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.10992 |
fa794d632072a61bdf906bd8971ea8dd930b5c9a | 2,292 | py | Python | examples/paper/synthetic.py | wesselb/gpar | 70f5cb7cd2dec075e33dd7d9cd133b5bc1798777 | [
"MIT"
] | 49 | 2018-10-17T04:08:47.000Z | 2021-09-26T13:20:47.000Z | examples/paper/synthetic.py | wesselb/gpar | 70f5cb7cd2dec075e33dd7d9cd133b5bc1798777 | [
"MIT"
] | 3 | 2020-02-01T02:58:37.000Z | 2020-12-04T16:04:46.000Z | examples/paper/synthetic.py | wesselb/gpar | 70f5cb7cd2dec075e33dd7d9cd133b5bc1798777 | [
"MIT"
] | 10 | 2019-02-12T10:11:59.000Z | 2021-08-21T12:20:56.000Z | import matplotlib.pyplot as plt
import numpy as np
from gpar.regression import GPARRegressor
from wbml.experiment import WorkingDirectory
import wbml.plot
if __name__ == "__main__":
wd = WorkingDirectory("_experiments", "synthetic", seed=1)
# Create toy data set.
n = 200
x = np.linspace(0, 1, n)
noise = 0.1
# Draw functions depending on each other in complicated ways.
f1 = -np.sin(10 * np.pi * (x + 1)) / (2 * x + 1) - x ** 4
f2 = np.cos(f1) ** 2 + np.sin(3 * x)
f3 = f2 * f1 ** 2 + 3 * x
f = np.stack((f1, f2, f3), axis=0).T
# Add noise and subsample.
y = f + noise * np.random.randn(n, 3)
x_obs, y_obs = x[::8], y[::8]
# Fit and predict GPAR.
model = GPARRegressor(
scale=0.1,
linear=True,
linear_scale=10.0,
nonlinear=True,
nonlinear_scale=0.1,
noise=0.1,
impute=True,
replace=False,
normalise_y=False,
)
model.fit(x_obs, y_obs)
means, lowers, uppers = model.predict(
x, num_samples=200, credible_bounds=True, latent=True
)
# Fit and predict independent GPs: set `markov=0` in GPAR.
igp = GPARRegressor(
scale=0.1,
linear=True,
linear_scale=10.0,
nonlinear=True,
nonlinear_scale=0.1,
noise=0.1,
markov=0,
normalise_y=False,
)
igp.fit(x_obs, y_obs)
igp_means, igp_lowers, igp_uppers = igp.predict(
x, num_samples=200, credible_bounds=True, latent=True
)
# Plot the result.
plt.figure(figsize=(15, 3))
for i in range(3):
plt.subplot(1, 3, i + 1)
# Plot observations.
plt.scatter(x_obs, y_obs[:, i], label="Observations", style="train")
plt.plot(x, f[:, i], label="Truth", style="test")
# Plot GPAR.
plt.plot(x, means[:, i], label="GPAR", style="pred")
plt.fill_between(x, lowers[:, i], uppers[:, i], style="pred")
# Plot independent GPs.
plt.plot(x, igp_means[:, i], label="IGP", style="pred2")
plt.fill_between(x, igp_lowers[:, i], igp_uppers[:, i], style="pred2")
plt.xlabel("$t$")
plt.ylabel(f"$y_{i + 1}$")
wbml.plot.tweak(legend=i == 2)
plt.tight_layout()
plt.savefig(wd.file("synthetic.pdf"))
| 27.95122 | 78 | 0.57199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.175829 |
fa7999e4d1ec2ca801d5ae696a83b8e1e2447b50 | 8,462 | py | Python | code/js/interactive_ecoli_data.py | cremerlab/ribosomal_allocation | a48828e00906c18bc3c49d109b3874c531098ce1 | [
"MIT"
] | null | null | null | code/js/interactive_ecoli_data.py | cremerlab/ribosomal_allocation | a48828e00906c18bc3c49d109b3874c531098ce1 | [
"MIT"
] | null | null | null | code/js/interactive_ecoli_data.py | cremerlab/ribosomal_allocation | a48828e00906c18bc3c49d109b3874c531098ce1 | [
"MIT"
] | null | null | null | #%%
import numpy as np
import pandas as pd
import bokeh.plotting
import bokeh.io
import bokeh.models
import growth.model
import growth.viz
const = growth.model.load_constants()
colors, palette = growth.viz.bokeh_style()
mapper = growth.viz.load_markercolors()
bokeh.io.output_file('../../figures/interactive/interactive_ecoli_data.html')
# Define constants
gamma_max = const['gamma_max']
phi_O = const['phi_O']
Kd_cpc = const['Kd_cpc']
nu_max= np.arange(0.001, 50, 0.001)
const_phiRb = 0.25
# Load the mass_frac
mass_frac = pd.read_csv('../../data/main_figure_data/Fig4_ecoli_ribosomal_mass_fractions.csv')
elong = pd.read_csv('../../data/main_figure_data/Fig4_ecoli_peptide_elongation_rates.csv')
# Add markers and colors to maintain consistency.
markers = [mapper[g]['m_bokeh'] for g in mass_frac['source'].values]
_colors = [mapper[g]['c'] for g in mass_frac['source'].values]
mass_frac['marker'] = markers
mass_frac['color'] = _colors
markers = [mapper[g]['m_bokeh'] for g in elong['source'].values]
_colors = [mapper[g]['c'] for g in elong['source'].values]
elong['marker'] = markers
elong['color'] = _colors
mass_frac = bokeh.models.ColumnDataSource(mass_frac)
elong = bokeh.models.ColumnDataSource(elong)
# Set up the initial scenarios
opt_phiRb = growth.model.phiRb_optimal_allocation(gamma_max, nu_max, Kd_cpc, phi_O)
opt_gamma = growth.model.steady_state_gamma(gamma_max, opt_phiRb, nu_max, Kd_cpc, phi_O) * 7459 / 3600
opt_lam = growth.model.steady_state_growth_rate(gamma_max, opt_phiRb, nu_max, Kd_cpc, phi_O)
const_phiRb = const_phiRb * np.ones_like(nu_max)
const_gamma = growth.model.steady_state_gamma(gamma_max, const_phiRb, nu_max, Kd_cpc, phi_O) * 7459 / 3600
const_lam = growth.model.steady_state_growth_rate(gamma_max, const_phiRb, nu_max, Kd_cpc, phi_O)
trans_phiRb = growth.model.phiRb_constant_translation(gamma_max, nu_max, 10, Kd_cpc, phi_O)
trans_gamma = growth.model.steady_state_gamma(gamma_max, trans_phiRb, nu_max, Kd_cpc, phi_O) * 7459 / 3600
trans_lam = growth.model.steady_state_growth_rate(gamma_max, trans_phiRb, nu_max, Kd_cpc, phi_O)
source = bokeh.models.ColumnDataSource({'phiRb': [const_phiRb, trans_phiRb, opt_phiRb],
'gamma': [const_gamma, trans_gamma, opt_gamma],
'lam': [const_lam, trans_lam, opt_lam],
'color': [colors['primary_black'],
colors['primary_green'],
colors['primary_blue']],
'label': ['scenario I: constant allocation',
'scenario II: constant translation rate',
'scenario III: optimal allocation'],
'filler_xs': [[], [], []],
'filler_ys': [[], [], []]})
# ##############################################################################
# WIDGET DEFINITIONS
# ##############################################################################
phiO_slider = bokeh.models.Slider(start=0, end=0.95, step=0.001, value=phi_O,
title='allocation to other proteins')
gamma_slider = bokeh.models.Slider(start=1, end=25, step=0.001, value=gamma_max * 7459 / 3600,
title='maximum translation speed [AA / s]')
Kd_cpc_slider = bokeh.models.Slider(start=-4, end=-0.0001, step=0.001, value=np.log10(Kd_cpc),
title='log\u2081\u2080 precursor Michaelis-Menten constant')
phiRb_slider = bokeh.models.Slider(start=0.001, end=0.45, step=0.001,
value = 0.25,
title='scenario I: constant ribosomal allocation parameter',
bar_color=colors['primary_black'],
default_size=350)
sc2_cpc_slider = bokeh.models.Slider(start=0, end=0.999, step=0.01,
value = 0.9,
title='scenario II: target translation speed (relative to max)',
bar_color=colors['primary_green'],
default_size=350)
# ##############################################################################
# CANVAS DEFINITION
# ##############################################################################
mass_frac_tooltips = [('source', '@source'),
('ribosomal allocation', '@mass_fraction{0.2f}'),
('growth rate\n[inv. hr.]', '@growth_rate_hr{0.2f}'),
('method', '@method')]
elong_tooltips = [('source', '@source'),
('translation rate [AA/s]', '@elongation_rate_aa_s{0.2f}'),
('growth rate\n[inv. hr.]', '@growth_rate_hr{0.2f}')]
mass_hover = bokeh.models.HoverTool(names=['data'], tooltips=mass_frac_tooltips)
elong_hover = bokeh.models.HoverTool(names=['data'], tooltips=elong_tooltips)
allocation_axis = bokeh.plotting.figure(width=450, height=400,
x_axis_label='growth rate λ [inv. hr]',
y_axis_label = 'ribosomal allocation',
y_range=[0, 0.35],
x_range=[0, 2],
tools = [mass_hover, 'pan',
'wheel_zoom', 'box_zoom']
)
elongation_axis = bokeh.plotting.figure(width=450, height=400,
y_axis_label='translation speed [AA / s]',
x_axis_label = 'growth rate λ [inv. hr]',
y_range=[5, 20],
x_range = [0, 2],
tools = [elong_hover, 'pan',
'wheel_zoom', 'box_zoom']
)
legend_axis = bokeh.plotting.figure(width=370, height=120, tools=[])
legend_axis.axis.axis_label = None
legend_axis.axis.visible = False
legend_axis.grid.grid_line_color = None
legend_axis.background_fill_color = None
legend_axis.outline_line_color = None
# ##############################################################################
# GLYPH DEFINITION
# ##############################################################################
allocation_axis.scatter(x='growth_rate_hr', y='mass_fraction', marker='marker',
color='color', source=mass_frac, size=10, line_color='black',
alpha=0.75, name='data')
elongation_axis.scatter(x='growth_rate_hr', y='elongation_rate_aa_s', marker='marker',
color='color', source=elong, size=10, line_color='black',
alpha=0.75, name='data')
allocation_axis.multi_line(xs='lam', ys='phiRb', color='color', line_width=2,
source=source)
elongation_axis.multi_line(xs='lam', ys='gamma', color='color', line_width=2,
source=source)
legend_axis.multi_line(xs='filler_xs', ys='filler_ys', line_width=2.5,
line_color='color', legend_field='label' ,
source=source)
##############################################################################
# CALLBACK DEFINITION
# ##############################################################################
args = {'gamma_slider': gamma_slider,
'Kd_cpc_slider': Kd_cpc_slider,
'phiO_slider': phiO_slider,
'phiRb_slider': phiRb_slider,
'source': source,
'nu_max': nu_max,
'sc2_cpc_slider': sc2_cpc_slider}
callback = growth.viz.load_js(['./interactive_ecoli_data.js', './functions.js'],
args=args)
for s in [gamma_slider, Kd_cpc_slider, phiO_slider, phiRb_slider, sc2_cpc_slider]:
s.js_on_change('value', callback)
# ##############################################################################
# LAYOUT
# ##############################################################################
col1 = bokeh.layouts.Column(gamma_slider, phiO_slider)
col2 = bokeh.layouts.Column(Kd_cpc_slider, phiRb_slider, sc2_cpc_slider)
sliders = bokeh.layouts.Row(col1, col2, legend_axis)
row1 = bokeh.layouts.Row(allocation_axis, elongation_axis)
layout = bokeh.layouts.Column(sliders, row1)
bokeh.io.save(layout)
| 51.284848 | 106 | 0.536162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,535 | 0.299504 |
fa79e9f429253026bc9c0f924d862a6178140da0 | 1,156 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/util/monitoring.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/util/monitoring.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/util/monitoring.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """Helper methods for monitoring of events."""
from edx_django_utils.monitoring import set_custom_attribute, set_custom_attributes_for_course_key
def monitor_import_failure(course_key, import_step, message=None, exception=None):
"""
Helper method to add custom parameters to for import failures.
Arguments:
course_key: CourseKey object
import_step (str): current step in course import
message (str): any particular message to add
exception: Exception object
"""
set_custom_attribute('course_import_failure', import_step)
set_custom_attributes_for_course_key(course_key)
if message:
set_custom_attribute('course_import_failure_message', message)
if exception is not None:
exception_module = getattr(exception, '__module__', '')
separator = '.' if exception_module else ''
module_and_class = f'{exception_module}{separator}{exception.__class__.__name__}'
exc_message = str(exception)
set_custom_attribute('course_import_failure_error_class', module_and_class)
set_custom_attribute('course_import_failure_error_message', exc_message)
| 41.285714 | 98 | 0.745675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.457612 |
fa7a70aa8c051849cdc07367bdb4c7b9fe08c5bc | 24,725 | py | Python | Unidad3/H.hv6_toy/bin/quast-4.6.3/quast_libs/genome_analyzer.py | Melcatus/TallerBioinf | 4a3beb210ee277ce213609301ea160746d39b434 | [
"MIT"
] | null | null | null | Unidad3/H.hv6_toy/bin/quast-4.6.3/quast_libs/genome_analyzer.py | Melcatus/TallerBioinf | 4a3beb210ee277ce213609301ea160746d39b434 | [
"MIT"
] | null | null | null | Unidad3/H.hv6_toy/bin/quast-4.6.3/quast_libs/genome_analyzer.py | Melcatus/TallerBioinf | 4a3beb210ee277ce213609301ea160746d39b434 | [
"MIT"
] | null | null | null | ############################################################################
# Copyright (c) 2015-2017 Saint Petersburg State University
# Copyright (c) 2011-2015 Saint Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import logging
import os
from quast_libs import fastaparser, genes_parser, reporting, qconfig, qutils
from quast_libs.log import get_logger
from quast_libs.qutils import is_python2
logger = get_logger(qconfig.LOGGER_DEFAULT_NAME)
ref_lengths_by_contigs = {}
# reading genes and operons
class FeatureContainer:
    """Holds genomic features of one kind ('gene' or 'operon') and their source files."""

    def __init__(self, fpaths, kind=''):
        self.kind = kind              # 'gene' or 'operon'
        self.fpaths = fpaths          # annotation files the features are read from
        self.region_list = []         # parsed feature regions
        self.chr_names_dict = {}      # feature seqname -> reference chromosome name
def get_ref_aligned_lengths():
    """Return the module-level map: chromosome name -> aligned length per assembly.

    Populated by do(); empty until the analyzer has run.
    """
    return ref_lengths_by_contigs
def chromosomes_names_dict(feature, regions, chr_names):
    """Map region seqnames (from a genes/operons file) to reference chromosome names.

    Names can differ between the two sources, e.g. "U22222" in the feature list
    vs "gi|48994873|gb|U22222|" in the reference, so a substring match in either
    direction is accepted.

    Arguments:
        feature (str): feature kind ('gene' or 'operon'), used in log messages
        regions: parsed feature regions, each with a `seqname` attribute
        chr_names (list): chromosome names taken from the reference

    Returns:
        dict: region seqname -> matching chromosome name (None if unmatched)
    """
    region_2_chr_name = {}

    # single chromosome: match every region against the only reference name
    if len(chr_names) == 1:
        chr_name = chr_names[0]
        for region in regions:
            if region.seqname in chr_name or chr_name in region.seqname:
                region_2_chr_name[region.seqname] = chr_name
            else:
                region_2_chr_name[region.seqname] = None
        if len(region_2_chr_name) == 1:
            # one distinct seqname that didn't match: assume it is the chromosome anyway
            if region_2_chr_name[regions[0].seqname] is None:
                logger.notice('Reference name in %ss (%s) does not match the name of the reference (%s). '
                              'QUAST will ignore this issue and count as if they matched.' %
                              (feature, regions[0].seqname, chr_name),
                              indent=' ')
                region_2_chr_name[regions[0].seqname] = chr_name
        else:
            logger.warning('Some of the reference names in %ss do not match the name of the reference (%s). '
                           'Check your %s file.' % (feature, chr_name, feature), indent=' ')

    # multiple chromosomes: take the first chromosome name matching each region
    else:
        for region in regions:
            no_chr_name_for_the_region = True
            for chr_name in chr_names:
                if region.seqname in chr_name or chr_name in region.seqname:
                    region_2_chr_name[region.seqname] = chr_name
                    no_chr_name_for_the_region = False
                    break
            if no_chr_name_for_the_region:
                region_2_chr_name[region.seqname] = None
        if None in region_2_chr_name.values():
            logger.warning('Some of the reference names in %ss do not match any chromosome. '
                           'Check your %s file.' % (feature, feature), indent=' ')
        if all(chr_name is None for chr_name in region_2_chr_name.values()):
            logger.warning('Reference names in %ss do not match any chromosome. Check your %s file.' % (feature, feature),
                           indent=' ')
    return region_2_chr_name
def process_single_file(contigs_fpath, index, nucmer_path_dirpath, genome_stats_dirpath,
                        reference_chromosomes, genes_container, operons_container):
    """Analyze one assembly against the reference: coverage, gaps, genes/operons.

    Reads the Nucmer .coords file produced earlier for this assembly, marks
    covered reference positions, writes per-assembly *_gaps.txt / *_genes.txt /
    *_operons.txt files into genome_stats_dirpath, and returns
    (ref_lengths, (results, unsorted_genes_in_contigs, genes_in_contigs,
    unsorted_operons_in_contigs, operons_in_contigs)), or None on failure.
    """
    assembly_label = qutils.label_from_fpath(contigs_fpath)
    corr_assembly_label = qutils.label_from_fpath_for_fname(contigs_fpath)
    results = dict()
    ref_lengths = {}
    logger.info(' ' + qutils.index_to_str(index) + assembly_label)

    # pick the coords variant matching the alignment-filtering setting
    nucmer_base_fpath = os.path.join(nucmer_path_dirpath, corr_assembly_label + '.coords')
    if qconfig.use_all_alignments:
        nucmer_fpath = nucmer_base_fpath
    else:
        nucmer_fpath = nucmer_base_fpath + '.filtered'

    if not os.path.isfile(nucmer_fpath):
        logger.error('Nucmer\'s coords file (' + nucmer_fpath + ') not found! Try to restart QUAST.',
                     indent=' ')
        return None

    coordfile = open(nucmer_fpath, 'r')
    # skip everything up to the '====' header separator
    for line in coordfile:
        if line.startswith('='):
            break

    # EXAMPLE:
    # [S1] [E1] | [S2] [E2] | [LEN 1] [LEN 2] | [% IDY] | [TAGS]
    #=====================================================================================
    # 338980 339138 | 2298 2134 | 159 165 | 79.76 | gi|48994873|gb|U00096.2| NODE_0_length_6088
    # 374145 374355 | 2306 2097 | 211 210 | 85.45 | gi|48994873|gb|U00096.2| NODE_0_length_6088

    # per-chromosome coverage mask: index i (1-based) is 1 if position i is aligned
    genome_mapping = {}
    for chr_name, chr_len in reference_chromosomes.items():
        genome_mapping[chr_name] = [0] * (chr_len + 1)

    contig_tuples = fastaparser.read_fasta(contigs_fpath)  # list of FASTA entries (in tuples: name, seq)
    # sort contigs by decreasing length, remembering the original order
    sorted_contig_tuples = sorted(enumerate(contig_tuples), key=lambda x: len(x[1][1]), reverse=True)
    sorted_contigs_names = []
    contigs_order = []
    for idx, (name, _) in sorted_contig_tuples:
        sorted_contigs_names.append(name)
        contigs_order.append(idx)

    genes_in_contigs = [0] * len(sorted_contigs_names)  # for cumulative plots: i-th element is the number of genes in i-th contig
    operons_in_contigs = [0] * len(sorted_contigs_names)
    aligned_blocks_by_contig_name = {}  # for gene finding: contig_name --> list of AlignedBlock

    gene_searching_enabled = len(genes_container.region_list) or len(operons_container.region_list)
    if qconfig.memory_efficient and gene_searching_enabled:
        logger.warning('Run QUAST without genes and operons files to reduce memory consumption.')
    if gene_searching_enabled:
        for name in sorted_contigs_names:
            aligned_blocks_by_contig_name[name] = []
    # parse alignment rows: reference span (s1, e1), contig span (s2, e2)
    for line in coordfile:
        if line.strip() == '':
            break
        s1 = int(line.split('|')[0].split()[0])
        e1 = int(line.split('|')[0].split()[1])
        s2 = int(line.split('|')[1].split()[0])
        e2 = int(line.split('|')[1].split()[1])

        contig_name = line.split()[12].strip()
        chr_name = line.split()[11].strip()

        if chr_name not in genome_mapping:
            logger.error("Something went wrong and chromosome names in your coords file (" + nucmer_base_fpath + ") " \
                         "differ from the names in the reference. Try to remove the file and restart QUAST.")
            return None

        if gene_searching_enabled:
            aligned_blocks_by_contig_name[contig_name].append(AlignedBlock(seqname=chr_name, start=s1, end=e1,
                                                                           contig=contig_name, start_in_contig=s2, end_in_contig=e2))
        if s2 == 0 and e2 == 0:  # special case: circular genome, contig starts on the end of a chromosome and ends in the beginning
            for i in range(s1, len(genome_mapping[chr_name])):
                genome_mapping[chr_name][i] = 1
            for i in range(1, e1 + 1):
                genome_mapping[chr_name][i] = 1
        else:  # if s1 <= e1:
            for i in range(s1, e1 + 1):
                genome_mapping[chr_name][i] = 1
    coordfile.close()
    if qconfig.space_efficient and nucmer_fpath.endswith('.filtered'):
        os.remove(nucmer_fpath)

    # counting genome coverage and gaps number
    covered_bp = 0
    gaps_count = 0
    gaps_fpath = os.path.join(genome_stats_dirpath, corr_assembly_label + '_gaps.txt') if not qconfig.space_efficient else '/dev/null'
    gaps_file = open(gaps_fpath, 'w')
    for chr_name, chr_len in reference_chromosomes.items():
        gaps_file.write(chr_name + '\n')
        cur_gap_size = 0
        aligned_len = 0
        for i in range(1, chr_len + 1):
            if genome_mapping[chr_name][i] == 1:
                # a run of uncovered positions just ended; record it if long enough
                if cur_gap_size >= qconfig.min_gap_size:
                    gaps_count += 1
                    gaps_file.write(str(i - cur_gap_size) + ' ' + str(i - 1) + '\n')
                aligned_len += 1
                covered_bp += 1
                cur_gap_size = 0
            else:
                cur_gap_size += 1
        ref_lengths[chr_name] = aligned_len
        # a gap may extend to the very end of the chromosome
        if cur_gap_size >= qconfig.min_gap_size:
            gaps_count += 1
            gaps_file.write(str(chr_len - cur_gap_size + 1) + ' ' + str(chr_len) + '\n')
    gaps_file.close()

    results["covered_bp"] = covered_bp
    results["gaps_count"] = gaps_count

    # finding genes and operons
    for container, feature_in_contigs, field, suffix in [
            (genes_container,
             genes_in_contigs,
             reporting.Fields.GENES,
             '_genes.txt'),
            (operons_container,
             operons_in_contigs,
             reporting.Fields.OPERONS,
             '_operons.txt')]:
        if not container.region_list:
            results[field + "_full"] = None
            results[field + "_partial"] = None
            continue

        total_full = 0
        total_partial = 0
        found_fpath = os.path.join(genome_stats_dirpath, corr_assembly_label + suffix)
        found_file = open(found_fpath, 'w')
        found_file.write('%s\t\t%s\t%s\t%s\t%s\n' % ('ID or #', 'Start', 'End', 'Type', 'Contig'))
        found_file.write('=' * 50 + '\n')

        # 0 - gene is not found,
        # 1 - gene is found,
        # 2 - part of gene is found
        found_list = [0] * len(container.region_list)
        for i, region in enumerate(container.region_list):
            found_list[i] = 0
            gene_blocks = []
            if region.id is None:
                region.id = '# ' + str(region.number + 1)
            for contig_id, name in enumerate(sorted_contigs_names):
                cur_feature_is_found = False
                for cur_block in aligned_blocks_by_contig_name[name]:
                    if container.chr_names_dict[region.seqname] != cur_block.seqname:
                        continue

                    # computing circular genomes
                    # NOTE(review): AlignedBlock stores the contig name in `contig`,
                    # not `contig_name` — this branch looks like it would raise
                    # AttributeError when hit; confirm with a circular-genome run.
                    if cur_block.start > cur_block.end:
                        blocks = [AlignedBlock(seqname=cur_block.seqname, start=cur_block.start, end=region.end + 1,
                                               contig=cur_block.contig_name, start_in_contig=cur_block.start_in_contig),
                                  AlignedBlock(seqname=cur_block.seqname, start=1, end=cur_block.end,
                                               contig=cur_block.contig_name, end_in_contig=cur_block.end_in_contig)]
                        if cur_block.start_in_contig < cur_block.end_in_contig:
                            blocks[0].end_in_contig = blocks[0].start_in_contig + (blocks[0].end - blocks[0].start)
                            blocks[1].start_in_contig = blocks[0].end_in_contig + 1
                        else:
                            blocks[0].end_in_contig = blocks[0].start_in_contig - (blocks[1].end - blocks[1].start)
                            blocks[1].start_in_contig = blocks[0].end_in_contig - 1
                    else:
                        blocks = [cur_block]

                    for block in blocks:
                        if region.end <= block.start or block.end <= region.start:
                            continue  # no overlap at all
                        elif block.start <= region.start and region.end <= block.end:
                            # region fully inside the aligned block -> complete feature
                            if found_list[i] == 2:  # already found as partial gene
                                total_partial -= 1
                            found_list[i] = 1
                            total_full += 1
                            contig_info = block.format_gene_info(region)
                            found_file.write('%s\t\t%d\t%d\tcomplete\t%s\n' % (region.id, region.start, region.end, contig_info))
                            feature_in_contigs[contig_id] += 1  # inc number of found genes/operons in id-th contig
                            cur_feature_is_found = True
                            break
                        elif min(region.end, block.end) - max(region.start, block.start) >= qconfig.min_gene_overlap:
                            # sufficient partial overlap
                            if found_list[i] == 0:
                                found_list[i] = 2
                                total_partial += 1
                            gene_blocks.append(block)
                    if cur_feature_is_found:
                        break
                if cur_feature_is_found:
                    break
            # adding info about partially found genes/operons
            if found_list[i] == 2:  # partial gene/operon
                contig_info = ','.join([block.format_gene_info(region) for block in sorted(gene_blocks, key=lambda block: block.start)])
                found_file.write('%s\t\t%d\t%d\tpartial\t%s\n' % (region.id, region.start, region.end, contig_info))

        results[field + "_full"] = total_full
        results[field + "_partial"] = total_partial
        found_file.close()

    logger.info(' ' + qutils.index_to_str(index) + 'Analysis is finished.')

    # "unsorted" counts are consumed by the FRC curve plots in do();
    # NOTE(review): this indexes with contigs_order directly rather than its
    # inverse permutation — confirm the intended ordering.
    unsorted_genes_in_contigs = [genes_in_contigs[idx] for idx in contigs_order]
    unsorted_operons_in_contigs = [operons_in_contigs[idx] for idx in contigs_order]

    return ref_lengths, (results, unsorted_genes_in_contigs, genes_in_contigs, unsorted_operons_in_contigs, operons_in_contigs)
def do(ref_fpath, aligned_contigs_fpaths, output_dirpath, genes_fpaths, operons_fpaths,
       detailed_contigs_reports_dirpath, genome_stats_dirpath):
    """Run the genome analyzer over every aligned assembly.

    Loads the reference chromosomes and optional genes/operons annotation
    files, processes each assembly (in parallel unless memory_efficient),
    writes genome_stats_dirpath/genome_info.txt plus per-assembly files,
    fills each assembly's report, and draws plots/HTML when configured.
    Returns [genes_container, operons_container], or None if every assembly
    failed.
    """
    nucmer_path_dirpath = os.path.join(detailed_contigs_reports_dirpath, 'nucmer_output')
    from quast_libs import search_references_meta
    if search_references_meta.is_quast_first_run:
        nucmer_path_dirpath = os.path.join(nucmer_path_dirpath, 'raw')

    logger.print_timestamp()
    logger.main_info('Running Genome analyzer...')

    if not os.path.isdir(genome_stats_dirpath):
        os.mkdir(genome_stats_dirpath)

    # collect chromosome names/lengths and the total reference size
    reference_chromosomes = {}
    genome_size = 0
    for name, seq in fastaparser.read_fasta(ref_fpath):
        chr_name = name.split()[0]
        chr_len = len(seq)
        genome_size += chr_len
        reference_chromosomes[chr_name] = chr_len

    # reading genome size
    # genome_size = fastaparser.get_lengths_from_fastafile(reference)[0]

    # reading reference name
    # >gi|48994873|gb|U00096.2| Escherichia coli str. K-12 substr. MG1655, complete genome
    # ref_file = open(reference, 'r')
    # reference_name = ref_file.readline().split()[0][1:]
    # ref_file.close()

    # RESULTS file
    result_fpath = genome_stats_dirpath + '/genome_info.txt'
    res_file = open(result_fpath, 'w')

    genes_container = FeatureContainer(genes_fpaths, 'gene')
    operons_container = FeatureContainer(operons_fpaths, 'operon')
    for container in [genes_container, operons_container]:
        if not container.fpaths:
            logger.notice('No file with ' + container.kind + 's provided. '
                          'Use the -' + container.kind[0].capitalize() + ' option '
                          'if you want to specify it.', indent=' ')
            continue
        for fpath in container.fpaths:
            container.region_list += genes_parser.get_genes_from_file(fpath, container.kind)
        if len(container.region_list) == 0:
            logger.warning('No ' + container.kind + 's were loaded.', indent=' ')
            res_file.write(container.kind + 's loaded: ' + 'None' + '\n')
        else:
            logger.info(' Loaded ' + str(len(container.region_list)) + ' ' + container.kind + 's')
            res_file.write(container.kind + 's loaded: ' + str(len(container.region_list)) + '\n')
            # translate feature seqnames to reference chromosome names
            container.chr_names_dict = chromosomes_names_dict(container.kind, container.region_list, list(reference_chromosomes.keys()))

    for contigs_fpath in aligned_contigs_fpaths:
        report = reporting.get(contigs_fpath)
        if genes_container.fpaths:
            report.add_field(reporting.Fields.REF_GENES, len(genes_container.region_list))
        if operons_container.fpaths:
            report.add_field(reporting.Fields.REF_OPERONS, len(operons_container.region_list))

    # for cumulative plots:
    files_genes_in_contigs = {}  # "filename" : [ genes in sorted contigs (see below) ]
    files_unsorted_genes_in_contigs = {}  # "filename" : [ genes in sorted contigs (see below) ]
    files_operons_in_contigs = {}
    files_unsorted_operons_in_contigs = {}

    # for histograms
    genome_mapped = []
    full_found_genes = []
    full_found_operons = []

    # process all contig files
    num_nf_errors = logger._num_nf_errors
    n_jobs = min(len(aligned_contigs_fpaths), qconfig.max_threads)
    if is_python2():
        from joblib import Parallel, delayed
    else:
        from joblib3 import Parallel, delayed
    if not qconfig.memory_efficient:
        process_results = Parallel(n_jobs=n_jobs)(delayed(process_single_file)(
            contigs_fpath, index, nucmer_path_dirpath, genome_stats_dirpath,
            reference_chromosomes, genes_container, operons_container)
            for index, contigs_fpath in enumerate(aligned_contigs_fpaths))
    else:
        process_results = [process_single_file(contigs_fpath, index, nucmer_path_dirpath, genome_stats_dirpath,
                                               reference_chromosomes, genes_container, operons_container)
                           for index, contigs_fpath in enumerate(aligned_contigs_fpaths)]
    # None results are non-fatal per-assembly failures; count and drop them
    num_nf_errors += len([res for res in process_results if res is None])
    logger._num_nf_errors = num_nf_errors
    process_results = [res for res in process_results if res]
    if not process_results:
        logger.main_info('Genome analyzer failed for all the assemblies.')
        res_file.close()
        return

    ref_lengths = [process_results[i][0] for i in range(len(process_results))]
    results_genes_operons_tuples = [process_results[i][1] for i in range(len(process_results))]
    # fill the module-level map exposed by get_ref_aligned_lengths()
    for ref in reference_chromosomes:
        ref_lengths_by_contigs[ref] = [ref_lengths[i][ref] for i in range(len(ref_lengths))]
    res_file.write('reference chromosomes:\n')
    for chr_name, chr_len in reference_chromosomes.items():
        aligned_len = max(ref_lengths_by_contigs[chr_name])
        res_file.write('\t' + chr_name + ' (total length: ' + str(chr_len) + ' bp, maximal covered length: ' + str(aligned_len) + ' bp)\n')
    res_file.write('\n')
    res_file.write('total genome size: ' + str(genome_size) + '\n\n')
    res_file.write('gap min size: ' + str(qconfig.min_gap_size) + '\n')
    res_file.write('partial gene/operon min size: ' + str(qconfig.min_gene_overlap) + '\n\n')

    # header
    res_file.write('\n\n')
    res_file.write('%-25s| %-10s| %-12s| %-10s| %-10s| %-10s| %-10s| %-10s|\n'
                   % ('assembly', 'genome', 'duplication', 'gaps', 'genes', 'partial', 'operons', 'partial'))
    res_file.write('%-25s| %-10s| %-12s| %-10s| %-10s| %-10s| %-10s| %-10s|\n'
                   % ('', 'fraction', 'ratio', 'number', '', 'genes', '', 'operons'))
    res_file.write('=' * 120 + '\n')

    for contigs_fpath, (results, unsorted_genes_in_contigs, genes_in_contigs, unsorted_operons_in_contigs, operons_in_contigs)\
            in zip(aligned_contigs_fpaths, results_genes_operons_tuples):
        assembly_name = qutils.name_from_fpath(contigs_fpath)

        files_genes_in_contigs[contigs_fpath] = genes_in_contigs
        files_unsorted_genes_in_contigs[contigs_fpath] = unsorted_genes_in_contigs
        files_operons_in_contigs[contigs_fpath] = operons_in_contigs
        files_unsorted_operons_in_contigs[contigs_fpath] = unsorted_operons_in_contigs
        full_found_genes.append(sum(genes_in_contigs))
        full_found_operons.append(sum(operons_in_contigs))

        covered_bp = results["covered_bp"]
        gaps_count = results["gaps_count"]
        genes_full = results[reporting.Fields.GENES + "_full"]
        genes_part = results[reporting.Fields.GENES + "_partial"]
        operons_full = results[reporting.Fields.OPERONS + "_full"]
        operons_part = results[reporting.Fields.OPERONS + "_partial"]

        report = reporting.get(contigs_fpath)
        genome_fraction = float(covered_bp) * 100 / float(genome_size)
        # assembled aligned bases relative to the covered portion of the reference
        duplication_ratio = (report.get_field(reporting.Fields.TOTALLEN) +
                             report.get_field(reporting.Fields.MISINTERNALOVERLAP) +
                             report.get_field(reporting.Fields.AMBIGUOUSEXTRABASES) -
                             report.get_field(reporting.Fields.UNALIGNEDBASES)) /\
                            ((genome_fraction / 100.0) * float(genome_size))

        res_file.write('%-25s| %-10s| %-12s| %-10s|'
                       % (assembly_name[:24], '%3.5f%%' % genome_fraction, '%1.5f' % duplication_ratio, gaps_count))
        report.add_field(reporting.Fields.MAPPEDGENOME, '%.3f' % genome_fraction)
        report.add_field(reporting.Fields.DUPLICATION_RATIO, '%.3f' % duplication_ratio)
        genome_mapped.append(genome_fraction)
        for (field, full, part) in [(reporting.Fields.GENES, genes_full, genes_part),
                                    (reporting.Fields.OPERONS, operons_full, operons_part)]:
            if full is None and part is None:
                res_file.write(' %-10s| %-10s|' % ('-', '-'))
            else:
                res_file.write(' %-10s| %-10s|' % (full, part))
                report.add_field(field, '%s + %s part' % (full, part))
        res_file.write('\n')
    res_file.close()

    if genes_container.region_list:
        ref_genes_num = len(genes_container.region_list)
    else:
        ref_genes_num = None
    if operons_container.region_list:
        ref_operons_num = len(operons_container.region_list)
    else:
        ref_operons_num = None
    if qconfig.html_report:
        from quast_libs.html_saver import html_saver
        if genes_container.region_list:
            html_saver.save_features_in_contigs(output_dirpath, aligned_contigs_fpaths, 'genes', files_genes_in_contigs, ref_genes_num)
        if operons_container.region_list:
            html_saver.save_features_in_contigs(output_dirpath, aligned_contigs_fpaths, 'operons', files_operons_in_contigs, ref_operons_num)

    if qconfig.draw_plots:
        # cumulative plots:
        from . import plotter
        from quast_libs.ca_utils.misc import contigs_aligned_lengths
        if genes_container.region_list:
            plotter.genes_operons_plot(len(genes_container.region_list), aligned_contigs_fpaths, files_genes_in_contigs,
                                       genome_stats_dirpath + '/genes_cumulative_plot', 'genes')
            plotter.frc_plot(output_dirpath, ref_fpath, aligned_contigs_fpaths, contigs_aligned_lengths, files_unsorted_genes_in_contigs,
                             genome_stats_dirpath + '/genes_frcurve_plot', 'genes')
            plotter.histogram(aligned_contigs_fpaths, full_found_genes, genome_stats_dirpath + '/complete_genes_histogram',
                              '# complete genes')
        if operons_container.region_list:
            plotter.genes_operons_plot(len(operons_container.region_list), aligned_contigs_fpaths, files_operons_in_contigs,
                                       genome_stats_dirpath + '/operons_cumulative_plot', 'operons')
            plotter.frc_plot(output_dirpath, ref_fpath, aligned_contigs_fpaths, contigs_aligned_lengths, files_unsorted_operons_in_contigs,
                             genome_stats_dirpath + '/operons_frcurve_plot', 'operons')
            plotter.histogram(aligned_contigs_fpaths, full_found_operons, genome_stats_dirpath + '/complete_operons_histogram',
                              '# complete operons')
        plotter.histogram(aligned_contigs_fpaths, genome_mapped, genome_stats_dirpath + '/genome_fraction_histogram',
                          'Genome fraction, %', top_value=100)

    logger.main_info('Done.')
    return [genes_container, operons_container]
class AlignedBlock():
    """One Nucmer alignment: a contig interval mapped onto a reference interval."""

    def __init__(self, seqname=None, start=None, end=None, contig=None, start_in_contig=None, end_in_contig=None):
        self.seqname = seqname                  # reference chromosome name
        self.start = start                      # alignment start on the reference
        self.end = end                          # alignment end on the reference
        self.contig = contig                    # contig name
        self.start_in_contig = start_in_contig  # alignment start inside the contig
        self.end_in_contig = end_in_contig      # alignment end inside the contig (may precede start)

    def format_gene_info(self, region):
        """Return 'contig:from-to' with contig coordinates clipped to `region`."""
        pos_from, pos_to = self.start_in_contig, self.end_in_contig
        # clip the left edge: move the start by how far the region begins
        # inside this block (direction depends on the alignment orientation)
        if self.start < region.start:
            shift = region.start - self.start
            pos_from = pos_from + shift if pos_from < pos_to else pos_from - shift
        # clip the right edge: limit the span to the overlapping length
        if region.end < self.end:
            clipped_len = region.end - max(region.start, self.start)
            pos_to = pos_from + clipped_len if pos_from < pos_to else pos_from - clipped_len
        return '%s:%s-%s' % (self.contig, pos_from, pos_to)
fa7a83fe53cfeb8f0635e9e1cf133b811d5394c2 | 3,713 | py | Python | app/auth/forms.py | blazejosojca/flask_blog | 2bac2f9c8bb60db8a5073147f35b9e088c97497b | [
"MIT"
] | null | null | null | app/auth/forms.py | blazejosojca/flask_blog | 2bac2f9c8bb60db8a5073147f35b9e088c97497b | [
"MIT"
] | 1 | 2021-06-01T23:21:11.000Z | 2021-06-01T23:21:11.000Z | app/auth/forms.py | blazejosojca/flask_blog | 2bac2f9c8bb60db8a5073147f35b9e088c97497b | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from flask_wtf.file import FileAllowed, FileField
from flask_babel import lazy_gettext as _l
from wtforms import StringField, TextAreaField, SubmitField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo
from app.models import User
class RegistrationForm(FlaskForm):
    """Sign-up form with uniqueness checks for username and email.

    Bug fix: the validators for `username`, `password` and
    `password_confirmation` were accidentally passed into `_l()`
    (lazy_gettext) instead of the field constructors, so they were never
    applied. They are now given to the fields themselves.
    """
    username = StringField(_l('Username'),
                           validators=[DataRequired(), Length(min=2, max=24)])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    password_confirmation = PasswordField(_l('Confirm Password'),
                                          validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField(_l('Sign Up'))

    def validate_username(self, username):
        """WTForms inline validator: reject usernames that already exist."""
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError(
                _l('This username already exists. Please use a different username!')
            )

    def validate_email(self, email):
        """WTForms inline validator: reject emails that already exist."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError(_l('This email already exists. Please use a different email!'))
class LoginForm(FlaskForm):
    """Sign-in form: email/password credentials plus a "remember me" flag."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    remember = BooleanField(_l('Remember Me'))
    submit = SubmitField(_l('Sign In'))
class UpdateUserForm(FlaskForm):
    """Profile-editing form; uniqueness checks only run for changed values.

    Bug fix: the "did the value change?" checks used identity comparison
    (`is not`) on strings, which is generally True even for equal strings
    coming from a request, so a user re-submitting their own unchanged
    username/email was rejected as a duplicate. Value equality (`!=`) is
    used instead.
    """
    username = StringField(_l('Username'),
                           validators=[DataRequired(), Length(min=2, max=24)])
    email = StringField('Email', validators=[DataRequired(), Email()])
    about_me = TextAreaField(_l('About me'), validators=[Length(min=0, max=140)])
    image_file = FileField(_l('Update profile picture'),
                           validators=[FileAllowed(['jpg', 'jpeg', 'png'])])
    submit = SubmitField(_l('Submit'))

    def __init__(self, original_username, original_email, *args, **kwargs):
        super(UpdateUserForm, self).__init__(*args, **kwargs)
        # remember the current values so unchanged fields skip the DB check
        self.original_username = original_username
        self.original_email = original_email

    def validate_username(self, username):
        """Ensure a changed username is not already taken by another user."""
        if username.data != self.original_username:
            user = User.query.filter_by(username=self.username.data).first()
            if user is not None:
                raise ValidationError(_l('Please use a different username!'))

    def validate_email(self, email):
        """Ensure a changed email is not already taken by another user."""
        if email.data != self.original_email:
            user = User.query.filter_by(email=self.email.data).first()
            if user is not None:
                raise ValidationError(_l('Please use a different email!'))
class RequestResetForm(FlaskForm):
    """Form to request a password-reset email for an existing account."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    submit = SubmitField(_l('Request Password Reset'))

    def validate_email(self, email):
        # WTForms inline validator: the address must belong to a known account
        user = User.query.filter_by(email=email.data).first()
        if user is None:
            raise ValidationError('There is no account with this email.')
class ResetPasswordForm(FlaskForm):
    """Form to set a new password after following a reset link."""
    password = PasswordField('Password', validators=[DataRequired()])
    password_confirmation = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Reset Password')
class DeleteUserForm(FlaskForm):
    """Confirmation form for deleting a user account by username."""
    username = StringField('Username', validators=[DataRequired(), Length(min=2, max=24)]
                           )
    submit = SubmitField('Delete User')
| 43.174419 | 111 | 0.664153 | 3,367 | 0.906814 | 0 | 0 | 0 | 0 | 0 | 0 | 511 | 0.137625 |
fa7b83f18dbaad2881ce921e1b1d08adbc4e3409 | 5,308 | py | Python | pointmap.py | quillford/twitchslam | c75c609b39b5d51b325fb2a8efd3402b7d29f37f | [
"MIT"
] | 1 | 2022-01-12T10:12:09.000Z | 2022-01-12T10:12:09.000Z | pointmap.py | quillford/twitchslam | c75c609b39b5d51b325fb2a8efd3402b7d29f37f | [
"MIT"
] | null | null | null | pointmap.py | quillford/twitchslam | c75c609b39b5d51b325fb2a8efd3402b7d29f37f | [
"MIT"
] | 1 | 2022-01-05T09:55:12.000Z | 2022-01-05T09:55:12.000Z | from helpers import poseRt
from frame import Frame
import time
import numpy as np
import g2o
import json
LOCAL_WINDOW = 20
#LOCAL_WINDOW = None
class Point(object):
    """A 3-D landmark in the world, observed in one or more Frames."""

    def __init__(self, mapp, loc, color, tid=None):
        self.pt = np.array(loc)        # 3-D position in world coordinates
        self.frames = []               # frames observing this point
        self.idxs = []                 # keypoint index within each observing frame
        self.color = np.copy(color)
        # reuse an explicit id (deserialization) or register with the map
        self.id = tid if tid is not None else mapp.add_point(self)

    def homogeneous(self):
        """Return the point as a 4-vector in homogeneous coordinates."""
        return np.array([self.pt[0], self.pt[1], self.pt[2], 1.0])

    def orb(self):
        """Return this point's descriptor in every observing frame."""
        return [f.des[idx] for f, idx in zip(self.frames, self.idxs)]

    def delete(self):
        """Detach this point from every frame that observes it.

        Note: the original implementation ended with `del self`, which only
        removed the local name and had no effect; the point object itself is
        discarded by its owner (Map) dropping its reference.
        """
        for f, idx in zip(self.frames, self.idxs):
            f.pts[idx] = None

    def add_observation(self, frame, idx):
        """Record that `frame` observes this point at keypoint index `idx`."""
        frame.pts[idx] = self
        self.frames.append(frame)
        self.idxs.append(idx)
class Map(object):
    """The global SLAM map: all frames and 3-D points, plus bundle adjustment."""

    def __init__(self):
        self.frames = []
        self.points = []
        self.max_frame = 0  # next frame id to hand out
        self.max_point = 0  # next point id to hand out

    def serialize(self):
        """Serialize the whole map (points + frames) to a JSON string."""
        ret = {}
        ret['points'] = [{'id': p.id, 'pt': p.pt.tolist(), 'color': p.color.tolist()} for p in self.points]
        ret['frames'] = []
        for f in self.frames:
            ret['frames'].append({
                'id': f.id, 'K': f.K.tolist(), 'pose': f.pose.tolist(), 'h': f.h, 'w': f.w,
                'kpus': f.kpus.tolist(), 'des': f.des.tolist(),
                # point references are stored by id; -1 marks "no point"
                'pts': [p.id if p is not None else -1 for p in f.pts]})
        ret['max_frame'] = self.max_frame
        ret['max_point'] = self.max_point
        return json.dumps(ret)

    def deserialize(self, s):
        """Rebuild the map from a JSON string produced by serialize()."""
        ret = json.loads(s)
        self.max_frame = ret['max_frame']
        self.max_point = ret['max_point']
        self.points = []
        self.frames = []

        pids = {}
        for p in ret['points']:
            pp = Point(self, p['pt'], p['color'], p['id'])
            self.points.append(pp)
            pids[p['id']] = pp

        for f in ret['frames']:
            ff = Frame(self, None, f['K'], f['pose'], f['id'])
            ff.w, ff.h = f['w'], f['h']
            ff.kpus = np.array(f['kpus'])
            ff.des = np.array(f['des'])
            ff.pts = [None] * len(ff.kpus)
            # resolve stored point ids back to Point objects (-1 -> None)
            for i,p in enumerate(f['pts']):
                if p != -1:
                    ff.pts[i] = pids[p]
            self.frames.append(ff)

    def add_point(self, point):
        """Register a point and return its newly assigned id."""
        ret = self.max_point
        self.max_point += 1
        self.points.append(point)
        return ret

    def add_frame(self, frame):
        """Register a frame and return its newly assigned id."""
        ret = self.max_frame
        self.max_frame += 1
        self.frames.append(frame)
        return ret

    # *** optimizer ***

    def optimize(self, local_window=LOCAL_WINDOW, fix_points=False, verbose=False):
        """Run g2o bundle adjustment over the last `local_window` frames.

        Frames outside the window (and the first two frames) are held fixed;
        with fix_points=True only poses are optimized. Points with high mean
        reprojection error, or few and only-old observations, are culled.
        Returns the optimizer's final chi2.
        """
        # create g2o optimizer
        opt = g2o.SparseOptimizer()
        solver = g2o.BlockSolverSE3(g2o.LinearSolverCholmodSE3())
        solver = g2o.OptimizationAlgorithmLevenberg(solver)
        opt.set_algorithm(solver)

        # 5.991 is the 95% chi-square threshold for 2 DOF (usual Huber delta)
        robust_kernel = g2o.RobustKernelHuber(np.sqrt(5.991))

        if local_window is None:
            local_frames = self.frames
        else:
            local_frames = self.frames[-local_window:]

        # add frames to graph
        for f in self.frames:
            pose = np.linalg.inv(f.pose)
            sbacam = g2o.SBACam(g2o.SE3Quat(pose[0:3, 0:3], pose[0:3, 3]))
            sbacam.set_cam(f.K[0][0], f.K[1][1], f.K[0][2], f.K[1][2], 1.0)

            v_se3 = g2o.VertexCam()
            v_se3.set_id(f.id)
            v_se3.set_estimate(sbacam)
            # anchor the first two frames and everything outside the window
            v_se3.set_fixed(f.id <= 1 or f not in local_frames)
            opt.add_vertex(v_se3)

        # add points to frames
        PT_ID_OFFSET = 0x10000  # keep point vertex ids disjoint from frame ids
        for p in self.points:
            if not any([f in local_frames for f in p.frames]):
                continue
            pt = g2o.VertexSBAPointXYZ()
            pt.set_id(p.id + PT_ID_OFFSET)
            pt.set_estimate(p.pt[0:3])
            pt.set_marginalized(True)
            pt.set_fixed(fix_points)
            opt.add_vertex(pt)
            # one projection edge per observation of this point
            for f,idx in zip(p.frames, p.idxs):
                edge = g2o.EdgeProjectP2MC()
                edge.set_vertex(0, pt)
                edge.set_vertex(1, opt.vertex(f.id))
                uv = f.kpus[idx]
                edge.set_measurement(uv)
                edge.set_information(np.eye(2))
                edge.set_robust_kernel(robust_kernel)
                opt.add_edge(edge)

        if verbose:
            opt.set_verbose(True)
        opt.initialize_optimization()
        opt.optimize(20)

        # put frames back
        for f in self.frames:
            est = opt.vertex(f.id).estimate()
            R = est.rotation().matrix()
            t = est.translation()
            f.pose = np.linalg.inv(poseRt(R, t))

        # put points back (and cull)
        if not fix_points:
            new_points = []
            for p in self.points:
                vert = opt.vertex(p.id + PT_ID_OFFSET)
                if vert is None:
                    # point was skipped above (no observation in the window)
                    new_points.append(p)
                    continue
                est = vert.estimate()

                # <= 3 match point that's old
                old_point = len(p.frames) <= 3 and p.frames[-1] not in local_frames

                # compute reprojection error
                errs = []
                for f,idx in zip(p.frames, p.idxs):
                    uv = f.kpus[idx]
                    proj = np.dot(np.dot(f.K, f.pose[:3]),
                                  np.array([est[0], est[1], est[2], 1.0]))
                    proj = proj[0:2] / proj[2]
                    errs.append(np.linalg.norm(proj-uv))

                # cull
                if old_point or np.mean(errs) > 5:
                    p.delete()
                    continue

                p.pt = np.array(est)
                new_points.append(p)
            print("Culled: %d points" % (len(self.points) - len(new_points)))
            self.points = new_points

        return opt.active_chi2()
| 27.790576 | 103 | 0.58572 | 5,158 | 0.971741 | 0 | 0 | 0 | 0 | 0 | 0 | 501 | 0.094386 |
fa7c044effd2fd98d0d77b1344bdf8fa952165a5 | 482 | py | Python | day-02/python/part2.py | kayew/aoc-2020 | 55ea804c983aef4d3a7e159403247ec23a47aa10 | [
"MIT"
] | null | null | null | day-02/python/part2.py | kayew/aoc-2020 | 55ea804c983aef4d3a7e159403247ec23a47aa10 | [
"MIT"
] | null | null | null | day-02/python/part2.py | kayew/aoc-2020 | 55ea804c983aef4d3a7e159403247ec23a47aa10 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
file = open(sys.argv[1], "r")
total = 0
for line in file:
letterMatch = 0
param = line.split()
passIndex = [int(x) for x in param[0].split('-')]
targetLetter = param[1][0]
password = param[2]
if password[passIndex[0]-1] == targetLetter:
letterMatch += 1
if password[passIndex[1]-1] == targetLetter:
letterMatch += 1
if letterMatch == 1:
total += 1
file.close()
print(f"total: {total}")
| 19.28 | 53 | 0.591286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.093361 |
fa7f6dd4ea20017f1aa2340509ebc6691ac50171 | 3,165 | py | Python | handler.py | chiragjn/torchserve-t5-translation | ad8493302a405116c04fb2bea154412d051f27b8 | [
"MIT"
] | 3 | 2021-03-10T05:24:15.000Z | 2021-11-21T14:52:03.000Z | handler.py | chiragjn/torchserve-t5-translation | ad8493302a405116c04fb2bea154412d051f27b8 | [
"MIT"
] | null | null | null | handler.py | chiragjn/torchserve-t5-translation | ad8493302a405116c04fb2bea154412d051f27b8 | [
"MIT"
] | null | null | null | import torch
import os
import logging
import json
from abc import ABC
from ts.torch_handler.base_handler import BaseHandler
from transformers import T5Tokenizer, T5ForConditionalGeneration
logger = logging.getLogger(__name__)
class TransformersSeqGeneration(BaseHandler, ABC):
_LANG_MAP = {
"es": "Spanish",
"fr": "French",
"de": "German",
"en": "English",
}
def __init__(self):
super().__init__()
self.initialized = False
def initialize(self, ctx):
self.manifest = ctx.manifest
properties = ctx.system_properties
model_dir = properties.get("model_dir")
serialized_file = self.manifest["model"]["serializedFile"]
model_pt_path = os.path.join(model_dir, serialized_file)
self.device = torch.device(
"cuda:" + str(properties.get("gpu_id"))
if torch.cuda.is_available()
else "cpu"
)
# read configs for the mode, model_name, etc. from setup_config.json
setup_config_path = os.path.join(model_dir, "setup_config.json")
if os.path.isfile(setup_config_path):
with open(setup_config_path) as setup_config_file:
self.setup_config = json.load(setup_config_file)
else:
logger.warning("Missing the setup_config.json file.")
# Loading the model and tokenizer from checkpoint and config files based on the user's choice of mode
# further setup config can be added.
self.tokenizer = T5Tokenizer.from_pretrained(model_dir)
if self.setup_config["save_mode"] == "torchscript":
self.model = torch.jit.load(model_pt_path)
elif self.setup_config["save_mode"] == "pretrained":
self.model = T5ForConditionalGeneration.from_pretrained(model_dir)
else:
logger.warning("Missing the checkpoint or state_dict.")
self.model.to(self.device)
self.model.eval()
logger.info("Transformer model from path %s loaded successfully", model_dir)
self.initialized = True
def preprocess(self, requests):
input_batch = None
texts_batch = []
for idx, data in enumerate(requests):
data = data["body"]
input_text = data["text"]
src_lang = data["from"]
tgt_lang = data["to"]
if isinstance(input_text, (bytes, bytearray)):
input_text = input_text.decode("utf-8")
src_lang = src_lang.decode("utf-8")
tgt_lang = tgt_lang.decode("utf-8")
texts_batch.append(f"translate {self._LANG_MAP[src_lang]} to {self._LANG_MAP[tgt_lang]}: {input_text}")
inputs = self.tokenizer(texts_batch, return_tensors="pt")
input_batch = inputs["input_ids"].to(self.device)
return input_batch
    def inference(self, input_batch):
        """Generate translations for a batch of token ids.

        Args:
            input_batch: ``input_ids`` tensor produced by ``preprocess``.

        Returns:
            list[str]: decoded translations with special tokens stripped.
        """
        generations = self.model.generate(input_batch)
        generations = self.tokenizer.batch_decode(generations, skip_special_tokens=True)
        return generations
    def postprocess(self, inference_output):
        """Wrap each decoded translation in the ``{"text": ...}`` response schema."""
        return [{"text": text} for text in inference_output]
| 39.5625 | 115 | 0.641706 | 2,934 | 0.927014 | 0 | 0 | 0 | 0 | 0 | 0 | 650 | 0.205371 |
fa7f8a320c3494838594cfc8dddc16eae8654412 | 1,114 | py | Python | api/pub/sensor/sensor.py | rtaft/pi-sensor-dashboard | e7f711e8ecd9e4c32976583c32dbc716b165d56a | [
"MIT"
] | null | null | null | api/pub/sensor/sensor.py | rtaft/pi-sensor-dashboard | e7f711e8ecd9e4c32976583c32dbc716b165d56a | [
"MIT"
] | null | null | null | api/pub/sensor/sensor.py | rtaft/pi-sensor-dashboard | e7f711e8ecd9e4c32976583c32dbc716b165d56a | [
"MIT"
] | null | null | null | import flask
from flask import request
import flask_restful as restful
from marshmallow import Schema, fields, validate
from api.helpers import success, created
from api.exceptions import NotFound
#from api.restful import API
#@API.route('/sensors', methods=['GET'], resource_class_kwargs={'test': 'foo'})
class SensorsConfig (restful.Resource):
    """REST resource exposing CRUD operations for sensor configuration."""

    def __init__(self, *args, **kwargs):
        # The sensor service is injected at route registration time via
        # resource_class_kwargs.
        self.sensor_service = kwargs['sensor_service']

    def get(self):
        """Return the full sensor configuration."""
        return success(self.sensor_service.get_config())

    def post(self):
        """Create a new sensor configuration from the JSON request body."""
        payload = request.get_json(force=True)
        # TODO validator?
        new_id = self.sensor_service.set_config(None, payload)
        return success(new_id)

    def put(self, sensor_id):
        """Replace the configuration of an existing sensor."""
        payload = request.get_json(force=True)
        # TODO validator?
        updated_id = self.sensor_service.set_config(sensor_id, payload)
        return success(updated_id)

    def delete(self, sensor_id):
        """Remove a sensor and acknowledge with an empty success body."""
        self.sensor_service.remove_sensor(sensor_id)
        return success()
fa80141154a663ebd88b94d2fadc61b86d838ccf | 5,802 | py | Python | homework/week8/models.py | enigmacodemaster/Project_Of_Mask_Real_Time_Detection | a4d0123709315ab436318999aa758b5a0a5f61e8 | [
"Apache-2.0"
] | null | null | null | homework/week8/models.py | enigmacodemaster/Project_Of_Mask_Real_Time_Detection | a4d0123709315ab436318999aa758b5a0a5f61e8 | [
"Apache-2.0"
] | null | null | null | homework/week8/models.py | enigmacodemaster/Project_Of_Mask_Real_Time_Detection | a4d0123709315ab436318999aa758b5a0a5f61e8 | [
"Apache-2.0"
] | 1 | 2021-03-05T03:21:38.000Z | 2021-03-05T03:21:38.000Z | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from utils.parse_config import *
from utils.utils import build_targets, to_cpu, non_max_suppression
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class Swish(nn.Module):
    """Swish activation: f(x) = x * sigmoid(x)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # torch.sigmoid replaces F.sigmoid, which is deprecated and removed
        # in recent PyTorch releases.
        return x * torch.sigmoid(x)
class Mish(nn.Module):
    """Mish activation: f(x) = x * tanh(softplus(x))."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        softplus = F.softplus(x)
        return x * torch.tanh(softplus)
# Depthwise-separable convolution (MobileNet-style).
class DepthwiseConv2d(nn.Module):
    """Per-channel spatial convolution followed by a 1x1 pointwise
    convolution that mixes information across channels."""

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
        super(DepthwiseConv2d, self).__init__()
        # groups=in_channels -> each input channel is convolved independently.
        # Submodule names (conv1 / pointwise) are kept so state_dict keys of
        # existing checkpoints remain valid.
        self.conv1 = nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups=in_channels,
            bias=bias,
        )
        # 1x1 convolution combining the depthwise outputs across channels.
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
def create_modules(module_defs, act_type=0, mobile_yolo=False):
    """
    Construct an nn.ModuleList of layer blocks from the parsed Darknet
    configuration in ``module_defs``.

    Args:
        module_defs: list of dicts, one per config section; the first entry
            is the global [net] section, e.g.
            [{"type": "net", "channels": 3, ...},
             {"type": "convolutional", "batch_normalize": 1, ...}, ...]
        act_type: activation for "leaky" layers: 0 = LeakyReLU,
            1 = Swish, 2 = Mish.
        mobile_yolo: if True, use depthwise-separable convolutions instead
            of standard nn.Conv2d.

    Returns:
        (hyperparams, module_list): the [net] hyperparameter dict and the
        built nn.ModuleList (one nn.Sequential per layer block).
    """
    hyperparams = module_defs.pop(0) # global hyperparameters from the [net] section
    output_filters = [int(hyperparams["channels"])] # starts at 3 for RGB input
    module_list = nn.ModuleList() # one nn.Sequential per block, e.g. conv-bn-activation
    for module_i, module_def in enumerate(module_defs):
        modules = nn.Sequential()
        if module_def["type"] == "convolutional":
            bn = int(module_def["batch_normalize"])
            filters = int(module_def["filters"])
            kernel_size = int(module_def["size"])
            pad = (kernel_size - 1) // 2
            # Choose between depthwise-separable and standard convolution.
            if mobile_yolo:
                modules.add_module(
                    f"conv_{module_i}",
                    DepthwiseConv2d(
                        in_channels=output_filters[-1],
                        out_channels=filters,
                        kernel_size=kernel_size,
                        stride=int(module_def["stride"]),
                        padding=pad,
                        bias=not bn,
                    ),
                )
            else:
                modules.add_module(
                    f"conv_{module_i}",
                    nn.Conv2d(
                        in_channels=output_filters[-1],
                        out_channels=filters,
                        kernel_size=kernel_size,
                        stride=int(module_def["stride"]),
                        padding=pad,
                        bias=not bn,
                    ),
                )
            if bn:
                modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))
            if module_def["activation"] == "leaky":
                if int(act_type) == 0:
                    print("Adding LeakyReLU")
                    modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
                elif int(act_type) == 1:
                    print("Adding Swish")
                    modules.add_module(f"swish_{module_i}", Swish())
                elif int(act_type) == 2:
                    print("Adding Mish")
                    modules.add_module(f"mish_{module_i}", Mish())
        elif module_def["type"] == "maxpool":
            kernel_size = int(module_def["size"])
            stride = int(module_def["stride"])
            if kernel_size == 2 and stride == 1:
                modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
            maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
            modules.add_module(f"maxpool_{module_i}", maxpool)
        elif module_def["type"] == "upsample":
            upsample = Upsample(scale_factor=int(module_def["stride"]), mode="nearest")
            modules.add_module(f"upsample_{module_i}", upsample)
        elif module_def["type"] == "route":
            layers = [int(x) for x in module_def["layers"].split(",")]
            filters = sum([output_filters[1:][i] for i in layers]) # channel counts add up, matching the concat
            modules.add_module(f"route_{module_i}", EmptyLayer())
        elif module_def["type"] == "shortcut":
            filters = output_filters[1:][int(module_def["from"])]
            modules.add_module(f"shortcut_{module_i}", EmptyLayer())
        elif module_def["type"] == "yolo":
            # mask 6,7,8 / 3,4,5 / 0,1,2 <=> small/medium/large feature map
            # <=> large/medium/small objects.
            anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
            # Extract anchors
            anchors = [int(x) for x in module_def["anchors"].split(",")]
            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in anchor_idxs]
            # e.g. for mask 6,7,8:
            # [(116, 90), (156, 198), (373, 326)]
            num_classes = int(module_def["classes"]) # e.g. 80 for COCO
            img_size = int(hyperparams["height"]) # e.g. 416
            # Define detection layer
            yolo_layer = YOLOLayer(anchors, num_classes, img_size)
            modules.add_module(f"yolo_{module_i}", yolo_layer)
        # Register module list and number of output filters.
        # NOTE(review): if the first block were an unknown/upsample type,
        # `filters` would be unbound here -- assumes configs start with a
        # convolutional block, as Darknet configs do.
        module_list.append(modules)
        output_filters.append(filters)
    return hyperparams, module_list
| 39.739726 | 119 | 0.55636 | 818 | 0.138456 | 0 | 0 | 0 | 0 | 0 | 0 | 1,322 | 0.223764 |
fa8070ca6c33d7c997430f09e7d9b6f93e1c5af7 | 5,553 | py | Python | ggtools/gg/__init__.py | richannan/GGTOOLS | 7909da988d90de50c82532d97121a3fbcfc0263a | [
"MIT"
] | 22 | 2019-12-16T01:30:29.000Z | 2022-03-01T08:57:07.000Z | ggtools/gg/__init__.py | richannan/GGTOOLS | 7909da988d90de50c82532d97121a3fbcfc0263a | [
"MIT"
] | 3 | 2019-12-23T14:09:30.000Z | 2022-03-29T01:52:53.000Z | ggtools/gg/__init__.py | richannan/GGTOOLS | 7909da988d90de50c82532d97121a3fbcfc0263a | [
"MIT"
] | 13 | 2019-12-19T07:01:19.000Z | 2022-03-14T11:26:36.000Z | '''
ggtools gg subpackage
This subpackage defines the following functions:
# ====================== fitting function ==================== #
func - Define a linear function f(x) = a0 + a1/T*x to be fitted, where a0 and a1 are parameters for inter
# =================== estimate love numbers =================== #
lovebums - Estimate Load Love Numbers(LLNs)
# ===================== plotting functions =================== #
plot_at_northpole - Plot grid data at the northpole.
# ===================== mascon functions ===================== #
mascon_download - Download GRACE mascon data from https://neptune.gsfc.nasa.gov/uploads/grace/mascons_2.4/
read_mascon - Read the GRACE mascon files.
# ========== Compare DDK filter and Gaussian filter ========== #
ddk_gaussian - Given a specific type of DDK filter and the maximum SHC degree number, evaluate the 'equivalent' Gaussian filter radius.
# ================= Tikhonov Regularization ================== #
solve_lambda_x - Minimize the Lagrangian L(x,lambda) = || A*x - y||**2 + lambda*||x||**2 based on the idea of Tikhonov Regularization. Estimate the parameters x and Lagrange multiplier roughly.
L_curve - Minimize the Lagrangian L(x,lambda) = || A*x - y||**2 + lambda*||x||**2 based on the idea of Tikhonov Regularization. Estimate the parameters x and Lagrange multiplier accurately.
# ===================== Filter functions ===================== #
filter_ddk - DDK filter used to attenuate noise described as striping patterns in GRACE GSM data.
filter_gaussian - Gaussian filter used to attenuate noise described as striping patterns in GRACE GSM data.
filter_gaussian_inverse - Inversion of Gaussian filter. It is used to recover the signal in the process of leakage correction in GRACE GSM data.
# ================= Signal leakage correction ================ #
scale_factor - Estimate the scale factor(gain factor) used in signal leakage correction.
forward_model_initial - Expand the grid data to spherical harmonic coefficients and perform Gaussian filtering, then transfer it back to the grid data.
forward_model - Iterative Forward Modeling used to perform signal leakage correction in GRACE data processing.
space_domain - Space domain method used to perform signal leakage correction in GRACE data processing.
spectral_domain - Spectrum domain method used to perform signal leakage correction in GRACE data processing.
# ==================== Least square method =================== #
lsqm - Linearly fit f(x) = a0 + a1/T*x using the Least Square algorithm.
wlsqm - Linearly fit f(x) = a0 + a1/T*x using the Weighted Least Square algorithm.
ilsqm - Linearly fit f(x) = a0 + a1/T*x using the Iterative Least Square method. The 3-sigma rule is used to eliminate outliers.
iwlsqm - Linearly fit f(x) = a0 + a1/T*x using the Iterative Weighted Least Square algorithm. The 3-sigma rule is used to eliminate outliers.
# ==================== GRACE data utilitues ================== #
parse_gsm_filename - Parse GRACE GSM filenames.
print_gsm_date_coverage - Print the date coverage for the GRACE GSM data from isdcftp.gfz-potsdam.de
gsm_download - Download GRACE GSM data from isdcftp.gfz-potsdam.de
parse_gsm_file - Parse the GRACE GSM Level-2 file.
read_gsm - Read the GRACE GSM Level-2 files.
gsm_average - Combine the (deaveraged) GSM solution from multiple institutions into an integrated one.
# ==================== GLDAS data utilitues ================== #
gldas_download - Download the GLDAS grid data over a period defined by the start date and end date and its documentation from urs.earthdata.nasa.gov
read_gldas - Read the GLDAS files into a GLDAS class instance.
regular_gldas - Normalize the GLDAS grid data to meet the requirements of spherical harmonic expansion with pyshtools based on the sampling theorem of Driscoll and Healy (1994).
lsm - Calculate some land surface quantities from GLDAS grid data, such as Terrestrial Water Storage Changes(TWSC).
landmask - Establish a land window function based on the global terrain data ETOPO5.
# =================== SLR C20 data utilitues ================= #
slr_c20_download - Download SLR C20 data from isdcftp.gfz-potsdam.de
read_slr_c20 - Read the SLR C20 file.
# ========================= Utilitues ======================== #
print_error_grace - If source, D, and RL do not meet the input requirements, an error message will be printed.
print_error_gldas - If source and res do not meet the input requirements, an error message will be printed.
month2int - Given a list of month list, translate it to an array of month sequence.
med(x) - Calculate the middle divisor.
solid_angle_ratio - Calculate the ratio of a solid angle of an ellipse on a sphere to 4pi.
crop_region - Crop the global grid data to a region of interested.
yx2latlon - Transform index to latitudes and longitudes.
latlon2yx - Transform latitudes and longitudes to index.
generate_mascons - Create mascons within a study area using a set of points or polygons.
'''
from .gsm_utils import print_gsm_date_coverage,gsm_download,read_gsm,gsm_average
from .static_models import static_download
from .slr_utils import slr_c20_download,read_slr_c20
from .gldas_utils import gldas_download,read_gldas
from .plot import plot_at_northpole
from .ddk_gaussian import ddk_gaussian
from .leakage import scale_factor
from .lcurve import L_curve
from .utils import generate_nodes,solid_angle_ratio
from .mascon import mascon_download,read_mascon
from .ddk_gaussian import ddk_gaussian
from .landmask import landmask | 43.724409 | 193 | 0.715469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,016 | 0.903296 |
fa80d721e471bbc14b93aa5e1ca661ad13077c16 | 3,203 | py | Python | django_core_models/locations/urls.py | ajaniv/django-core-models | 7fde3792de745b5df875e8dc760096f5b10d46ce | [
"MIT"
] | null | null | null | django_core_models/locations/urls.py | ajaniv/django-core-models | 7fde3792de745b5df875e8dc760096f5b10d46ce | [
"MIT"
] | 15 | 2016-04-23T17:18:42.000Z | 2018-09-06T16:32:48.000Z | django_core_models/locations/urls.py | ajaniv/django-core-models | 7fde3792de745b5df875e8dc760096f5b10d46ce | [
"MIT"
] | null | null | null | """
.. module:: django_core_models.locations.urls
:synopsis: django_core_models locations application urls module
django_core_models *locations* application urls module.
"""
from __future__ import absolute_import
from django.conf.urls import url
from . import views
# URL routes for the locations API: each model gets a list endpoint and a
# pk-based detail endpoint.
urlpatterns = [
    url(r'^addresses/$',
        views.AddressList.as_view(),
        name='address-list'),
    url(r'^addresses/(?P<pk>[0-9]+)/$',
        views.AddressDetail.as_view(),
        name='address-detail'),
    url(r'^address-types/$',
        views.AddressTypeList.as_view(),
        name='address-type-list'),
    url(r'^address-types/(?P<pk>[0-9]+)/$',
        views.AddressTypeDetail.as_view(),
        name='address-type-detail'),
    url(r'^cities/$',
        views.CityList.as_view(),
        name='city-list'),
    url(r'^cities/(?P<pk>[0-9]+)/$',
        views.CityDetail.as_view(),
        name='city-detail'),
    url(r'^countries/$',
        views.CountryList.as_view(),
        name='country-list'),
    url(r'^countries/(?P<pk>[0-9]+)/$',
        views.CountryDetail.as_view(),
        name='country-detail'),
    url(r'^distance-units/$', views.DistanceUnitList.as_view(),
        name='distance-unit-list'),
    url(r'^distance-units/(?P<pk>[0-9]+)/$',
        views.DistanceUnitDetail.as_view(),
        name='distance-unit-detail'),
    url(r'^geographic-locations/$',
        views.GeographicLocationList.as_view(),
        name='geographic-location-list'),
    url(r'^geographic-locations/(?P<pk>[0-9]+)/$',
        views.GeographicLocationDetail.as_view(),
        name='geographic-location-detail'),
    url(r'^geographic-location-types/$',
        views.GeographicLocationTypeList.as_view(),
        name='geographic-location-type-list'),
    url(r'^geographic-location-types/(?P<pk>[0-9]+)/$',
        views.GeographicLocationTypeDetail.as_view(),
        name='geographic-location-type-detail'),
    url(r'^language-types/$',
        views.LanguageTypeList.as_view(),
        name='language-type-list'),
    url(r'^language-types/(?P<pk>[0-9]+)/$',
        views.LanguageTypeDetail.as_view(),
        name='language-type-detail'),
    url(r'^languages/$',
        views.LanguageList.as_view(),
        name='language-list'),
    url(r'^languages/(?P<pk>[0-9]+)/$',
        views.LanguageDetail.as_view(),
        name='language-detail'),
    url(r'^timezone-types/$',
        views.TimezoneTypeList.as_view(),
        name='timezone-type-list'),
    url(r'^timezone-types/(?P<pk>[0-9]+)/$',
        views.TimezoneTypeDetail.as_view(),
        name='timezone-type-detail'),
    url(r'^timezones/$',
        views.TimezoneList.as_view(),
        name='timezone-list'),
    url(r'^timezones/(?P<pk>[0-9]+)/$',
        views.TimezoneDetail.as_view(),
        name='timezone-detail'),
    # NOTE(review): 'proninces' looks like a typo for 'provinces'; fixing it
    # would change the public URL clients already use, so it is flagged here
    # rather than silently changed.
    url(r'^proninces/$',
        views.ProvinceList.as_view(),
        name='province-list'),
    url(r'^proninces/(?P<pk>[0-9]+)/$',
        views.ProvinceDetail.as_view(),
        name='province-detail'),
    url(r'^states/$',
        views.StateList.as_view(),
        name='state-list'),
    url(r'^states/(?P<pk>[0-9]+)/$',
        views.StateDetail.as_view(),
        name='state-detail'),
    ]
| 30.504762 | 67 | 0.596316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,335 | 0.416797 |
fa81f8a7916eae6aa8efd3ffef3174775ad07146 | 3,984 | py | Python | dataloader/Dataset.py | mvemoon/TextClassificationBenchmark | e564f653cd9117a765bbe0b2da42d34b31546ae4 | [
"MIT"
] | 576 | 2017-12-13T12:44:20.000Z | 2022-03-03T13:28:58.000Z | dataloader/Dataset.py | motefly/TextClassificationBenchmark | 77cbf922e3abe1241961c2759170a4657a3a4672 | [
"MIT"
] | 21 | 2017-12-17T07:46:25.000Z | 2020-08-20T16:55:01.000Z | dataloader/Dataset.py | motefly/TextClassificationBenchmark | 77cbf922e3abe1241961c2759170a4657a3a4672 | [
"MIT"
] | 149 | 2017-12-13T15:00:37.000Z | 2022-03-23T18:29:44.000Z | # -*- coding: utf-8 -*-
import os
import urllib
import urllib.request
class Dataset(object):
    """Base class for downloadable text-classification datasets.

    Subclasses set ``self.urls`` and override :meth:`process` to convert the
    raw download into the cleaned files consumed by the loaders.
    """

    def __init__(self, opt=None):
        """Configure from an options object, or fall back to demo defaults."""
        if opt is not None:
            self.setup(opt)
            self.http_proxy = opt.__dict__.get("proxy", "null")
        else:
            self.name = "demo"
            self.dirname = "demo"
            self.http_proxy = "null"
        self.urls = []
        self.root = ".data"
        self.saved_path = os.path.join(os.path.join(self.root, "clean"), self.name)
        self.formated_files = None

    def setup(self, opt):
        """Take the dataset name (and proxy) from the options object."""
        self.name = opt.dataset
        self.dirname = opt.dataset
        self.http_proxy = opt.__dict__.get("proxy", "null")

    def process(self):
        """Download the raw archive; subclasses must transform it.

        Raises:
            NotImplementedError: always, in this base class.  (BUG FIX: the
            original formatted the message with the undefined name ``opt``,
            turning every call into a NameError.)
        """
        dirname = self.download()
        print("processing dirname: " + dirname)
        raise NotImplementedError(
            "process() must be overridden by a subclass for dataset: {}".format(self.name)
        )

    def getFormatedData(self):
        """Return the list of cleaned data files, processing on demand."""
        if self.formated_files is not None:
            return self.formated_files
        if os.path.exists(self.saved_path):
            return [os.path.join(self.saved_path, filename) for filename in os.listdir(self.saved_path)]
        self.formated_files = self.process()
        return self.formated_files

    def download_from_url(self, url, path, schedule=None):
        """Download ``url`` to ``path``, honouring the configured HTTP proxy.

        ``schedule`` is accepted for backward compatibility but unused.
        """
        if self.http_proxy != "null":
            proxy = urllib.request.ProxyHandler({'http': self.http_proxy, 'https': self.http_proxy})
            # construct a new opener using the proxy settings and install it
            # module-wide so urlretrieve picks it up
            opener = urllib.request.build_opener(proxy)
            urllib.request.install_opener(opener)
            print("proxy in %s" % self.http_proxy)
        # BUG FIX: the old fallback called urllib2.urlretrieve, which does not
        # exist (urllib2 has no urlretrieve, and urllib2 itself is absent on
        # Python 3); let the real error propagate instead of masking it.
        urllib.request.urlretrieve(url, path)
        return path

    def download(self, check=None):
        """Download and unzip an online archive (.zip, .gz, .tgz or .bz2).

        Arguments:
            check (str or None): Folder whose existence indicates
                that the dataset has already been downloaded, or
                None to check the existence of root/{cls.name}.
        Returns:
            dataset_path (str): Path to extracted dataset.
        """
        import zipfile, tarfile
        path = os.path.join(self.root, self.name)
        check = path if check is None else check
        if not os.path.isdir(check):
            for url in self.urls:
                if isinstance(url, tuple):
                    # (url, filename) pairs let callers rename the download.
                    url, filename = url
                else:
                    filename = os.path.basename(url)
                zpath = os.path.join(path, filename)
                if not os.path.isfile(zpath):
                    if not os.path.exists(os.path.dirname(zpath)):
                        os.makedirs(os.path.dirname(zpath))
                    print('downloading {}'.format(filename))
                    self.download_from_url(url, zpath)
                ext = os.path.splitext(filename)[-1]
                if ext == '.zip':
                    with zipfile.ZipFile(zpath, 'r') as zfile:
                        print('extracting')
                        zfile.extractall(path)
                elif ext in ['.gz', '.tgz', '.bz2']:
                    # BUG FIX: 'r:gz' cannot open bz2 archives; 'r:*' lets
                    # tarfile auto-detect the compression scheme.
                    with tarfile.open(zpath, 'r:*') as tar:
                        dirs = [member for member in tar.getmembers()]
                        tar.extractall(path=path, members=dirs)
        else:
            print("%s do not need to be downloaded" % path)
        return path
| 37.233645 | 159 | 0.541667 | 3,934 | 0.98745 | 0 | 0 | 0 | 0 | 0 | 0 | 1,061 | 0.266315 |
fa81fc214c9e7372cdac762542256af383055ca7 | 1,232 | py | Python | scrapy/arxiv1.py | SaeedPourjafar/ws_2021 | 6e91e70fc8f40007eb3c4d68282aa86b80b79363 | [
"MIT"
] | null | null | null | scrapy/arxiv1.py | SaeedPourjafar/ws_2021 | 6e91e70fc8f40007eb3c4d68282aa86b80b79363 | [
"MIT"
] | null | null | null | scrapy/arxiv1.py | SaeedPourjafar/ws_2021 | 6e91e70fc8f40007eb3c4d68282aa86b80b79363 | [
"MIT"
] | null | null | null | # Please note that since the number of topics in computer science are exactly 40 and it's less than 100
# therefore we applied the limit on the second file (arxiv2.py) which has somewhere around 700-800 outputs
# To run this file please put it in the spiders folder and run the code below in terminal/cmd:
# scrapy crawl topics -o topics.csv
import scrapy
import psutil # For memory usage
import os
class Link(scrapy.Item):
    # Single-field item: the absolute URL of one Computer Science topic page.
    link = scrapy.Field()
class LinkListsSpider(scrapy.Spider):
    """Spider that collects the links of all arXiv Computer Science topics."""

    name = 'topics'
    # BUG FIX: allowed_domains must contain bare domain names, not URLs; a
    # scheme-prefixed entry never matches and breaks offsite filtering.
    allowed_domains = ['arxiv.org']
    start_urls = ['https://arxiv.org']

    def parse(self, response):
        """Yield a Link item for every CS topic linked from the homepage."""
        # We are looking for the list of topics under the Computer Science
        # section, i.e. from 'Artificial Intelligence' all the way to
        # 'Systems and Control'.  Raw string so the regex escape survives.
        xpath = r'//h2/following-sibling::h2[contains(text(),"Computer Science")]/following-sibling::ul/li/a[re:test(@id, "cs\..*")]/@href'
        selection = response.xpath(xpath)
        for s in selection:
            l = Link()
            l['link'] = 'https://arxiv.org' + s.get()
            yield l
        print("Memory usage in MB:", round(psutil.Process(os.getpid()).memory_info().rss / 1024 ** 2, 2))
| 39.741935 | 139 | 0.650162 | 812 | 0.659091 | 503 | 0.408279 | 0 | 0 | 0 | 0 | 722 | 0.586039 |
fa827fe0d4e36def59dcda41a7b889abf5ecab35 | 392 | py | Python | pdm/pep517/_vendor/toml/ordered.py | linw1995/pdm-pep517 | 6b8030225ec374acf04ee70c685e55a495f24482 | [
"MIT"
] | 4 | 2021-04-14T16:18:08.000Z | 2022-01-13T13:03:47.000Z | pdm/pep517/_vendor/toml/ordered.py | linw1995/pdm-pep517 | 6b8030225ec374acf04ee70c685e55a495f24482 | [
"MIT"
] | 29 | 2021-03-23T15:40:56.000Z | 2022-03-10T11:55:38.000Z | pdm/pep517/_vendor/toml/ordered.py | frostming/pdm-pep517 | 99b6aab5f3cb2dac657f3a750d8eb4ad001dd095 | [
"MIT"
] | 6 | 2021-03-21T17:42:25.000Z | 2022-01-25T21:28:35.000Z | from collections import OrderedDict
from pdm.pep517._vendor.toml import TomlEncoder
from pdm.pep517._vendor.toml import TomlDecoder
class TomlOrderedDecoder(TomlDecoder):
    """TOML decoder that preserves key order by decoding into OrderedDict."""

    def __init__(self):
        # BUG FIX: zero-argument super() replaces super(self.__class__, self),
        # which recurses infinitely if this class is ever subclassed.
        super().__init__(_dict=OrderedDict)
class TomlOrderedEncoder(TomlEncoder):
    """TOML encoder that emits keys in insertion order via OrderedDict."""

    def __init__(self):
        # BUG FIX: zero-argument super() replaces super(self.__class__, self),
        # which recurses infinitely if this class is ever subclassed.
        super().__init__(_dict=OrderedDict)
| 24.5 | 63 | 0.77551 | 254 | 0.647959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
fa869925b1b7d58317ee5c78a24422f831fc4fc4 | 673 | py | Python | easyHTTP/client/api.py | hxgz/easyHTTP | 21d28119cd3ad7ca8275197468adc5cae634ba6f | [
"Apache-2.0"
] | null | null | null | easyHTTP/client/api.py | hxgz/easyHTTP | 21d28119cd3ad7ca8275197468adc5cae634ba6f | [
"Apache-2.0"
] | null | null | null | easyHTTP/client/api.py | hxgz/easyHTTP | 21d28119cd3ad7ca8275197468adc5cae634ba6f | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
from urllib.parse import urlencode, urljoin
from .client import Client
class API(Client):
    """Base class for HTTP API endpoints.

    Subclasses define ``HOST`` and ``PATH``; ``PATH`` may contain
    ``str.format`` placeholders filled from ``path_args``.
    """

    HOST = None     # e.g. "https://example.com"
    PATH = None     # endpoint path, may contain {placeholders}
    TIMEOUT = 30    # default request timeout, seconds

    @classmethod
    def _build_url(cls, path_args=None, params=None):
        """Assemble the request URL from HOST, PATH, path args and query params."""
        endpoint = urljoin(cls.HOST, cls.PATH)
        if path_args:
            endpoint = endpoint.format(**path_args)
        if params:
            joiner = "&" if "?" in endpoint else "?"
            endpoint = "{}{}{}".format(endpoint, joiner, urlencode(params))
        return endpoint

    async def call(self, path_args=None, params=None, data=None, headers=None):
        """Build the URL and delegate the actual request to Client.call."""
        target = self._build_url(path_args, params)
        return await super().call(target, data, headers)
| 24.035714 | 79 | 0.603269 | 582 | 0.864785 | 0 | 0 | 316 | 0.469539 | 187 | 0.27786 | 31 | 0.046062 |
fa86af3e536c67102ea203913013e7cc1a7477ca | 96 | py | Python | ingredient_parser/__init__.py | johnwmillr/RecipesAPI | 3c7422b838fef4f25c59b5c410e2a82e64ba2dc4 | [
"MIT"
] | null | null | null | ingredient_parser/__init__.py | johnwmillr/RecipesAPI | 3c7422b838fef4f25c59b5c410e2a82e64ba2dc4 | [
"MIT"
] | null | null | null | ingredient_parser/__init__.py | johnwmillr/RecipesAPI | 3c7422b838fef4f25c59b5c410e2a82e64ba2dc4 | [
"MIT"
] | null | null | null | __author__ = 'sheraz'
__all__ = ['parse','normalize']
from ingredient_parser.en import parse
| 13.714286 | 38 | 0.739583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.270833 |
fa87373ae132aaf932898395d86017b53654b8c3 | 11,847 | py | Python | RL_practise/MCTS/AlphaZero/board.py | xiaoyangzai/DeepReinforcementLearning | 3da248074ab9542a5eb1c3ae60eb7b7ee7777b87 | [
"MIT"
] | null | null | null | RL_practise/MCTS/AlphaZero/board.py | xiaoyangzai/DeepReinforcementLearning | 3da248074ab9542a5eb1c3ae60eb7b7ee7777b87 | [
"MIT"
] | null | null | null | RL_practise/MCTS/AlphaZero/board.py | xiaoyangzai/DeepReinforcementLearning | 3da248074ab9542a5eb1c3ae60eb7b7ee7777b87 | [
"MIT"
] | 1 | 2021-04-08T10:36:13.000Z | 2021-04-08T10:36:13.000Z | #!/usr/bin/python
from __future__ import print_function
import numpy as np
import os
from human_player import human_player
import time
class Board(object):
    """Board for an n-in-row game (e.g. Gomoku).

    Moves are flat indices into a ``width x height`` grid::

        3*3 board's moves:
            6 7 8
            3 4 5
            0 1 2

    ``states`` maps move -> player id.  ``win_flag`` is 0 (in progress),
    1 (last mover won), 2 (draw) or -1 (last attempted move was illegal).
    """

    def __init__(self, **kwargs):
        self.width = int(kwargs.get('width', 8))
        self.height = int(kwargs.get('height', 8))
        # move index -> player id
        self.states = {}
        self.win_flag = 0
        self.winner = 0
        self.n_in_row = int(kwargs.get('n_in_row', 5))
        # the two player ids
        self.players = [1, 2]
        self.start_player = int(kwargs.get('start_player', 0))

    def init_board(self, start_player=0):
        """Reset the board; ``start_player`` indexes into ``self.players``."""
        if self.width < self.n_in_row or self.height < self.n_in_row:
            # BUG FIX: the original wrote '...{}',format(...) (comma instead
            # of dot), so the message was never formatted.
            raise Exception('board width or height can not be less than {}'.format(self.n_in_row))
        self.current_player = self.players[start_player]
        self.availables = list(range(self.width * self.height))
        self.states = {}
        self.last_move = -1
        # BUG FIX: reset the game-over state so the board can be reused for
        # another game (the original kept win_flag/winner from the last game).
        self.win_flag = 0
        self.winner = 0

    def move_to_location(self, move):
        """Convert a flat move index to ``[row, col]``.

        BUG FIX: the row is ``move // width`` (inverse of location_to_move);
        the original divided by height, which is wrong for non-square boards.
        """
        h = move // self.width
        w = move % self.width
        return [h, w]

    def location_to_move(self, h, w):
        """Convert ``(row, col)`` to the flat move index."""
        move = h * self.width + w
        return move

    def current_state(self):
        """Return the board state as a 4 x width x height array.

        plane 0 - stones of the player to move,
        plane 1 - stones of the opponent,
        plane 2 - the last move only,
        plane 3 - all ones when the player to move is the first player.
        """
        square_state = np.zeros((4, self.width, self.height))
        if self.states:
            moves, players = np.array(list(zip(*self.states.items())))
            move_curr = moves[players == self.current_player]
            move_oppo = moves[players != self.current_player]
            square_state[0][move_curr // self.width, move_curr % self.width] = 1.0
            # BUG FIX: the opponent plane previously used move_curr for the
            # column index, marking the wrong cells.
            square_state[1][move_oppo // self.width, move_oppo % self.width] = 1.0
            # last move location
            square_state[2][self.last_move // self.width, self.last_move % self.width] = 1.0
        if len(self.states) % 2 == 0:
            square_state[3][:, :] = 1.0
        return square_state

    def do_move(self, move):
        """Play ``move`` for the current player.

        An illegal move sets ``win_flag`` to -1 and returns immediately;
        otherwise the move is recorded, the game-over state is refreshed and
        the turn passes to the other player.
        """
        if move not in self.availables:
            self.win_flag = -1
            return self.win_flag, self.winner
        self.states[move] = self.current_player
        self.availables.remove(move)
        self.last_move = move
        self.win_flag = self.is_winner()
        if self.win_flag == 1:
            self.winner = self.current_player
        self.current_player = self.players[0] if self.current_player == self.players[1] else self.players[1]

    def is_game_over(self):
        """Return ``(win_flag, winner)``."""
        return self.win_flag, self.winner

    def _line_count(self, dh, dw):
        """Count consecutive stones of the last mover through ``last_move``
        along direction ``(dh, dw)``, scanning both ways.

        BUG FIX: the original loops built neighbour indices without checking
        both axes, so counting rightward from the last column wrapped around
        to the next row's first column (same for the diagonals).
        """
        player = self.states[self.last_move]
        count = 1
        for sign in (1, -1):
            h, w = self.move_to_location(self.last_move)
            while True:
                h2, w2 = h + sign * dh, w + sign * dw
                if not (0 <= h2 < self.height and 0 <= w2 < self.width):
                    break
                if self.states.get(self.location_to_move(h2, w2)) != player:
                    break
                count += 1
                h, w = h2, w2
        return count

    def up_down_count(self):
        """Consecutive stones through the last move along the column."""
        return self._line_count(1, 0)

    def left_right_count(self):
        """Consecutive stones through the last move along the row."""
        return self._line_count(0, 1)

    def left_up_to_right_down(self):
        """Consecutive stones along the (row+1, col-1) diagonal."""
        return self._line_count(1, -1)

    def right_up_to_left_down(self):
        """Consecutive stones along the (row+1, col+1) diagonal."""
        return self._line_count(1, 1)

    def get_current_player(self):
        """Return the id of the player to move."""
        return self.current_player

    def show_board(self):
        """Print the board to the terminal (clears the screen first)."""
        os.system("clear")
        char_type = {1: "X", 2: "O"}
        for _ in range(self.width):
            print("* ", end='')
        print("* *")
        for i in range(self.height):
            print("* ", end='')
            for j in range(self.width):
                move = self.location_to_move(i, j)
                if move in self.states:
                    print(char_type[self.states[move]], "", end='')
                else:
                    print("- ", end='')
            print("*")
        for _ in range(self.width):
            print("* ", end='')
        print("* *")

    def is_winner(self):
        """Evaluate the board after ``last_move``.

        Returns 1 if the last move completed an n-in-row line, 2 if the
        board is full without a winner (draw), 0 otherwise.
        """
        # BUG FIX: check the four line directions BEFORE declaring a draw;
        # the original returned 2 whenever the board was full, even when the
        # board-filling move was a winning one.
        if self.last_move in self.states:
            if (self.up_down_count() >= self.n_in_row
                    or self.left_right_count() >= self.n_in_row
                    or self.left_up_to_right_down() >= self.n_in_row
                    or self.right_up_to_left_down() >= self.n_in_row):
                return 1
        if len(self.states) == self.width * self.height:
            print("No one has win!!")
            return 2
        return 0
class game():
    """Drives a match (human vs. human, or self-play) on a shared Board."""

    def __init__(self, **kwargs):
        self.width = int(kwargs.get('width', 8))
        self.height = int(kwargs.get('height', 8))
        self.states = {}
        self.n_in_row = int(kwargs.get('n_in_row', 5))
        self.start_player = int(kwargs.get('start_player', 0))
        self.board = Board(width=self.width, height=self.height,
                           n_in_row=self.n_in_row, start_player=self.start_player)

    def start_play(self, player1, player2, is_shown=False):
        """Play one game between ``player1`` and ``player2``.

        Each player must implement ``set_player_index(idx)`` and
        ``get_action(board) -> move``.
        """
        self.board.init_board()
        p1, p2 = self.board.players
        player1.set_player_index(p1)
        player2.set_player_index(p2)
        players = {p1: player1, p2: player2}
        print("Index of player1: %d\tIndex of player2: %d" % (p1, p2))
        win_flag = -1
        winner = -1
        while True:
            if is_shown:
                self.board.show_board()
            current_player_index = self.board.current_player
            current_player = players[current_player_index]
            print("Waiting for %s to move...." % current_player)
            # the player picks a move, then the board executes it
            move = current_player.get_action(self.board)
            self.board.do_move(move)
            if is_shown:
                self.board.show_board()
            win_flag, winner = self.board.is_game_over()
            if win_flag < 0:
                h, w = self.board.move_to_location(move)
                # BUG FIX: 'invaled' -> 'invalid' in the user-facing message
                print("location [%d,%d] is invalid! Try again!" % (h, w))
                time.sleep(2)
            if win_flag > 0:
                break
        if win_flag == 1:
            print("========* [%d] player wins the game! *========" % winner)
        else:
            print("No Winner!!")

    def start_self_play(self, player, is_shown, temp=1e-3):
        """Run one self-play game with a single (MCTS) player.

        Returns ``(win_flag, winner, play_data)`` where ``play_data`` zips
        (state, mcts_probs, reward_z) triples; reward_z is +1 for moves made
        by the eventual winner, -1 for the loser and 0 on a draw.

        BUG FIXES vs. the original: it looked players up in an undefined
        ``players`` dict, read nonexistent ``self.win_flag``/``self.winner``
        attributes, built a 0-d ``np.array(len(...))`` instead of a reward
        vector, and returned ``players_order`` instead of the rewards it
        had just computed.
        """
        self.board.init_board()
        states, mcts_probs, players_order = [], [], []
        while True:
            current_player_index = self.board.current_player
            move, move_probs = player.get_action(self.board, temp=temp, return_prob=True)
            # store the training sample for this position
            states.append(self.board.current_state())
            mcts_probs.append(move_probs)
            players_order.append(current_player_index)
            # perform the move
            self.board.do_move(move)
            if is_shown:
                self.board.show_board()
            win_flag, winner = self.board.is_game_over()
            if win_flag > 0:
                break
        # one reward per recorded move
        rewards_z = np.zeros(len(players_order))
        if win_flag == 1:
            print("========* [%d] player wins the game! *========" % winner)
            rewards_z[np.array(players_order) == winner] = 1.0
            rewards_z[np.array(players_order) != winner] = -1.0
            # reset the MCTS search tree for the next game
            player.reset_player()
        else:
            print("No Winner!!")
        return win_flag, winner, zip(states, mcts_probs, rewards_z)
def main():
    """Start an interactive 10x10, 3-in-a-row game between two humans."""
    board_game = game(width=10, height=10, n_in_row=3)
    first = human_player(1)
    second = human_player(2)
    board_game.start_play(first, second, is_shown=True)
    print("Game Over!!")
    return


if __name__ == "__main__":
    main()
| 35.154303 | 125 | 0.545539 | 11,456 | 0.966996 | 0 | 0 | 0 | 0 | 0 | 0 | 1,611 | 0.135984 |
fa882f998e435f00fac9b0a2e9ec7aa2f7f84450 | 3,393 | py | Python | tests/validation_tool/test_validation_helper.py | zhuyulin27/amazon-emr-on-eks-custom-image-cli | fa6636345e6e720245eb24bd0ffa0de68cc8e09a | [
"Apache-2.0"
] | 17 | 2021-08-20T22:17:08.000Z | 2022-03-03T18:35:53.000Z | tests/validation_tool/test_validation_helper.py | zhuyulin27/amazon-emr-on-eks-custom-image-cli | fa6636345e6e720245eb24bd0ffa0de68cc8e09a | [
"Apache-2.0"
] | 2 | 2022-01-05T10:11:29.000Z | 2022-03-03T18:38:19.000Z | tests/validation_tool/test_validation_helper.py | zhuyulin27/amazon-emr-on-eks-custom-image-cli | fa6636345e6e720245eb24bd0ffa0de68cc8e09a | [
"Apache-2.0"
] | 1 | 2021-09-24T18:06:56.000Z | 2021-09-24T18:06:56.000Z | import unittest
import io
from unittest import mock
from tests.lib.utils import INSPECT
from custom_image_cli.validation_tool import validation_helper
from custom_image_cli.validation_tool.validation_models.validation_models import \
ImageDetail, ImageManifest, EmrRelease
class TestValidationHelper(unittest.TestCase):
    """Unit tests for custom_image_cli.validation_tool.validation_helper.

    All collaborators (the individual check classes and the loader) are
    mocked out, so these tests only verify orchestration: that
    validate_all runs every check once and that load_validation_info
    unpacks the manifest correctly.
    """
    def setUp(self) -> None:
        # Shared fixtures: a canned `docker inspect` payload and a minimal
        # manifest with one release containing one image entry.
        self.inspect = INSPECT
        self.manifest = ImageManifest([EmrRelease("release_name", [ImageDetail("image_type", None, [], [])])], [], [])
    # NOTE: mock.patch decorators apply bottom-up, so the parameter order
    # below (check_envs first, mock_stdout last) mirrors the stack.
    @mock.patch('sys.stdout', new_callable=io.StringIO)
    @mock.patch('custom_image_cli.validation_tool.validation_helper.load_validation_info')
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_local_job_run.CheckLocalJobRun.check")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.check")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.__init__")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.check")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.__init__")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.check")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.__init__")
    def test_validate_all(self, check_envs_constructor, check_envs, check_files_constructor,
                          check_files, check_manifest_constructor,
                          check_manifest, check_local_job_run, load_info, mock_stdout):
        """validate_all returns True and calls each check exactly once
        when every individual check passes."""
        # Constructors are patched to no-ops so no real setup runs.
        check_envs_constructor.return_value = None
        check_envs.return_value = True
        check_files_constructor.return_value = None
        check_files.return_value = True
        check_manifest_constructor.return_value = None
        check_manifest.return_value = True
        check_local_job_run.return_value = True
        load_info.return_value = ImageDetail("image_type", None, [], []), [], []
        actual = validation_helper.validate_all(self.inspect, "docker_cmd", "docker_image_uri",
                                                self.manifest, "release_name", "image_type", "log")
        self.assertEqual(actual, True)
        check_manifest.assert_called_once()
        check_envs.assert_called_once()
        check_files.assert_called_once()
        check_local_job_run.assert_called_once()
        # Only the manifest banner should have been printed.
        expected = "... Checking Image Manifest\n"
        self.assertEqual(expected, mock_stdout.getvalue())
    @mock.patch("custom_image_cli.validation_tool.check_inputs.check_version")
    @mock.patch("custom_image_cli.validation_tool.check_inputs.check_image")
    def test_load_validation_info(self, check_image, check_version):
        """load_validation_info returns the matching image plus its file and
        env lists, after validating the release and image type."""
        value = self.manifest
        check_version.return_value = None
        check_image.return_value = None
        actual_img, actual_file, actual_env = validation_helper.load_validation_info(self.manifest, "release_name", "image_type", "log")
        self.assertEqual(actual_img, self.manifest.emr_releases[0].images[0])
        self.assertEqual(actual_file, [])
        self.assertEqual(actual_env, [])
        check_version.assert_called_once_with(self.manifest.emr_releases[0], "release_name", "log")
        check_image.assert_called_once_with(self.manifest.emr_releases[0].images[0], "image_type", "log")
| 54.725806 | 136 | 0.742411 | 3,113 | 0.917477 | 0 | 0 | 2,876 | 0.847627 | 0 | 0 | 993 | 0.292661 |
fa8a6ced2cffa99e7edddbdbdbc6c8e1f0f2ffa5 | 2,216 | py | Python | other/stanford_ner_tagger.py | gauthamkrishna-g/Real-Time-Sentiment-Analyzer-of-Twitter-Trends | 478ea270f67aa75c964d69d29d9bac59978fd7c5 | [
"MIT"
] | 6 | 2017-08-25T10:08:02.000Z | 2021-02-02T16:15:16.000Z | other/stanford_ner_tagger.py | gauthkris/Real-Time-Sentiment-Analyzer-of-Twitter-Trends | 478ea270f67aa75c964d69d29d9bac59978fd7c5 | [
"MIT"
] | null | null | null | other/stanford_ner_tagger.py | gauthkris/Real-Time-Sentiment-Analyzer-of-Twitter-Trends | 478ea270f67aa75c964d69d29d9bac59978fd7c5 | [
"MIT"
] | 2 | 2019-07-12T08:07:32.000Z | 2020-05-22T17:21:13.000Z | # -*- coding: utf-8 -*-
import nltk
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
#from nltk import pos_tag
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
style.use('fivethirtyeight')
# Process text
raw_text = open("news_article.txt").read()
token_text = word_tokenize(raw_text)
def stanford_tagger(token_text):
    """Tag a token list with named entities using the Stanford NER model.

    Requires the 3-class CRF model file and stanford-ner.jar to be present
    in the working directory.
    """
    tagger = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz',
                               'stanford-ner.jar')
    return tagger.tag(token_text)
def nltk_tagger(token_text):
    """Tag a token list with named entities using NLTK's built-in
    POS tagger and chunker."""
    return nltk.ne_chunk(nltk.pos_tag(token_text))
def stanford_main():
    """Run the Stanford tagger over the module-level tokens and print the result."""
    print(stanford_tagger(token_text))
def nltk_main():
    """Run the NLTK tagger over the module-level tokens and print the result."""
    print(nltk_tagger(token_text))
def time_plot(stanford_total_time, nltk_total_time):
    """Draw a bar chart comparing the run time of the two NER taggers.

    Parameters
    ----------
    stanford_total_time, nltk_total_time : float
        Elapsed seconds for the Stanford and NLTK classifier runs.

    Fix: removed the redundant no-op self-assignments of the two
    parameters that the original contained.
    """
    N = 1
    ind = np.arange(N)  # x locations for the groups
    width = 0.35        # width of the bars
    fig, ax = plt.subplots()
    rects1 = ax.bar(ind, stanford_total_time, width, color='r')
    rects2 = ax.bar(ind + width, nltk_total_time, width, color='y')
    # Labels, title and axes ticks
    ax.set_xlabel('Classifier')
    ax.set_ylabel('Time (in seconds)')
    ax.set_title('Speed by NER Classifier')
    ax.set_xticks(ind + width)
    ax.set_xticklabels((''))
    ax.legend((rects1[0], rects2[0]), ('Stanford', 'NLTK'),
              bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)

    def autolabel(rects):
        # Attach each bar's height as a text label just above the bar.
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2., 1.02 * height,
                    '%10.2f' % float(height), ha='center', va='bottom')

    autolabel(rects1)
    autolabel(rects2)
    plt.show()
if __name__ == '__main__':
    # os.times()[4] is elapsed wall-clock time since an arbitrary fixed
    # point; used here as a simple timer around each tagger run.
    stanford_t0 = os.times()[4]
    stanford_main()
    stanford_t1 = os.times()[4]
    stanford_total_time = stanford_t1 - stanford_t0
    nltk_t0 = os.times()[4]
    nltk_main()
    nltk_t1 = os.times()[4]
    nltk_total_time = nltk_t1 - nltk_t0
    # Compare the two timings in a bar chart.
    time_plot(stanford_total_time, nltk_total_time)
| 29.157895 | 112 | 0.676895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 392 | 0.176895 |
fa8ea4a787dbe3d2892d7a449c5a14c4d96bc740 | 105 | py | Python | mak/libs/pyxx/cxx/grammar/expression/primary/requires/__init__.py | motor-dev/Motor | 98cb099fe1c2d31e455ed868cc2a25eae51e79f0 | [
"BSD-3-Clause"
] | 4 | 2015-05-13T16:28:36.000Z | 2017-05-24T15:34:14.000Z | mak/libs/pyxx/cxx/grammar/expression/primary/requires/__init__.py | motor-dev/Motor | 98cb099fe1c2d31e455ed868cc2a25eae51e79f0 | [
"BSD-3-Clause"
] | null | null | null | mak/libs/pyxx/cxx/grammar/expression/primary/requires/__init__.py | motor-dev/Motor | 98cb099fe1c2d31e455ed868cc2a25eae51e79f0 | [
"BSD-3-Clause"
] | 1 | 2017-03-21T08:28:07.000Z | 2017-03-21T08:28:07.000Z | from . import general
from . import simple
from . import type
from . import compound
from . import nested | 21 | 22 | 0.771429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
fa9415fee11e95855395e081541950bda39d51e1 | 539 | py | Python | 02_Beyond_Fundamentals/02_01.py | AnmolTomer/lynda_programming_foundations | 2f1269f2984ae8707acd80017b892ff4cceb0ee9 | [
"MIT"
] | null | null | null | 02_Beyond_Fundamentals/02_01.py | AnmolTomer/lynda_programming_foundations | 2f1269f2984ae8707acd80017b892ff4cceb0ee9 | [
"MIT"
] | null | null | null | 02_Beyond_Fundamentals/02_01.py | AnmolTomer/lynda_programming_foundations | 2f1269f2984ae8707acd80017b892ff4cceb0ee9 | [
"MIT"
] | null | null | null | # Iteration: Repeat the same procedure until it reaches a end point.
# Specify the data to iterate over,what to do to data at every step, and we need to specify when our loop should stop.
# Infinite Loop: Bug that may occur when ending condition speicified incorrectly or not specified.
spices = [
'salt',
'pepper',
'cumin',
'turmeric'
]
for spice in spices: # in is a python keyword that indicates that what follows is the set of values that we want to iterate over
print(spice)
print("No more boring omelettes!")
| 33.6875 | 129 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 449 | 0.833024 |
fa94dc14969adcec177f7a104c520957faf4be46 | 42,211 | py | Python | dsed/migrate/build.py | flowmatters/dsed-py | b967db2797320e63bc504e40023b7c7623a0b002 | [
"0BSD"
] | null | null | null | dsed/migrate/build.py | flowmatters/dsed-py | b967db2797320e63bc504e40023b7c7623a0b002 | [
"0BSD"
] | null | null | null | dsed/migrate/build.py | flowmatters/dsed-py | b967db2797320e63bc504e40023b7c7623a0b002 | [
"0BSD"
] | null | null | null | import os
import json
from functools import reduce
import numpy as np
import pandas as pd
import geopandas as gpd
from dsed.ow import DynamicSednetCatchment, FINE_SEDIMENT, COARSE_SEDIMENT
from dsed.const import *
import openwater.nodes as node_types
from openwater.examples import from_source
from openwater import debugging
from openwater.config import Parameteriser, ParameterTableAssignment, \
DataframeInputs, DefaultParameteriser, NestedParameteriser, \
LoadArraysParameters, DictParameteriser
import openwater.template as templating
from openwater.template import OWTemplate, TAG_MODEL,TAG_PROCESS
from openwater.timing import init_timer, report_time, close_timer
from veneer.general import _extend_network
from logging import getLogger
logger = getLogger(__name__)
# Default simulation period (overridable via build_ow_model arguments).
DEFAULT_START='1986/07/01'
DEFAULT_END='2014/06/30'
# Source names catchment links as 'link for catchment <name>'; used to
# recover the catchment name from a link name.
EXPECTED_LINK_PREFIX='link for catchment '
# Fully-qualified Source model type names, used to classify which
# constituent-generation model each FU/constituent combination uses.
SOURCE_EMC_MODEL='RiverSystem.Catchments.Models.ContaminantGenerationModels.EmcDwcCGModel'
DS_SEDIMENT_MODEL='Dynamic_SedNet.Models.SedNet_Sediment_Generation'
DS_EMC_GULLY_MODEL='Dynamic_SedNet.Models.SedNet_EMC_And_Gully_Model'
DS_CROP_SED_MODEL='GBR_DynSed_Extension.Models.GBR_CropSed_Wrap_Model'
# Model groups: EMC/DWC-style generation, and models that include gullies.
EMC_DWC_MODELS=[SOURCE_EMC_MODEL,DS_EMC_GULLY_MODEL]
GULLY_MODELS=[DS_EMC_GULLY_MODEL,DS_SEDIMENT_MODEL,DS_CROP_SED_MODEL]
# (openwater model type, name of its areal parameter) pairs — models whose
# listed parameter scales with functional-unit area.
DS_AREAL_MODELS = [
    ('DepthToRate','area'),
    ('PassLoadIfFlow','scalingFactor'),
    ('USLEFineSedimentGeneration','area'),
    ('DynamicSednetGully','Area'),
    ('DynamicSednetGullyAlt','Area'),
    ('SednetParticulateNutrientGeneration','area')
]
# Which openwater model types take each climate variable directly as input.
CLIMATE_INPUTS = {
    'rainfall':['USLEFineSedimentGeneration']
}
def levels_required(table, column, criteria):
    """Return how many leading `criteria` columns are needed before
    `column` becomes single-valued within each group of `table`.

    0 means `column` already has one unique value over the whole table.
    """
    if len(set(table[column])) == 1:
        return 0
    key = criteria[0]
    deepest = 0
    # Recurse into each group defined by the next criteria column.
    for value in set(table[key]):
        subset = table[table[key] == value]
        deepest = max(deepest, 1 + levels_required(subset, column, criteria[1:]))
    return deepest
def simplify(table, column, criteria=['Constituent']):
    """Collapse `table` to the minimal set of criteria columns (plus
    `column`) needed to distinguish the values of `column`."""
    depth = levels_required(table, column, criteria)
    keep = criteria[:depth] + [column]
    return table.drop_duplicates(keep)[keep]
def compute_ts_sediment_delivery_ratios(df):
    """Build a long-format table of hillslope sediment delivery ratios.

    One row per (catchment, cgu) for each of fine and coarse sediment,
    with `fraction` = Hillslope{Fine,Coarse}SDR / 100. Fine rows also
    carry `scale` = LoadConversionFactor; coarse rows have no scale.
    """
    fine = df[['catchment', 'cgu']].copy()
    fine['fraction'] = df['HillslopeFineSDR'] / 100.0
    fine['constituent'] = FINE_SEDIMENT
    fine['scale'] = df['LoadConversionFactor']

    coarse = df[['catchment', 'cgu']].copy()
    coarse['fraction'] = df['HillslopeCoarseSDR'] / 100.0
    coarse['constituent'] = COARSE_SEDIMENT

    return pd.concat([fine, coarse])
# What about SDR and Load_Conversion_Factor --> apply to node_types.Scale
# ts_sediment_scaling = dataframe(catchment,cgu,constituent,scaling_factor)
# where scaling_factor is HillslopeCoarseSDR/100 for coarse, and
# HillslopeFineSDR/100 * Load_Conversion_Factor for fine
def _rename_tag_columns(dataframe):
return dataframe.rename(columns={'Catchment': 'catchment', 'Functional Unit': 'cgu', 'Constituent': 'constituent'})
def _rename_link_tag_columns(dataframe, link_renames, link_col='NetworkElement'):
    """Apply link renames, lower-case the Constituent column and derive a
    `catchment` column by stripping the standard link-name prefix."""
    result = dataframe.replace(link_renames)
    result = result.rename(columns={'Constituent': 'constituent'})
    result['catchment'] = result[link_col].str.slice(len(EXPECTED_LINK_PREFIX))
    return result
def build_ow_model(data_path, start=DEFAULT_START, end=DEFAULT_END,
                   link_renames=None,
                   replay_hydro=False,
                   existing_model=None,
                   progress=logger.info):
    """Convenience wrapper: construct a migrator for `data_path` and build
    the openwater model in a single call."""
    migrator = SourceOpenwaterDynamicSednetMigrator(
        data_path, replay_hydro=replay_hydro, start=start, end=end)
    return migrator.build_ow_model(link_renames, progress=progress,
                                   existing_model=existing_model)
class SourceOpenwaterDynamicSednetMigrator(from_source.FileBasedModelConfigurationProvider):
def __init__(self,data_path,replay_hydro=False,start=DEFAULT_START,end=DEFAULT_END):
super(SourceOpenwaterDynamicSednetMigrator,self).__init__(data_path,climate_patterns=None,time_period=pd.date_range(start,end))
self.data_path = data_path
self.replay_hydro = replay_hydro
global RR,ROUTING
RR = node_types.Sacramento
ROUTING = node_types.StorageRouting
# def _load_json(self,f):
# return json.load(open(os.path.join(self.data_path, f + '.json')))
def _load_csv(self,f):
fn = os.path.join(self.data_path, f + '.csv')
if not os.path.exists(fn):
fn = fn + '.gz'
if not os.path.exists(fn):
return None
return pd.read_csv(fn, index_col=0, parse_dates=True)
# def _load_time_series_csv(self,f):
# df = self._load_csv(f)
# return df.reindex(self.time_period)
def _load_param_csv(self,f):
df = self._load_csv(f)
if df is None:
return None
return _rename_tag_columns(df)
    def _get_combined_cropping(self,cropping):
        """Combine dissolved and particulate cropping loads into one series.

        Columns in `cropping` follow the convention
        'constituent$variable$catchment$cgu'. Each series is scaled by the
        delivery-ratio/load-conversion factors from the pesticide TS-load
        parameter table, then the dissolved and particulate components are
        summed and converted from g/ha/day to kg/m2/s.

        Returns (combined_cropping DataFrame, list of cropping CGUs), or
        (None, []) when no pesticide parameter table exists.
        """
        dwcs = self._load_param_csv('cg-GBR_DynSed_Extension.Models.GBR_Pest_TSLoad_Model')
        if dwcs is None:
            return None,[]
        # Scaling factors per row: percentages converted to fractions.
        dwcs['particulate_scale'] = PERCENT_TO_FRACTION * dwcs['DeliveryRatio'] * PERCENT_TO_FRACTION * dwcs['Fine_Percent']
        dwcs['dissolved_scale'] = PERCENT_TO_FRACTION * dwcs['DeliveryRatioDissolved']
        dwcs['final_scale'] = dwcs['Load_Conversion_Factor']
        # Scale every dissolved/particulate load column by its matching
        # (catchment, constituent, cgu) parameter row.
        scaled_cropping_ts = {}
        for col in cropping.columns:
            constituent, variable, catchment, cgu = col.split('$')
            if not variable in ['Dissolved_Load_g_per_Ha', 'Particulate_Load_g_per_Ha']:
                continue
            row = dwcs[dwcs.catchment == catchment]
            row = row[row.constituent == constituent]
            row = row[row.cgu == cgu]
            # Exactly one parameter row must match each time series column.
            assert len(row) == 1
            row = row.iloc[0]
            scale = row['%s_scale' % (variable.split('_')[0].lower())] * row['final_scale']
            scaled_cropping_ts[col] = scale * cropping[col]
        scaled_cropping = pd.DataFrame(scaled_cropping_ts)
        # Sum dissolved + particulate into a single combined column per
        # (constituent, catchment, cgu).
        combined_cropping_ts = {}
        cropping_cgus = []
        for col in scaled_cropping.columns:
            constituent, variable, catchment, cgu = col.split('$')
            cropping_cgus.append(cgu)
            if variable != 'Dissolved_Load_g_per_Ha':
                continue
            dis_col = col
            part_col = '%s$Particulate_Load_g_per_Ha$%s$%s' % (constituent, catchment, cgu)
            comb_col = '%s$Combined_Load_kg_per_m2_per_s$%s$%s' % (constituent, catchment, cgu)
            combined_cropping_ts[comb_col] = scaled_cropping[dis_col] + scaled_cropping[part_col]
        combined_cropping = pd.DataFrame(combined_cropping_ts)
        # Unit conversion: g/ha/day -> kg/m2/s.
        combined_cropping *= M2_TO_HA * PER_DAY_TO_PER_SECOND * G_TO_KG
        cropping_cgus = list(set(cropping_cgus))
        return combined_cropping,cropping_cgus
    def get_cropping_input_timeseries(self,cropping_df):
        """Build the DataframeInputs wiring all cropping time series into
        the model's 'inputLoad' inputs, with unit conversions applied.

        Returns (cropping_inputs, list of cropping CGUs).
        """
        cropping = cropping_df
        cropping_inputs = DataframeInputs()
        # Combined dissolved+particulate pesticide loads (already kg/m2/s).
        combined_cropping,cropping_cgus = self._get_combined_cropping(cropping_df)
        if combined_cropping is not None:
            cropping_inputs.inputter(combined_cropping, 'inputLoad',
                                     '${constituent}$$Combined_Load_kg_per_m2_per_s$$${catchment}$$${cgu}')
        def extract_cropping_columns(df, column_marker, conversion):
            # Select all columns for one variable, convert units, and
            # rename the variable segment to 'rescaled' so a single input
            # pattern can match them.
            columns = [c for c in df.columns if ('$%s$' % column_marker) in c]
            subset = df[columns].copy()
            subset *= conversion
            renames = {c: c.replace(column_marker, 'rescaled') for c in columns}
            subset = subset.rename(columns=renames)
            return subset, 'inputLoad', '${constituent}$$rescaled$$${catchment}$$${cgu}'
        # t/ha/day and g/ha/day series -> kg/m2/s.
        cropping_inputs.inputter(
            *extract_cropping_columns(cropping, 'Constituent_Load_T_per_Ha', TONS_TO_KG * M2_TO_HA * PER_DAY_TO_PER_SECOND))
        cropping_inputs.inputter(
            *extract_cropping_columns(cropping, 'Surface_DIN_Load_g_per_Ha', G_TO_KG * M2_TO_HA * PER_DAY_TO_PER_SECOND))
        cropping_inputs.inputter(
            *extract_cropping_columns(cropping, 'Leached_DIN_Load_g_per_Ha', G_TO_KG * M2_TO_HA * PER_DAY_TO_PER_SECOND))
        # Cropping soil (sediment) loads, t/ha/day -> kg/m2/s.
        crop_sed = cropping[[c for c in cropping.columns if 'Soil_Load_T_per_Ha' in c]]
        crop_sed *= M2_TO_HA * PER_DAY_TO_PER_SECOND * TONS_TO_KG
        cropping_inputs.inputter(crop_sed, 'inputLoad', '${constituent}$$Soil_Load_T_per_Ha$$${catchment}$$${cgu}')
        return cropping_inputs,cropping_cgus
    def build_catchment_template(self,meta):
        """Build the DynamicSednetCatchment template from the metadata
        gathered by assess_meta_structure.

        Every functional unit acts as both HRU and CGU (1:1 mapping).
        When replay_hydro is set, runoff and routing models are omitted
        so pre-recorded hydrology can be fed in instead.
        """
        catchment_template = DynamicSednetCatchment(dissolved_nutrients=meta['dissolved_nutrients'],
                                                    particulate_nutrients=meta['particulate_nutrients'],
                                                    particulate_nutrient_cgus=meta['particulate_nutrient_cgus'],
                                                    pesticides=meta['pesticides'],
                                                    ts_load_with_dwc=meta['ts_load'])
        fus = meta['fus']
        catchment_template.hrus = fus
        catchment_template.cgus = fus
        # 1:1 CGU -> HRU mapping.
        catchment_template.cgu_hrus = {fu: fu for fu in fus}
        catchment_template.pesticide_cgus = meta['pesticide_cgus']
        catchment_template.timeseries_sediment_cgus = meta['timeseries_sediment']
        catchment_template.hillslope_cgus = meta['usle_cgus']
        catchment_template.gully_cgus = meta['gully_cgus']
        catchment_template.sediment_fallback_cgu = meta['emc_cgus']
        if self.replay_hydro:
            # Hydrology is replayed from recorded outputs, not modelled.
            catchment_template.rr = None
            catchment_template.routing = None
        else:
            catchment_template.rr = RR
            catchment_template.routing = ROUTING
        return catchment_template
    def assess_meta_structure(self,cropping):
        """Scan the exported configuration and classify constituents and
        functional units.

        Returns a dict with the simulation period, constituent groupings
        (sediments / dissolved & particulate nutrients / pesticides) and,
        per generation-model family, the list of CGUs using it.
        """
        meta = {}
        meta['start'] = self.time_period[0]
        meta['end'] = self.time_period[-1]
        fus = self._load_json('fus')
        meta['fus'] = fus
        constituents = self._load_json('constituents')
        meta['constituents'] = constituents
        # TODO: These are really 'cropping' CGUs. Need a better way to identify. OR not require them?
        pesticide_cgus = set([c.split('$')[-1] for c in cropping.columns if ('Dissolved_Load_g_per_Ha' in c) or ('Surface_DIN_Load_g_per_Ha' in c)])
        # NOTE(review): constituent classification is name-convention based
        # ('_D'/'_F' = dissolved, '_Particulate' = particulate).
        dissolved_nutrients = [c for c in constituents if '_D' in c or '_F' in c]
        meta['dissolved_nutrients'] = dissolved_nutrients
        particulate_nutrients = [c for c in constituents if '_Particulate' in c]
        meta['particulate_nutrients'] = particulate_nutrients
        meta['particulate_nutrient_cgus'] = []
        if(len(meta['particulate_nutrients']) > 0):
            part_nutrient_params = self._load_param_csv('cg-Dynamic_SedNet.Models.SedNet_Nutrient_Generation_Particulate')
            meta['particulate_nutrient_cgus'] = list(set(part_nutrient_params.cgu))
        sediments = [c for c in constituents if c.startswith('Sediment - ')]
        meta['sediments'] = sediments
        # Pesticides = everything not otherwise classified.
        pesticides = [c for c in constituents if not c in dissolved_nutrients + particulate_nutrients + sediments]
        meta['pesticides'] = pesticides
        meta['pesticide_cgus'] = list(pesticide_cgus)
        meta['timeseries_sediment'] = list(set([c.split('$')[-1] for c in cropping.columns if 'Sediment - Fine$Soil_Load_T_per_Ha' in c]))
        logger.debug(f'pesticide_cgus: {pesticide_cgus}')
        # Reduce the constituent-generation model table to the minimal set
        # of dimensions needed to identify the model per FU/constituent.
        cg_models = self._load_csv('cgmodels')
        cg_models = simplify(cg_models, 'model', ['Constituent', 'Functional Unit', 'Catchment'])
        def cgus_using_model(constituent,model):
            # FUs that use `model` for `constituent`; asserts that each
            # such FU uses that model homogeneously across catchments.
            all_fus = cg_models[cg_models.Constituent==constituent]
            fus_using_model = list(set(all_fus[all_fus.model==model]['Functional Unit']))
            if not len(fus_using_model):
                return []
            all_models_on_fus = set(all_fus[all_fus['Functional Unit'].isin(fus_using_model)].model)
            is_homogenous = len(all_models_on_fus)==1
            if not is_homogenous:
                print('Looking for FUs using %s for constituent %s'%(model,constituent))
                print('Expected one model for fus(%s)/constituent(%s)'%(','.join(fus_using_model),constituent))
                print('Got: %s'%','.join(all_models_on_fus))
                print('FU/CGU instances: %d'%len(all_fus))
                print('FU/CGU instances using model: %d'%len(fus_using_model))
            assert is_homogenous
            return fus_using_model
        def cgus_using_one_of_models(constituent,models):
            # Union of FUs over a family of model types.
            return list(set(sum([cgus_using_model(constituent,m) for m in models],[])))
        meta['emc_cgus'] = cgus_using_model(FINE_SEDIMENT,SOURCE_EMC_MODEL)
        meta['usle_cgus'] = cgus_using_model(FINE_SEDIMENT,DS_SEDIMENT_MODEL)
        meta['gully_cgus'] = cgus_using_one_of_models(FINE_SEDIMENT,GULLY_MODELS)
        meta['hillslope_emc_cgus'] = cgus_using_one_of_models(FINE_SEDIMENT,EMC_DWC_MODELS)
        meta['emc_plus_gully_cgus'] = cgus_using_one_of_models(FINE_SEDIMENT,DS_EMC_GULLY_MODEL)
        meta['ts_load'] = {
            'cgus': list(set(cg_models[cg_models.model=='Dynamic_SedNet.Models.SedNet_TimeSeries_Load_Model']['Functional Unit'])),
            'constituents':list(set(cg_models[cg_models.model=='Dynamic_SedNet.Models.SedNet_TimeSeries_Load_Model']['Constituent'])),
        }
        return meta
def _date_parameteriser(self,meta):
start = meta['start']
return DefaultParameteriser(node_types.DateGenerator, startYear=start.year,
startMonth=start.month, startDate=start.day)
def _climate_parameteriser(self):
climate_ts = self._load_time_series_csv('climate')
i = DataframeInputs()
for v in ['rainfall','pet']:
i.inputter(climate_ts,'input','%s for ${catchment}'%v,'Input',variable=v)
for model_type in CLIMATE_INPUTS.get(v,[]):
i.inputter(climate_ts,v,'%s for ${catchment}'%v,model_type)
return i
def _runoff_parameteriser(self):
sacramento_parameters = self._load_csv('runoff_params')
sacramento_parameters['hru'] = sacramento_parameters['Functional Unit']
sacramento_parameters = sacramento_parameters.rename(columns={c: c.lower() for c in sacramento_parameters.columns})
return ParameterTableAssignment(sacramento_parameters, RR, dim_columns=['catchment', 'hru'])
def _routing_parameteriser(self,link_renames):
routing_params = _rename_link_tag_columns(self._load_csv('fr-RiverSystem.Flow.StorageRouting'), link_renames)
return ParameterTableAssignment(routing_params, ROUTING, dim_columns=['catchment']),routing_params
    def _constituent_generation_parameteriser(self,meta,cropping):
        """Assemble the nested parameteriser for all constituent generation
        models: cropping inputs, USLE hillslope erosion, gullies, EMC/DWC,
        nutrients, sugarcane DIN/P and time-series loads.

        Each parameter table found on disk is mapped onto the matching
        openwater model type; missing tables are simply skipped.
        """
        res = NestedParameteriser()
        def apply_dataframe(df,model,complete=True):
            # Helper: assign a (catchment, cgu, constituent)-dimensioned
            # parameter table to `model` and register it.
            parameteriser = ParameterTableAssignment(df,
                                                     model,
                                                     dim_columns=['catchment', 'cgu', 'constituent'],
                                                     complete=complete)
            res.nested.append(parameteriser)
        cropping_inputs,cropping_cgus = self.get_cropping_input_timeseries(cropping)
        meta['cropping_cgus']=cropping_cgus
        # TODO NEED TO SCALE BY AREA!
        res.nested.append(cropping_inputs)
        # --- USLE hillslope fine sediment generation ---
        fine_sediment_params = self._load_param_csv('cg-Dynamic_SedNet.Models.SedNet_Sediment_Generation')
        if fine_sediment_params is not None:
            usle_timeseries = self._load_time_series_csv('usle_timeseries')
            usle_timeseries = usle_timeseries.fillna(method='ffill')
            # Only the non-averaged USLE variant is supported here.
            assert set(fine_sediment_params.useAvModel) == {False}
            assert len(set(fine_sediment_params.constituent)) == 1
            fine_sediment_params = fine_sediment_params.rename(columns={
                'Max_Conc': 'maxConc',
                'USLE_HSDR_Fine': 'usleHSDRFine',
                'USLE_HSDR_Coarse': 'usleHSDRCoarse'
            })
            fine_sediment_params = fine_sediment_params.rename(
                columns={c: c.replace('_', '') for c in fine_sediment_params.columns})
            usle_parameters = ParameterTableAssignment(fine_sediment_params, node_types.USLEFineSedimentGeneration,
                                                       dim_columns=['catchment', 'cgu'])
            res.nested.append(usle_parameters)
            usle_timeseries_inputs = DataframeInputs()
            usle_timeseries_inputs.inputter(usle_timeseries, 'KLSC', 'KLSC_Total For ${catchment} ${cgu}')
            usle_timeseries_inputs.inputter(usle_timeseries, 'KLSC_Fine', 'KLSC_Fines For ${catchment} ${cgu}')
            usle_timeseries_inputs.inputter(usle_timeseries, 'CovOrCFact', 'C-Factor For ${catchment} ${cgu}')
            res.nested.append(usle_timeseries_inputs)
        # --- Cropping sediment wrapper model (adds to gully param pool) ---
        gbr_crop_sed_params = self._load_param_csv('cg-GBR_DynSed_Extension.Models.GBR_CropSed_Wrap_Model')
        if gbr_crop_sed_params is not None:
            gbr_crop_sed_params = gbr_crop_sed_params.rename(
                columns={c: c.replace('_', '') for c in gbr_crop_sed_params.columns})
            if fine_sediment_params is None:
                fine_sediment_params = gbr_crop_sed_params
            else:
                fine_sediment_params = pd.concat([fine_sediment_params, gbr_crop_sed_params], sort=False)
        # --- Combined EMC + gully model: EMC/DWC part plus gully params ---
        gbr_emc_gully_params = self._load_param_csv('cg-Dynamic_SedNet.Models.SedNet_EMC_And_Gully_Model')
        if gbr_emc_gully_params is not None:
            gbr_emc_gully_params['Gully_Management_Practice_Factor'] = 0.0 # HACK work around bug in C#
            for sed in ['Fine','Coarse']:
                emc_dwc_params = gbr_emc_gully_params.rename(columns={
                    sed.lower()+'EMC':'EMC',
                    sed.lower()+'DWC':'DWC'
                }).copy()
                emc_dwc_params['constituent'] = 'Sediment - '+sed
                apply_dataframe(emc_dwc_params,node_types.EmcDwc,complete=False)
            gbr_emc_gully_params = gbr_emc_gully_params.rename(
                columns={c: c.replace('_', '') for c in gbr_emc_gully_params.columns})
            if fine_sediment_params is None:
                fine_sediment_params = gbr_emc_gully_params
            else:
                fine_sediment_params = pd.concat([fine_sediment_params, gbr_emc_gully_params], sort=False)
        # --- Gully parameters (pooled from all sediment model tables) ---
        fine_sediment_params = fine_sediment_params.rename(columns={
            'GullyYearDisturb': 'YearDisturbance',
            'AverageGullyActivityFactor': 'averageGullyActivityFactor',
            'GullyManagementPracticeFactor': 'managementPracticeFactor',
            'GullySDRFine': 'sdrFine',
            'GullySDRCoarse': 'sdrCoarse'
        })
        # Defaults: 100% delivery and neutral management factor when absent.
        gully_params = fine_sediment_params.fillna({
            'sdrFine': 100.0,
            'sdrCoarse': 100.0,
            'managementPracticeFactor': 1.0
        }).fillna(0.0)
        gully_parameters = ParameterTableAssignment(gully_params, node_types.DynamicSednetGullyAlt,
                                                    dim_columns=['catchment', 'cgu'])
        res.nested.append(gully_parameters)
        gully_parameters = ParameterTableAssignment(gully_params, node_types.DynamicSednetGully,
                                                    dim_columns=['catchment', 'cgu'])
        res.nested.append(gully_parameters)
        ts_load_hillslope_fine = gully_params.pivot('catchment', 'cgu', 'HillSlopeFinePerc') / 100.0
        hillslope_fine = ParameterTableAssignment(ts_load_hillslope_fine, node_types.FixedPartition,
                                                  parameter='fraction', column_dim='cgu', row_dim='catchment')
        res.nested.append(hillslope_fine)
        ts_sediment_delivery_ratios = compute_ts_sediment_delivery_ratios(gully_params)
        apply_dataframe(ts_sediment_delivery_ratios,node_types.DeliveryRatio,complete=False)
        fine_ts_conversion_factor = ts_sediment_delivery_ratios[ts_sediment_delivery_ratios.constituent==FINE_SEDIMENT]
        apply_dataframe(fine_ts_conversion_factor,node_types.ApplyScalingFactor,complete=False)
        # --- Annual gully time series inputs ---
        if(len(meta['gully_cgus']) > 0):
            gully_timeseries = self._load_csv('gully_timeseries').reindex(self.time_period, method='ffill')
            gully_inputs = DataframeInputs()
            gully_ts_columns = ['Annual Load', 'Annual Runoff']
            gully_ts_destinations = ['annualLoad', 'AnnualRunoff']
            for col, input_name in zip(gully_ts_columns, gully_ts_destinations):
                gully_inputs.inputter(gully_timeseries, input_name, '%s For ${catchment} ${cgu}' % col)
            res.nested.append(gully_inputs)
        # --- Plain EMC/DWC generation ---
        emc_dwc = self._load_param_csv('cg-' + SOURCE_EMC_MODEL)
        if emc_dwc is not None:
            emc_dwc = emc_dwc.rename(columns={
                'eventMeanConcentration': 'EMC',
                'dryMeanConcentration': 'DWC'
            })
            apply_dataframe(emc_dwc,node_types.EmcDwc,complete=False)
        # --- Cropping sediment DWCs (fine and coarse) ---
        if gbr_crop_sed_params is not None:
            gbr_crop_sed_params = gbr_crop_sed_params.rename(columns={
                'Catchment': 'catchment',
                'Functional Unit': 'cgu',
            })  # TODO! Surely not necessary!
            crop_sed_fine_dwcs = gbr_crop_sed_params[['catchment', 'cgu', 'HillslopeFineDWC', 'HillslopeFineSDR']].copy()
            crop_sed_fine_dwcs['DWC'] = crop_sed_fine_dwcs['HillslopeFineDWC']
            crop_sed_fine_dwcs['constituent'] = FINE_SEDIMENT
            apply_dataframe(crop_sed_fine_dwcs,node_types.EmcDwc,complete=False)
            crop_sed_coarse_dwcs = gbr_crop_sed_params[['catchment', 'cgu', 'HillslopeCoarseDWC', 'HillslopeCoarseSDR']].copy()
            crop_sed_coarse_dwcs['DWC'] = crop_sed_coarse_dwcs[
                'HillslopeCoarseDWC']
            crop_sed_coarse_dwcs['constituent'] = COARSE_SEDIMENT
            apply_dataframe(crop_sed_coarse_dwcs,node_types.EmcDwc,complete=False)
        # --- Nutrient generation (dissolved and particulate) ---
        dissolved_nutrient_params = self._load_param_csv('cg-Dynamic_SedNet.Models.SedNet_Nutrient_Generation_Dissolved')
        if dissolved_nutrient_params is not None:
            apply_dataframe(dissolved_nutrient_params,node_types.SednetDissolvedNutrientGeneration,complete=False)
        part_nutrient_params = self._load_param_csv('cg-Dynamic_SedNet.Models.SedNet_Nutrient_Generation_Particulate')
        if part_nutrient_params is not None:
            apply_dataframe(part_nutrient_params,node_types.SednetParticulateNutrientGeneration,complete=False)
        # --- Sugarcane DIN: surface and leached (seepage) pathways ---
        sugarcane_din_params = self._load_param_csv('cg-GBR_DynSed_Extension.Models.GBR_DIN_TSLoadModel')
        if sugarcane_din_params is not None:
            apply_dataframe(sugarcane_din_params, node_types.EmcDwc,complete=False)
            sugarcane_din_params['scale'] = sugarcane_din_params['Load_Conversion_Factor'] * sugarcane_din_params[
                'DeliveryRatioSurface'] * PERCENT_TO_FRACTION
            apply_dataframe(sugarcane_din_params, node_types.ApplyScalingFactor,complete=False)
            sugarcane_leached_params = self._load_param_csv('cg-GBR_DynSed_Extension.Models.GBR_DIN_TSLoadModel')
            sugarcane_leached_params['constituent'] = 'NLeached'
            sugarcane_leached_params['scale'] = sugarcane_leached_params['Load_Conversion_Factor'] * sugarcane_din_params[
                'DeliveryRatioSeepage'] * PERCENT_TO_FRACTION
            apply_dataframe(sugarcane_leached_params, node_types.ApplyScalingFactor,complete=False)
        # --- Sugarcane dissolved P: piecewise concentration vs saturation ---
        sugarcane_p_params = self._load_param_csv('cg-GBR_DynSed_Extension.Models.GBR_DissP_Gen_Model')
        if sugarcane_p_params is not None:
            # Piecewise-linear P concentration (mg/L) as a function of the
            # phosphorus saturation index, mirroring the Source C# model.
            sugarcane_p_params['PconcentrationMgPerL'] = 1e-3 * sugarcane_p_params['phos_saturation_index'].apply(
                lambda psi: (7.5 * psi) if psi < 10.0 else (-200.0 + 27.5 * psi))
            sugarcane_p_params['EMC'] = sugarcane_p_params['PconcentrationMgPerL'] * \
                                        sugarcane_p_params['ProportionOfTotalP'] * \
                                        sugarcane_p_params['Load_Conversion_Factor'] * \
                                        sugarcane_p_params['DeliveryRatioAsPercent'] * PERCENT_TO_FRACTION
            apply_dataframe(sugarcane_p_params, node_types.EmcDwc,complete=False)
        particulate_nut_gen = self._load_param_csv('cg-Dynamic_SedNet.Models.SedNet_Nutrient_Generation_Particulate')
        if particulate_nut_gen is not None:
            apply_dataframe(particulate_nut_gen, node_types.SednetParticulateNutrientGeneration,complete=False)
        # --- Time-series load model: EMC/DWC plus a scaling factor ---
        ts_load_params = self._load_param_csv('cg-Dynamic_SedNet.Models.SedNet_TimeSeries_Load_Model')
        if ts_load_params is not None:
            ts_load_params['scale'] = ts_load_params['Load_Conversion_Factor'] * ts_load_params['DeliveryRatio'] * PERCENT_TO_FRACTION
            apply_dataframe(ts_load_params, node_types.EmcDwc,complete=False)
            apply_dataframe(ts_load_params, node_types.ApplyScalingFactor,complete=False)
        return res
def _constituent_transport_parameteriser(self,link_renames,routing_params):
    """Build the parameterisers for in-stream constituent transport models.

    Assembles a NestedParameteriser covering: optional link lag (only when not
    replaying hydrology), in-stream fine sediment, bank erosion, dissolved
    nutrient decay and particulate nutrient models. Returns early with a
    partial result if the fine-sediment parameter table is missing.

    Parameters
    ----------
    link_renames : dict
        mapping of non-standard link names to the expected naming convention
    routing_params : DataFrame
        routing parameter table used to decide which links get a time lag
    """
    res = NestedParameteriser()
    if not self.replay_hydro:
        # only lag links with (near-)zero routing constants when we are
        # actually routing flow ourselves
        relevant_lag_params = lag_non_routing_links(routing_params)
        if len(relevant_lag_params):
            lag_parameters = ParameterTableAssignment(relevant_lag_params, node_types.Lag,
                                                      dim_columns=['catchment'], complete=False)
            res.nested.append(lag_parameters)
    instream_fine_sediment_params = self._load_csv('cr-Dynamic_SedNet.Models.SedNet_InStream_Fine_Sediment_Model')
    if instream_fine_sediment_params is None:
        # no fine sediment table => none of the downstream in-stream models
        # can be parameterised either
        logger.info('No instream fine sediment model found')
        return res
    instream_fine_sediment_params = _rename_link_tag_columns(instream_fine_sediment_params, link_renames, 'Link')
    # link geometry (length/height/width) is shared with the dissolved
    # nutrient model, keyed by catchment
    link_attributes = instream_fine_sediment_params[
        ['catchment', 'LinkLength_M', 'LinkHeight_M', 'LinkWidth_M']].set_index('catchment')
    link_attributes = link_attributes.rename(
        columns={'LinkLength_M': 'linkLength', 'LinkHeight_M': 'linkHeight', 'LinkWidth_M': 'linkWidth'})
    instream_nutrient_params = _rename_link_tag_columns(
        self._load_csv('cr-Dynamic_SedNet.Models.SedNet_InStream_DissolvedNut_Model'), link_renames, 'Link')
    instream_nutrient_params['uptakeVelocity'] = instream_nutrient_params['UptakeVelocity']
    instream_nutrient_params = instream_nutrient_params.set_index('catchment')
    # inner join: only catchments present in both tables are parameterised
    instream_nutrient_params = instream_nutrient_params.join(link_attributes, how='inner').reset_index()
    # print(instream_nutrient_params)
    instream_nutrient_parameteriser = ParameterTableAssignment(instream_nutrient_params,
                                                               'InstreamDissolvedNutrientDecay',
                                                               dim_columns=['catchment'],
                                                               complete=False)
    res.nested.append(instream_nutrient_parameteriser)
    # rename source columns to the openwater model's parameter names
    instream_fine_sediment_params = instream_fine_sediment_params.rename(columns={
        'BankFullFlow': 'bankFullFlow',
        'BankHeight_M': 'bankHeight',
        'FloodPlainArea_M2': 'floodPlainArea',
        # 'LinkHeight_M':'bankHeight',
        'LinkLength_M': 'linkLength',
        'LinkWidth_M': 'linkWidth',
        'Link_Slope': 'linkSlope',
        'LongTermAvDailyFlow': 'longTermAvDailyFlow',
        'ManningsN': 'manningsN',
        'RiparianVegPercent': 'riparianVegPercent',
        'SoilErodibility': 'soilErodibility',
        'SoilPercentFine': 'soilPercentFine',
        # 'annualReturnInterval':'',
        # 'contribArea_Km':'',
        'initFineChannelStorProp': 'channelStoreFine'
    })
    # sign convention differs between the source data and the model
    instream_fine_sediment_params['channelStoreFine'] = - instream_fine_sediment_params['channelStoreFine']
    instream_fine_sed_parameteriser = ParameterTableAssignment(instream_fine_sediment_params, 'InstreamFineSediment',
                                                               dim_columns=['catchment'],complete=False)
    res.nested.append(instream_fine_sed_parameteriser)
    # the same table also drives the bank erosion model
    bank_erosion_parameteriser = ParameterTableAssignment(instream_fine_sediment_params, 'BankErosion',
                                                          dim_columns=['catchment'],complete=False)
    res.nested.append(bank_erosion_parameteriser)
    instream_particulate_nutrient_params = _rename_link_tag_columns(
        self._load_param_csv('cr-Dynamic_SedNet.Models.SedNet_InStream_ParticulateNut_Model'), link_renames, 'Link')
    instream_particulate_nutrient_params = instream_particulate_nutrient_params.rename(columns={
        'partNutConc':'particulateNutrientConcentration'
    })
    # particulate nutrients are parameterised per catchment AND per constituent
    instream_particulate_parameteriser = \
        ParameterTableAssignment(instream_particulate_nutrient_params,
                                 'InstreamParticulateNutrient',
                                 dim_columns=['catchment','constituent'])
    res.nested.append(instream_particulate_parameteriser)
    # the particulate nutrient model also needs soilPercentFine from the
    # fine sediment table
    instream_particulate_sed_parameteriser = \
        ParameterTableAssignment(instream_fine_sediment_params[['catchment','soilPercentFine']],
                                 'InstreamParticulateNutrient',
                                 dim_columns=['catchment'],complete=False)
    res.nested.append(instream_particulate_sed_parameteriser)
    return res
def build_ow_model(self,
                   link_renames=None,
                   existing_model=None,
                   progress=print):
    """Build (or re-parameterise) an openwater model from the source data.

    Parameters
    ----------
    link_renames : dict or None
        mapping of mismatched link names; derived from the network if None
    existing_model : model or None
        when provided, only a fresh parameteriser is attached, the structure
        is not rebuilt
    progress : callable
        progress reporting callback (defaults to print)

    Returns
    -------
    (model, meta, network) tuple
    """
    init_timer('Build')
    init_timer('Read structure data')
    network = gpd.read_file(os.path.join(self.data_path, 'network.json'))
    self.network = _extend_network(self._load_json('network'))
    if link_renames is None:
        link_renames = map_link_name_mismatches(self.network)
    # self.time_period = pd.date_range(start, end)
    cropping = self._load_time_series_csv('cropping')
    meta = self.assess_meta_structure(cropping)
    meta['link_renames'] = link_renames
    print(meta)
    # return meta
    # cr_models = self._load_csv('transportmodels')
    # cr_models = simplify(cr_models, 'model', ['Constituent'])
    # routing_models = self._load_csv('routing_models')
    # routing_models.replace(link_renames, inplace=True)
    # default_cg = catchment_template.cg.copy()
    # tpl_nested = catchment_template.get_template()
    # tpl = tpl_nested.flatten()
    # template_image = debugging.graph_template(tpl)
    # template_image.render('cgu_template', format='png')
    if existing_model is None:
        model = self.build_structure(meta,network)
    else:
        model = existing_model
    # assemble the parameterisers in dependency order: defaults first,
    # then climate/runoff/generation/routing/transport
    report_time('Build basic parameterisers')
    p = Parameteriser()
    p._parameterisers.append(DefaultParameteriser())
    p._parameterisers.append(self._date_parameteriser(meta))
    report_time('Build climate parameteriser')
    p._parameterisers.append(self._climate_parameteriser())
    report_time('Build fu area parameteriser')
    p._parameterisers.append(from_source.fu_areas_parameteriser(self._load_csv('fu_areas'),DS_AREAL_MODELS))
    report_time('Build runoff parameteriser')
    p._parameterisers.append(self._runoff_parameteriser())
    report_time('Build generation parameteriser')
    p._parameterisers.append(self._constituent_generation_parameteriser(meta,cropping))
    report_time('Build routing parameteriser')
    # routing parameters are reused by the transport parameteriser below
    rp,routing_params = self._routing_parameteriser(link_renames)
    p._parameterisers.append(rp)
    report_time('Build transport parameteriser')
    p._parameterisers.append(self._constituent_transport_parameteriser(link_renames,routing_params))
    p._parameterisers.append(from_source.node_model_parameteriser(self))
    # report_time('Build demand parameteriser')
    if self.replay_hydro:
        # feed pre-computed hydrology time series instead of simulating flow
        report_time('Build hydro time series parameteriser')
        p._parameterisers.append(self.hydro_timeseries_inputter(link_renames))
    model._parameteriser = p
    progress('Model parameterisation established')
    close_timer()
    close_timer()
    meta['warnings'] = self.warnings
    return model, meta, network
def build_structure(self,meta,network):
    """Build the catchment model graph, wiring a date generator into it.

    The nested `setup_dates` hook adds a single DateGenerator node and
    connects its outputs to every USLE (dayOfYear) and gully (year) node.
    """
    def setup_dates(g):
        # add one DateGenerator node to the graph via a template
        date_tpl = OWTemplate()
        date_tags = {
            'calendar': 1
        }
        date_tags[TAG_PROCESS] = 'date_gen'
        date_tpl.add_node(node_types.DateGenerator, **date_tags)
        g = templating.template_to_graph(g, date_tpl)
        date_node = [n for n in g.nodes if g.nodes[n][TAG_MODEL] == 'DateGenerator'][0]
        usle_nodes = [n for n in g.nodes if g.nodes[n][TAG_MODEL] == 'USLEFineSedimentGeneration']
        # progress('USLE Nodes:', len(usle_nodes))
        for usle in usle_nodes:
            # USLE models need the day of year
            g.add_edge(date_node, usle, src=['dayOfYear'], dest=['dayOfYear'])
        gully_nodes = [n for n in g.nodes if g.nodes[n][TAG_MODEL] in ['DynamicSednetGullyAlt','DynamicSednetGully']]
        for gully in gully_nodes:
            # gully models need the (calendar) year
            g.add_edge(date_node, gully, src=['year'], dest=['year'])
        return g
    report_time('Build template')
    catchment_template = self.build_catchment_template(meta)
    report_time('Build graph')
    model = from_source.build_catchment_graph(catchment_template, network, progress=nop, custom_processing=setup_dates)
    return model
def hydro_timeseries_inputter(self,link_renames):
    """Build a DataframeInputs feeding pre-computed hydrology time series
    (slow/quick flow, downstream flow, storage) into the constituent models.

    Used when replay_hydro is set: the constituent models consume recorded
    hydrology rather than simulated flows. Node names are matched via the
    two templates below.
    """
    HRU_TEMPLATE='${cgu}: ${catchment}'
    LINK_TEMPLATE='link for catchment ${catchment}'
    i = DataframeInputs()
    # 'Slow_Flow'
    slow_flow = self._load_time_series_csv('Results/Slow_Flow')
    i.inputter(slow_flow, 'baseflow', HRU_TEMPLATE,model='EmcDwc')
    # i.inputter(slow_flow, 'baseflow', HRU_TEMPLATE,model='USLEFineSedimentGeneration')
    i.inputter(slow_flow, 'slowflow', HRU_TEMPLATE,model='SednetParticulateNutrientGeneration')
    # i.inputter(slow_flow, 'slowflow', HRU_TEMPLATE,model='SednetDissolvedNutrientGeneration')
    quick_flow = self._load_time_series_csv('Results/Quick_Flow')
    i.inputter(quick_flow, 'quickflow', HRU_TEMPLATE,model='EmcDwc')
    # i.inputter(quick_flow, 'quickflow', HRU_TEMPLATE,model='USLEFineSedimentGeneration')
    # i.inputter(quick_flow, 'quickflow', HRU_TEMPLATE,model='SednetDissolvedNutrientGeneration')
    # i.inputter(quick_flow, 'quickflow', HRU_TEMPLATE,model='DynamicSednetGully')
    # i.inputter(quick_flow, 'quickflow', HRU_TEMPLATE,model='DynamicSednetGullyAlt')
    i.inputter(quick_flow,'flow',HRU_TEMPLATE,model='PassLoadIfFlow')
    # NLeached is carried by slow flow rather than quick flow
    i.inputter(slow_flow,'flow',HRU_TEMPLATE,model='PassLoadIfFlow',constituent='NLeached')
    # convert daily volumes to a per-second rate expected by the link models
    ds_flow = self._load_time_series_csv('Results/downstream_flow_volume') * PER_DAY_TO_PER_SECOND
    storage = self._load_time_series_csv('Results/storage_volume')
    if len(link_renames):
        # recorded results use the source naming; align with renamed links
        ds_flow = ds_flow.rename(columns=link_renames)
        storage = storage.rename(columns=link_renames)
    i.inputter(ds_flow,'downstreamFlowVolume',LINK_TEMPLATE,model='BankErosion')
    i.inputter(ds_flow,'outflow',LINK_TEMPLATE,model='InstreamFineSediment')
    # i.inputter(ds_flow,'outflow',LINK_TEMPLATE,model='InstreamDissolvedNutrientDecay')
    # i.inputter(ds_flow,'outflow',LINK_TEMPLATE,model='LumpedConstituentRouting')
    i.inputter(storage,'totalVolume',LINK_TEMPLATE,model='BankErosion')
    i.inputter(storage,'reachVolume',LINK_TEMPLATE,model='InstreamFineSediment')
    # i.inputter(storage,'reachVolume',LINK_TEMPLATE,model='InstreamDissolvedNutrientDecay')
    i.inputter(storage,'storage',LINK_TEMPLATE,model='LumpedConstituentRouting')
    return i
def nop(*args, **kwargs):
    """No-op callback: accepts any arguments and does nothing.

    Used as a silent `progress` callback where printing is unwanted.
    """
    return None
def lag_outlet_links(network):
    """Build a one-timestep lag table for outlet links that have no
    upstream links (i.e. single-link subnetworks draining to an outlet).
    """
    outlets = [n['properties']['id'] for n in network.outlet_nodes()]
    # flatten the per-outlet lists of incoming links into one list
    # NOTE(review): reduce without an initialiser raises TypeError if there
    # are no outlets at all — presumably every network has at least one
    links_to_outlets = reduce(lambda x,y: list(x) + list(y),
                              [network['features'].find_by_to_node(n) for n in outlets])
    single_link_networks = [l for l in links_to_outlets if len(network.upstream_links(l))==0]
    outlet_links = [l['properties']['name'] for l in single_link_networks]
    print('Outlet links',len(outlet_links),outlet_links)
    return lag_links(outlet_links)
def lag_links(links):
    """Build a parameter table assigning a one-timestep lag to each link.

    Each link name of the form 'link for catchment X' yields a row with
    catchment 'X' and timeLag 1.
    """
    records = []
    for link_name in links:
        records.append({
            'catchment': link_name.replace('link for catchment ', ''),
            'timeLag': 1,
        })
    return pd.DataFrame(records)
def lag_headwater_links(network):
    """Build a one-timestep lag table for headwater links (links with no
    upstream links).
    """
    links = network['features'].find_by_feature_type('link')
    headwater_links = [l for l in links if len(network.upstream_links(l))==0]
    headwater_link_names = [l['properties']['name'] for l in headwater_links]
    print('Headwater links',len(headwater_link_names),headwater_link_names)
    return lag_links(headwater_link_names)
def lag_non_routing_links(params):
    """Build a one-timestep lag table for links whose routing constant is
    below a threshold (i.e. links that effectively do not route flow).

    Parameters
    ----------
    params : DataFrame
        routing parameter table with 'RoutingConstant' and 'catchment' columns
    """
    RC_THRESHOLD = 5e-4  # was 1e-5
    slow_links = params[params.RoutingConstant < RC_THRESHOLD]
    #print('Links to lag',len(slow_links),list(slow_links.catchment))
    return lag_links(list(slow_links.catchment))
def map_link_name_mismatches(network):
    """Map non-standard link names to the expected 'link for catchment <name>' form.

    Finds links whose name does not start with EXPECTED_LINK_PREFIX, matches
    each to its catchment via the catchment's 'link' reference, and returns
    a dict of {old_link_name: EXPECTED_LINK_PREFIX + catchment_name}.

    Parameters
    ----------
    network : network object
        must support as_dataframe() with feature_type/name/veneer_id/link columns

    Returns
    -------
    dict mapping old link names to standardised link names
    """
    network_df = network.as_dataframe()
    catchments = network_df[network_df.feature_type=='catchment']
    links = network_df[network_df.feature_type=='link']
    links_with_mismatched_names = links[~links.name.str.startswith(EXPECTED_LINK_PREFIX)][['name','veneer_id']].set_index('veneer_id')
    corresponding_catchments = catchments[catchments.link.isin(list(links_with_mismatched_names.index))][['name','link']].set_index('link')
    # inner join link names to their catchment names (suffixes disambiguate
    # the two 'name' columns)
    lookup = corresponding_catchments.join(links_with_mismatched_names,how='inner',lsuffix='_catchment',rsuffix='_link')
    lookup['old_name'] = lookup['name_link']
    lookup['new_name'] = EXPECTED_LINK_PREFIX + lookup['name_catchment']
    lookup = lookup.set_index('old_name')
    # FIX: removed a dead `lookup['new_name'].to_dict()` statement whose
    # result was discarded
    result = lookup['new_name'].to_dict()
    print(result)
    return result
| 50.734375 | 273 | 0.675677 | 36,074 | 0.854611 | 0 | 0 | 0 | 0 | 0 | 0 | 12,294 | 0.291251 |
fa9523caf9ac0424eb71bc4eef12115550d20826 | 3,738 | py | Python | scripts/gen_app_yaml.py | MatthewWilkes/mw4068-packaging | 5c5d50eea89372e967994dac3bd8b06d25b4f0fa | [
"Apache-2.0"
] | null | null | null | scripts/gen_app_yaml.py | MatthewWilkes/mw4068-packaging | 5c5d50eea89372e967994dac3bd8b06d25b4f0fa | [
"Apache-2.0"
] | null | null | null | scripts/gen_app_yaml.py | MatthewWilkes/mw4068-packaging | 5c5d50eea89372e967994dac3bd8b06d25b4f0fa | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python2.5
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gen_app_yaml.py [-f] [-o] (-i | APPLICATION_NAME)
A script to generate the app.yaml from the template with an application
name filled in.
Arguments:
APPLICATION_NAME: the name to use for the application (no underscores)
"""
from __future__ import with_statement
__authors__ = [
# alphabetical order by last name, please
'"Dan Bentley" <dbentley@google.com>',
]
import os
import sys
from optparse import OptionParser
def generateAppYaml(application_name, force=False, override_version=None):
  """Generate the app.yaml file from app.yaml.template.

  Args:
    application_name: str, the name to write into the application field
    force: bool, whether to overwrite an existing app.yaml
    override_version: str, the manual version to use
  """
  scripts_directory = os.path.dirname(__file__)
  app_dir = os.path.abspath(os.path.join(scripts_directory, '../app'))
  template_path = os.path.join(app_dir, 'app.yaml.template')
  app_yaml_path = os.path.join(app_dir, 'app.yaml')
  if not os.path.exists(template_path):
    sys.exit("Template file %s non-existent. Corrupt client?" % template_path)
  if os.path.exists(app_yaml_path):
    if not force:
      # refuse to clobber an existing app.yaml unless -f was given
      sys.exit("%s exists; exiting. To overwrite, pass -f on the command-line"
               % app_yaml_path)
  with open(template_path) as infile:
    template_contents = infile.read()
  # uncomment and fill in the application name placeholder
  contents = template_contents.replace(
      '# application: FIXME',
      'application: '+ application_name)
  if override_version:
    # find the "version: " field
    stop = contents.find("version: ")
    # find the next \n after it
    end = contents.find("\n", stop)
    # splice in the new version; 9 == len("version: ")
    app_yaml_contents = contents[:stop+9] + override_version + contents[end:]
  else:
    app_yaml_contents = contents
  with open(app_yaml_path, 'w') as outfile:
    outfile.write(app_yaml_contents)
  # NOTE: Python 2 print statement — this script targets python2.5
  print "Wrote application name %s to %s." % (application_name, app_yaml_path)
def usage(msg):
  """Report an error together with the program usage, then exit."""
  message = 'Error: %s\n\n%s' % (msg, __doc__)
  sys.exit(message)
def main():
  """Parse the command line and generate app.yaml.

  The application name comes either from the single positional argument or,
  with -i, from an interactive prompt.
  """
  args = sys.argv[1:]  # strip off the binary name
  parser = OptionParser(usage=__doc__)
  parser.add_option("-f", "--force", action="store_true", default=False,
                    help="Overwrite existing app.yaml")
  parser.add_option("-i", "--interactive", action="store_true", default=False,
                    help="Ask for the application name interactively")
  parser.add_option("-o", "--override-version",
                    help="Uses the specified version instead of the one from app.yaml.template")
  options, args = parser.parse_args(args)
  if options.interactive:
    if args:
      # -i and a positional name are mutually exclusive
      parser.error("Cannot combine application name with -i")
    sys.stdout.write("Application name: ")
    application_name = sys.stdin.readline().strip()
  else:
    if len(args) != 1:
      parser.error("No application name supplied.")
    application_name = args[0]
  generateAppYaml(application_name, force=options.force,
                  override_version=options.override_version)
if __name__ == '__main__':
  main()
| 30.639344 | 96 | 0.697967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,904 | 0.509363 |
fa954cccfdb16470bfc0b5d84f0c0d0b0bc56f46 | 70 | py | Python | task_list_dev/__main__.py | HenriqueLR/task-list-dev | a623543bbf882458bca476f7a94156cd30d544ab | [
"MIT"
] | null | null | null | task_list_dev/__main__.py | HenriqueLR/task-list-dev | a623543bbf882458bca476f7a94156cd30d544ab | [
"MIT"
] | 5 | 2017-11-25T17:05:47.000Z | 2017-11-28T16:06:16.000Z | task_list_dev/__main__.py | HenriqueLR/task-list-dev | a623543bbf882458bca476f7a94156cd30d544ab | [
"MIT"
] | null | null | null | # coding: utf-8
print(__import__('task_list_dev').tools.get_list())
| 14 | 51 | 0.728571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.428571 |
fa9589b85e6ec9a294f76d78c7dade6ff939d964 | 119 | py | Python | CodingBat/Python/Warmup-1 > not_string.py | JLJTECH/TutorialTesting | f2dbbd49a86b3b086d0fc156ac3369fb74727f86 | [
"MIT"
] | null | null | null | CodingBat/Python/Warmup-1 > not_string.py | JLJTECH/TutorialTesting | f2dbbd49a86b3b086d0fc156ac3369fb74727f86 | [
"MIT"
] | null | null | null | CodingBat/Python/Warmup-1 > not_string.py | JLJTECH/TutorialTesting | f2dbbd49a86b3b086d0fc156ac3369fb74727f86 | [
"MIT"
] | null | null | null | #Warmup-1 > not_string
def not_string(str):
    """Return the string prefixed with 'not ' unless it already starts
    with 'not'.
    """
    return str if str.startswith('not') else "not " + str
fa96f592ecf04753046dc1b36c84fb0196c05868 | 11,242 | py | Python | gputools/transforms/transformations.py | jni/gputools | cecaa9565b9be0c716e6c11b0914bbbdfd75657c | [
"BSD-3-Clause"
] | null | null | null | gputools/transforms/transformations.py | jni/gputools | cecaa9565b9be0c716e6c11b0914bbbdfd75657c | [
"BSD-3-Clause"
] | null | null | null | gputools/transforms/transformations.py | jni/gputools | cecaa9565b9be0c716e6c11b0914bbbdfd75657c | [
"BSD-3-Clause"
] | null | null | null | """ scaling images
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import logging
logger = logging.getLogger(__name__)
import os
import numpy as np
import warnings
from gputools import OCLArray, OCLImage, OCLProgram
from gputools.core.ocltypes import cl_buffer_datatype_dict
from gputools.utils import mat4_rotate, mat4_translate
from ._abspath import abspath
from mako.template import Template
def affine(data, mat=np.identity(4), output_shape=None, mode="constant", interpolation="linear", res_g=None):
    """
    affine transform data with matrix mat, which is the inverse coordinate transform matrix
    (similar to ndimage.affine_transform)

    Parameters
    ----------
    data, ndarray or OCLImage
        3d array to be transformed
    mat, ndarray or OCLArray
        3x3 or 4x4 inverse coordinate transform matrix
    output_shape: tuple of ints
        shape of transformed array
    mode: string
        boundary mode, one of the following:
        'constant'
            pads with zeros
        'edge'
            pads with edge values
        'wrap'
            pads with the repeated version of the input
    interpolation, string
        interpolation mode, one of the following
        'linear'
        'nearest'
    res_g: OCLArray or None
        optional pre-allocated output buffer; when given, results are
        written into it and it is returned (for OCLImage inputs)

    Returns
    -------
    res: ndarray or openCL array
        transformed array (same shape as input)
    """
    warnings.warn(
        "gputools.transform.affine: API change as of gputools>= 0.2.8: the inverse of the matrix is now used as in scipy.ndimage.affine_transform")
    if data.ndim != 3:
        raise ValueError("input data has to be a 3d array!")
    # OpenCL sampler flags selected by the requested interpolation/boundary mode
    interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
                             "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}
    mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
                    "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
                    "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]
                    }
    if not interpolation in interpolation_defines:
        raise KeyError(
            "interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys())))
    if not mode in mode_defines:
        raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys())))
    # reorder matrix, such that x,y,z -> z,y,x (as the kernel is assuming that)
    if output_shape is None:
        output_shape = data.shape
    if isinstance(data, OCLImage):
        d_im = data
    else:
        d_im = OCLImage.from_array(data.astype(np.float32, copy=False))
    if res_g is None:
        res_g = OCLArray.empty(output_shape, np.float32)
    mat_inv_g = OCLArray.from_array(mat.astype(np.float32, copy=False))
    prog = OCLProgram(abspath("kernels/affine.cl")
                      , build_options=interpolation_defines[interpolation] +
                                      mode_defines[mode])
    # kernel global size uses z,y,x order, hence the reversed shape
    prog.run_kernel("affine3",
                    output_shape[::-1], None,
                    d_im, res_g.data, mat_inv_g.data)
    if isinstance(data, OCLImage):
        return res_g
    else:
        return res_g.get()
def shift(data, shift=(0, 0, 0), mode="constant", interpolation="linear"):
    """
    translates 3d data by given amount

    Parameters
    ----------
    data: ndarray
        3d array
    shift : float or sequence
        The shift along the axes. If a float, `shift` is the same for each axis.
        If a sequence, `shift` should contain one value for each axis.
    mode: string
        boundary mode, one of the following:
        'constant'
            pads with zeros
        'edge'
            pads with edge values
        'wrap'
            pads with the repeated version of the input
    interpolation, string
        interpolation mode, one of the following
        'linear'
        'nearest'

    Returns
    -------
    res: ndarray
        shifted array (same shape as input)

    Raises
    ------
    ValueError
        if `shift` is a sequence whose length is not 3
    """
    if np.isscalar(shift):
        shift = (shift,) * 3
    if len(shift) != 3:
        # FIX: the original message contained an unfilled '%s' placeholder
        raise ValueError("shift (%s) should be of length 3!" % (shift,))
    # affine() takes the inverse coordinate transform, hence the negation
    shift = -np.array(shift)
    return affine(data, mat4_translate(*shift), mode=mode, interpolation=interpolation)
def rotate(data, axis=(1., 0, 0), angle=0., center=None, mode="constant", interpolation="linear"):
    """
    rotates data around axis by a given angle

    Parameters
    ----------
    data: ndarray
        3d array
    axis: tuple
        axis to rotate by angle about
        axis = (x,y,z)
    angle: float
        rotation angle
    center: tuple or None
        origin of rotation (cz,cy,cx) in pixels
        if None, center is the middle of data
    mode: string
        boundary mode, one of the following:
        'constant'
            pads with zeros
        'edge'
            pads with edge values
        'wrap'
            pads with the repeated version of the input
    interpolation, string
        interpolation mode, one of the following
        'linear'
        'nearest'

    Returns
    -------
    res: ndarray
        rotated array (same shape as input)
    """
    if center is None:
        center = tuple([s // 2 for s in data.shape])
    cx, cy, cz = center
    # compose translate-to-origin, rotate, translate-back
    m = np.dot(mat4_translate(cx, cy, cz),
               np.dot(mat4_rotate(angle, *axis),
                      mat4_translate(-cx, -cy, -cz)))
    # affine() expects the inverse coordinate transform
    m = np.linalg.inv(m)
    return affine(data, m, mode=mode, interpolation=interpolation)
def map_coordinates(data, coordinates, interpolation="linear",
                    mode='constant'):
    """
    Map data to new coordinates by interpolation.
    The array of coordinates is used to find, for each point in the output,
    the corresponding coordinates in the input.

    should correspond to scipy.ndimage.map_coordinates

    Parameters
    ----------
    data : ndarray
        2d or 3d input array
    coordinates : array_like of shape (data.ndim, m)
        coordinates at which to sample the input
    interpolation : str
        'linear' or 'nearest'
    mode : str
        boundary mode: 'constant', 'wrap' or 'edge'

    Returns
    -------
    ndarray of shape (m,) with sampled values
    """
    if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)):
        raise ValueError("input data has to be a 2d or 3d array!")
    # NOTE(review): casting to int32 truncates fractional coordinates before
    # the later float32 conversion — scipy.ndimage.map_coordinates accepts
    # float coordinates, so sub-pixel sampling may be lost here; confirm
    coordinates = np.asarray(coordinates, np.int32)
    if not (coordinates.shape[0] == data.ndim):
        raise ValueError("coordinate has to be of shape (data.ndim,m) ")
    interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
                             "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}
    mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
                    "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
                    "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]
                    }
    if not interpolation in interpolation_defines:
        raise KeyError(
            "interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys())))
    if not mode in mode_defines:
        raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys())))
    if not data.dtype.type in cl_buffer_datatype_dict:
        raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys())))
    dtype_defines = ["-D", "DTYPE=%s" % cl_buffer_datatype_dict[data.dtype.type]]
    d_im = OCLImage.from_array(data)
    coordinates_g = OCLArray.from_array(coordinates.astype(np.float32, copy=False))
    res_g = OCLArray.empty(coordinates.shape[1], data.dtype)
    prog = OCLProgram(abspath("kernels/map_coordinates.cl")
                      , build_options=interpolation_defines[interpolation] +
                                      mode_defines[mode] + dtype_defines)
    # kernel name depends on dimensionality: map_coordinates2 / map_coordinates3
    kernel = "map_coordinates{ndim}".format(ndim=data.ndim)
    prog.run_kernel(kernel,
                    (coordinates.shape[-1],), None,
                    d_im, res_g.data, coordinates_g.data)
    return res_g.get()
def geometric_transform(data, mapping = "c0,c1", output_shape=None,
                        mode='constant', interpolation="linear"):
    """
    Apply an arbitrary geometric transform.

    The given mapping expression is used to find, for each point in the
    output, the corresponding coordinates in the input.

    Parameters
    ----------
    data : ndarray
        2d or 3d input array
    mapping : str
        comma-separated OpenCL expressions in the output coordinates
        c0,c1(,c2) giving the input coordinates (templated into the kernel)
    output_shape : tuple
        shape of the output array
    mode : str
        boundary mode: 'constant', 'wrap' or 'edge'
    interpolation : str
        'linear' or 'nearest'
    """
    if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)):
        raise ValueError("input data has to be a 2d or 3d array!")
    interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
                             "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}
    mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
                    "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
                    "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]
                    }
    if not interpolation in interpolation_defines:
        raise KeyError(
            "interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys())))
    if not mode in mode_defines:
        raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys())))
    if not data.dtype.type in cl_buffer_datatype_dict:
        raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys())))
    dtype_defines = ["-D", "DTYPE={type}".format(type=cl_buffer_datatype_dict[data.dtype.type])]
    # pick the OpenCL image read function matching the data type
    image_functions = {np.float32:"read_imagef",
                       np.uint8: "read_imageui",
                       np.uint16: "read_imageui",
                       np.int32: "read_imagei"}
    image_read_defines = ["-D","READ_IMAGE=%s"%image_functions[data.dtype.type]]
    # render the user-provided coordinate mapping into the kernel source
    with open(abspath("kernels/geometric_transform.cl"), "r") as f:
        tpl = Template(f.read())
    output_shape = tuple(output_shape)
    # the kernel indexes in reversed (z,y,x) order, hence the reversal
    mappings = {"FUNC2": "c1,c0",
                "FUNC3": "c2,c1,c0"}
    mappings["FUNC%d" % data.ndim] = ",".join(reversed(mapping.split(",")))
    rendered = tpl.render(**mappings)
    d_im = OCLImage.from_array(data)
    res_g = OCLArray.empty(output_shape, data.dtype)
    prog = OCLProgram(src_str=rendered,
                      build_options=interpolation_defines[interpolation] +
                                    mode_defines[mode] + dtype_defines+image_read_defines)
    kernel = "geometric_transform{ndim}".format(ndim=data.ndim)
    prog.run_kernel(kernel,
                    output_shape[::-1], None,
                    d_im, res_g.data)
    return res_g.get()
if __name__ == '__main__':
    # quick manual smoke test: rotate a cube by 0.5 rad about its centre
    d = np.zeros((200, 200, 200), np.float32)
    d[20:-20, 20:-20, 20:-20] = 1.
    # res = translate(d, x = 10, y = 5, z= -10 )
    res = rotate(d, center=(100, 100, 100), angle=.5)
| 33.558209 | 147 | 0.610656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,309 | 0.472247 |
fa97031b9abd030801b3037a4db301e08dded608 | 776 | py | Python | index.py | genesis331/fdk-object-detection | b5859a5dea0bc87f7382022d08c3616ecd7d1b85 | [
"MIT"
] | null | null | null | index.py | genesis331/fdk-object-detection | b5859a5dea0bc87f7382022d08c3616ecd7d1b85 | [
"MIT"
] | null | null | null | index.py | genesis331/fdk-object-detection | b5859a5dea0bc87f7382022d08c3616ecd7d1b85 | [
"MIT"
] | null | null | null | import streamlit as st
from streamlit import caching
import os
import torch
from src.core.detect import Detector
from src.core.utils import utils
from PIL import Image
import cv2
st.title('1stDayKit Object Detection')
st.write('1stDayKit is a high-level Deep Learning toolkit for solving generic tasks.')
uploaded_file = st.file_uploader("Choose an image...", type=["png","jpg"])
if uploaded_file is not None:
st.spinner()
with st.spinner(text='Loading...'):
det = Detector(name="DemoDet")
img = Image.open(uploaded_file)
img_cv = utils.pil_to_cv2(img)
output = det.predict(img_cv)
out_img = det.visualize(img_cv,output,figsize=(18,18))
cv2.imwrite('tempImage.jpg', out_img)
st.image('tempImage.jpg',width=700) | 33.73913 | 86 | 0.706186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.238402 |
fa97149416601cbc0c317503ce2c46c344e58e58 | 121 | py | Python | src/connections/_sqlalchemy.py | Freonius/tranquillity | bb190b4a8facf643d5018a710100b3ff45d6d640 | [
"MIT"
] | null | null | null | src/connections/_sqlalchemy.py | Freonius/tranquillity | bb190b4a8facf643d5018a710100b3ff45d6d640 | [
"MIT"
] | 20 | 2021-12-31T15:28:20.000Z | 2022-02-15T18:24:16.000Z | src/connections/_sqlalchemy.py | Freonius/tranquillity | bb190b4a8facf643d5018a710100b3ff45d6d640 | [
"MIT"
] | null | null | null | from sqlalchemy.engine import Engine, Connection
from .__interface import IConnection
class Sql(IConnection):
    """SQL connection stub; no behaviour beyond IConnection is implemented yet."""
    pass
| 17.285714 | 48 | 0.801653 | 32 | 0.264463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
fa97154f8775840c89574571c6a5f0bd69db298e | 8,036 | py | Python | custom_components/meross_lan/merossclient/__init__.py | gelokatil/meross_lan | e662666da58456e35432310c929c39ac2675dda6 | [
"MIT"
] | null | null | null | custom_components/meross_lan/merossclient/__init__.py | gelokatil/meross_lan | e662666da58456e35432310c929c39ac2675dda6 | [
"MIT"
] | null | null | null | custom_components/meross_lan/merossclient/__init__.py | gelokatil/meross_lan | e662666da58456e35432310c929c39ac2675dda6 | [
"MIT"
] | null | null | null | """An Http API Client to interact with meross devices"""
from email import header
import logging
from types import MappingProxyType
from typing import List, MappingView, Optional, Dict, Any, Callable, Union
from enum import Enum
from uuid import uuid4
from hashlib import md5
from time import time
from json import (
dumps as json_dumps,
loads as json_loads,
)
import aiohttp
from yarl import URL
import socket
import asyncio
import async_timeout
from . import const as mc
KeyType = Union[dict, Optional[str]] # pylint: disable=unsubscriptable-object
def build_payload(namespace:str, method:str, payload:dict = {}, key:KeyType = None, device_id:str = None)-> dict:
    """Assemble a Meross message envelope ({header, payload}).

    When `key` is a dict it is treated as a received header to be reused
    ('replykey' workflow): it is patched in place and used as the header.
    Otherwise a fresh header is generated with a new messageId, timestamp
    and md5 signature computed from the (string) key.
    """
    reply_topic = mc.TOPIC_RESPONSE.format(device_id or mc.MANUFACTURER)
    if isinstance(key, dict):
        # replykey mode: mutate and reuse the supplied header dict
        key[mc.KEY_NAMESPACE] = namespace
        key[mc.KEY_METHOD] = method
        key[mc.KEY_PAYLOADVERSION] = 1
        key[mc.KEY_FROM] = reply_topic
        header = key
    else:
        messageid = uuid4().hex
        timestamp = int(time())
        signature = md5(
            (messageid + (key or "") + str(timestamp)).encode('utf-8')).hexdigest()
        header = {
            mc.KEY_MESSAGEID: messageid,
            mc.KEY_NAMESPACE: namespace,
            mc.KEY_METHOD: method,
            mc.KEY_PAYLOADVERSION: 1,
            mc.KEY_FROM: reply_topic,
            #mc.KEY_FROM: "/app/0-0/subscribe",
            #"from": "/appliance/9109182170548290882048e1e9522946/publish",
            mc.KEY_TIMESTAMP: timestamp,
            mc.KEY_TIMESTAMPMS: 0,
            mc.KEY_SIGN: signature,
        }
    return {
        mc.KEY_HEADER: header,
        mc.KEY_PAYLOAD: payload
    }
def get_replykey(header: dict, key:KeyType = None) -> KeyType:
    """
    checks header signature against key:
    if ok return the key itself else return the full header { "messageId", "timestamp", "sign", ...}
    in order to be able to use it in a reply scheme

    **UPDATE 28-03-2021**
    the 'reply scheme' hack doesnt work on mqtt but works on http: this code will be left since it works if the key is correct
    anyway and could be reused in a future attempt
    """
    if isinstance(key, dict):
        # no way! we're already keying as replykey workflow
        return header
    # recompute the md5 signature the device would have produced with this key
    sign = md5((header[mc.KEY_MESSAGEID] + (key or "") + str(header[mc.KEY_TIMESTAMP])).encode('utf-8')).hexdigest()
    if sign == header[mc.KEY_SIGN]:
        return key
    # signature mismatch: fall back to echoing the header (reply scheme)
    return header
def get_productname(type: str) -> str:
    """Resolve a Meross hardware type code to its friendly product name.

    Returns the type code itself (the same object) when no known prefix
    matches — callers rely on that identity for 'no match' detection.
    """
    matches = (name for prefix, name in mc.TYPE_NAME_MAP.items()
               if type.startswith(prefix))
    return next(matches, type)
def get_productnameuuid(type: str, uuid: str) -> str:
    # e.g. "Smart Plug (9109182170548290882048e1e9522946)"
    return f"{get_productname(type)} ({uuid})"
def get_productnametype(type: str) -> str:
    name = get_productname(type)
    # identity check on purpose: get_productname returns the very same
    # object when no friendly name was found, so only append "(type)"
    # when a real product name was resolved
    return f"{name} ({type})" if name is not type else type
class MerossDeviceDescriptor:
    """
    Utility class to extract various info from Appliance.System.All
    device descriptor

    Attributes such as `type`, `uuid`, `firmware` etc. are computed lazily
    through `__getattr__` (see `_dynamicattrs`) and cached on the instance;
    `update()` invalidates the cache when a fresh payload arrives.
    """
    all = dict()
    # name -> extractor: each lambda digs the value out of the cached payload
    _dynamicattrs = {
        mc.KEY_SYSTEM: lambda _self: _self.all.get(mc.KEY_SYSTEM, {}),
        mc.KEY_HARDWARE: lambda _self: _self.system.get(mc.KEY_HARDWARE, {}),
        mc.KEY_FIRMWARE: lambda _self: _self.system.get(mc.KEY_FIRMWARE, {}),
        mc.KEY_TYPE: lambda _self: _self.hardware.get(mc.KEY_TYPE, mc.MANUFACTURER),
        mc.KEY_UUID: lambda _self: _self.hardware.get(mc.KEY_UUID),
        mc.KEY_MACADDRESS: lambda _self: _self.hardware.get(mc.KEY_MACADDRESS, mc.MEROSS_MACADDRESS),
        mc.KEY_INNERIP: lambda _self: _self.firmware.get(mc.KEY_INNERIP),
        mc.KEY_TIME: lambda _self: _self.system.get(mc.KEY_TIME, {}),
        mc.KEY_TIMEZONE: lambda _self: _self.time.get(mc.KEY_TIMEZONE),
        'productname': lambda _self: get_productnameuuid(_self.type, _self.uuid),
        'productmodel': lambda _self: f"{_self.type} {_self.hardware.get(mc.KEY_VERSION, '')}"
    }
    def __init__(self, payload: dict):
        self.ability = payload.get(mc.KEY_ABILITY, {})
        self.update(payload)
    def __getattr__(self, name):
        # lazily compute the attribute, then cache it on the instance so
        # __getattr__ is not hit again until update() clears it
        value = MerossDeviceDescriptor._dynamicattrs[name](self)
        setattr(self, name, value)
        return value
    """
    @property
    def uuid(self) -> str:
        return self.hardware.get(mc.KEY_UUID)

    @property
    def macAddress(self) -> str:
        return self.hardware.get(mc.KEY_MACADDRESS, '48:e1:e9:XX:XX:XX')

    @property
    def ipAddress(self) -> str:
        return self.firmware.get(mc.KEY_INNERIP)

    @property
    def timezone(self) -> str:
        return self.system.get(mc.KEY_TIME, {}).get(mc.KEY_TIMEZONE)

    @property
    def productname(self) -> str:
        return get_productnameuuid(self.type, self.uuid)

    @property
    def productmodel(self) -> str:
        return f"{self.type} {self.hardware.get(mc.KEY_VERSION, '')}"
    """
    def update(self, payload: dict):
        """
        reset the cached pointers
        """
        self.all = payload.get(mc.KEY_ALL, self.all)
        self.digest = self.all.get(mc.KEY_DIGEST, {})
        # drop every cached dynamic attribute so it gets recomputed from
        # the new payload on next access
        for key in MerossDeviceDescriptor._dynamicattrs.keys():
            try:
                delattr(self, key)
            except Exception:
                # attribute was never accessed/cached: nothing to clear
                continue
class MerossHttpClient:
    """
    Minimal async HTTP client for a Meross device on the local LAN.

    Posts namespaced requests to http://<host>/config and transparently
    retries once with the 'key reply' hack when the device reports a
    signature error.
    """
    DEFAULT_TIMEOUT = 5  # seconds
    def __init__(self,
                 host: str,
                 key: str = None,
                 session: aiohttp.client.ClientSession = None,
                 logger: logging.Logger = None
                 ):
        """
        host: device address; key: shared device key (may be None);
        session/logger are optionally injected for reuse, created on demand.
        """
        self._host = host
        self._requesturl = URL(f"http://{host}/config")
        self.key = key
        self.replykey = None  # refreshed from every reply header (reply-key hack)
        self._session = session or aiohttp.ClientSession()
        self._logger = logger or logging.getLogger(__name__)
    def set_host_key(self, host: str, key: str) -> None:
        """Update host/key, rebuilding the request URL only when the host changed."""
        if host != self._host:
            self._host = host
            self._requesturl = URL(f"http://{host}/config")
        self.key = key
    async def async_request(
        self,
        namespace: str,
        method: str = mc.METHOD_GET,
        payload: dict = None,
        timeout=DEFAULT_TIMEOUT
    ) -> dict:
        """
        Send one request; on a signature error (code 5001) echo the device's
        own messageId/timestamp/sign back ('key reply' hack) and retry once.

        payload defaults to an empty dict (default is None to avoid the
        shared-mutable-default pitfall).
        """
        if payload is None:
            payload = {}
        self._logger.debug("MerossHttpClient(%s): HTTP POST method:(%s) namespace:(%s)", self._host, method, namespace)
        request: dict = build_payload(namespace, method, payload, self.key or self.replykey)
        response: dict = await self.async_raw_request(request, timeout)
        if response.get(mc.KEY_PAYLOAD, {}).get(mc.KEY_ERROR, {}).get(mc.KEY_CODE) == 5001:
            # sign error... hack and fool: resend with the reply's own header
            self._logger.debug(
                "Key error on %s (%s:%s) -> retrying with key-reply hack",
                self._host, method, namespace)
            req_header = request[mc.KEY_HEADER]
            resp_header = response[mc.KEY_HEADER]
            req_header[mc.KEY_MESSAGEID] = resp_header[mc.KEY_MESSAGEID]
            req_header[mc.KEY_TIMESTAMP] = resp_header[mc.KEY_TIMESTAMP]
            req_header[mc.KEY_SIGN] = resp_header[mc.KEY_SIGN]
            response = await self.async_raw_request(request, timeout)
        return response
    async def async_raw_request(self, payload: dict, timeout=DEFAULT_TIMEOUT) -> dict:
        """
        POST the already-built payload and return the decoded JSON body,
        updating self.replykey from the reply header. Re-raises (with the
        original traceback) any HTTP/timeout/decoding error after logging it.
        """
        try:
            with async_timeout.timeout(timeout):
                response = await self._session.post(
                    url=self._requesturl,
                    data=json_dumps(payload)
                )
                response.raise_for_status()
                text_body = await response.text()
                self._logger.debug("MerossHttpClient(%s): HTTP Response (%s)", self._host, text_body)
                json_body: dict = json_loads(text_body)
                self.replykey = get_replykey(json_body.get(mc.KEY_HEADER), self.key)
        except Exception as e:
            self._logger.debug("MerossHttpClient(%s): HTTP Exception (%s)", self._host, str(e) or type(e).__name__)
            raise  # bare raise preserves the original traceback
        return json_body
| 34.195745 | 126 | 0.620956 | 5,081 | 0.63228 | 0 | 0 | 0 | 0 | 1,968 | 0.244898 | 1,898 | 0.236187 |
fa9727363fedea573e43bf5393f3cdd76d8a0357 | 632 | py | Python | modulo 2/d037/conversao.py | rafa-evangelista/PYTHON | 761ec7e01f1617263bc023a6b82b599a936275ee | [
"MIT"
] | null | null | null | modulo 2/d037/conversao.py | rafa-evangelista/PYTHON | 761ec7e01f1617263bc023a6b82b599a936275ee | [
"MIT"
] | null | null | null | modulo 2/d037/conversao.py | rafa-evangelista/PYTHON | 761ec7e01f1617263bc023a6b82b599a936275ee | [
"MIT"
] | null | null | null | num = int(input('Digite um número: '))
print('''Qual será a base de conversão do número {}
[1] para "binário"
[2] para "octal"
[3] para "hexadecimal"'''.format(num))
num1 = int(input('Escolha uma opção: '))
if num1 == 1:
print('Você escolheu converter o número {} para binário. O valor é de {}.'.format(
num, bin(num)))
elif num1 == 2:
print('Você escolheu converter o número {} para octal. O valor é de {}'.format(
num, oct(num)))
elif num1 == 3:
print('Você escolheu converter o número {} para hexadecimal. O valor é de {}'.format(
num, hex(num)))
else:
print('Escolha uma opção válida.')
| 33.263158 | 89 | 0.628165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.611963 |
fa97541ff73e5ddf240b2b8fd1ad06427655c0a8 | 1,359 | py | Python | python/matplotlib/contour_from_hist2d_sigmas.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 23 | 2015-06-08T13:01:00.000Z | 2021-12-30T08:20:04.000Z | python/matplotlib/contour_from_hist2d_sigmas.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 1 | 2020-10-22T02:36:10.000Z | 2020-10-22T02:36:10.000Z | python/matplotlib/contour_from_hist2d_sigmas.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 7 | 2017-10-31T09:48:14.000Z | 2022-01-04T15:59:45.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot contours from an 2D histogram showing the standard deviation
"""
import numpy as np
import matplotlib.pyplot as plt
# Build datas ###############
x, y = np.random.normal(size=(2, 1000000))
xbins = np.linspace(-2, 2, 30)
ybins = np.linspace(-2, 2, 30)
counts, xedges, yedges = np.histogram2d(x, y, bins=(xbins, ybins))
print("std(x)=", np.std(x))
print("std(y)=", np.std(y))
# Plot data #################
fig, ax = plt.subplots()
sigmas = [1., 2., 3.]
levels = []
fmt = {}
for sigma in sigmas:
levels.append(float(sigma) * np.std(counts))
fmt[float(sigma) * np.std(counts)] = r"${}\sigma$".format(int(sigma))
cs = plt.contour(xedges[:-1], yedges[:-1], counts.T, levels,
linewidths=(2, 2, 3), linestyles=('dotted', 'dashed', 'solid'),
alpha=0.8, colors='red')
ax.clabel(cs, inline=True, fontsize=16, fmt=fmt)
# Set title and labels ######
ax.set_title("Contour", fontsize=20)
ax.set_xlabel(r"$X_1$", fontsize=20)
ax.set_ylabel(r"$X_2$", fontsize=20)
# Set legend ################
lines = [ cs.collections[0]]
labels = [r'$\mathcal{N}$']
ax.legend(lines, labels, prop={'size': 14}, loc='best', fancybox=True, framealpha=0.5)
# Save file #################
plt.savefig("contour_from_hist2d_sigmas.png")
# Plot ######################
plt.show()
| 23.033898 | 86 | 0.589404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.320824 |
fa98db342baff8386700db039d9f97d991e6637e | 2,648 | py | Python | tests/conftest.py | Peter-Metz/taxdata | ee4d401979f48bbbe392bbfba1b571233fec3495 | [
"CC0-1.0"
] | null | null | null | tests/conftest.py | Peter-Metz/taxdata | ee4d401979f48bbbe392bbfba1b571233fec3495 | [
"CC0-1.0"
] | null | null | null | tests/conftest.py | Peter-Metz/taxdata | ee4d401979f48bbbe392bbfba1b571233fec3495 | [
"CC0-1.0"
] | null | null | null | import os
import json
import pytest
import pandas as pd
# TODO: revise the following constants when using new or revised CPS/PUF data
CPS_START_YEAR = 2014  # first data year in the CPS file
PUF_START_YEAR = 2011  # first data year in the PUF file
PUF_COUNT = 248591  # expected number of records in puf.csv (checked by puf_count)
LAST_YEAR = 2027  # last year of the extrapolation period
@pytest.fixture(scope='session')
def test_path():
    """Absolute path of the directory containing this conftest."""
    return os.path.abspath(os.path.dirname(__file__))
@pytest.fixture(scope='session')
def growfactors(test_path):
    """Stage-1 grow factors, indexed by YEAR."""
    path = os.path.join(test_path, '../puf_stage1/growfactors.csv')
    return pd.read_csv(path, index_col='YEAR')
@pytest.fixture(scope='session')
def metadata(test_path):
    """Records metadata parsed from records_metadata.json."""
    path = os.path.join(test_path, 'records_metadata.json')
    with open(path, 'r') as mdf:
        return json.load(mdf)
@pytest.fixture(scope='session')
def cps(test_path):
    """CPS data file as a DataFrame."""
    return pd.read_csv(os.path.join(test_path, '../cps_data/cps.csv.gz'))
@pytest.fixture(scope='session')
def cps_count(test_path):
    """Number of records in the CPS data file."""
    frame = pd.read_csv(os.path.join(test_path, '../cps_data/cps.csv.gz'))
    return frame.shape[0]
@pytest.fixture(scope='session')
def cps_start_year():
    """First data year of the CPS file."""
    return CPS_START_YEAR
@pytest.fixture(scope='session')
def puf_path(test_path):
    """Expected location of the (possibly absent) proprietary PUF file."""
    return os.path.join(test_path, '../puf_data/puf.csv')
@pytest.fixture(scope='session')
def puf(puf_path):
    """PUF DataFrame, or None when the proprietary file is not present."""
    if not os.path.isfile(puf_path):
        return None
    return pd.read_csv(puf_path)
@pytest.fixture(scope='session')
def puf_count(puf_path):
    """PUF record count, validated against the PUF_COUNT constant."""
    if not os.path.isfile(puf_path):
        # no local PUF file: fall back to the known record count
        return PUF_COUNT
    count = pd.read_csv(puf_path).shape[0]
    if count != PUF_COUNT:
        msg = 'puf.shape[0] = {} not equal to PUF_COUNT = {}'
        raise ValueError(msg.format(count, PUF_COUNT))
    return count
@pytest.fixture(scope='session')
def puf_start_year():
    """First data year of the PUF file."""
    return PUF_START_YEAR
@pytest.fixture(scope='session')
def last_year():
    """Last year of the extrapolation period."""
    return LAST_YEAR
@pytest.fixture(scope='session')
def cps_weights(test_path):
    """Stage-2 CPS weights."""
    return pd.read_csv(os.path.join(test_path, '../cps_stage2/cps_weights.csv.gz'))
@pytest.fixture(scope='session')
def puf_weights(test_path):
    """Stage-2 PUF weights."""
    return pd.read_csv(os.path.join(test_path, '../puf_stage2/puf_weights.csv.gz'))
@pytest.fixture(scope='session')
def cps_ratios(test_path):
    """CPS stage-3 ratios (not produced at present, hence None)."""
    # cpsr_path = os.path.join(test_path, '../cps_stage3/cps_ratios.csv')
    # return pd.read_csv(cpsr_path, index_col=0)
    return None
@pytest.fixture(scope='session')
def puf_ratios(test_path):
    """Stage-3 PUF adjustment ratios, indexed by the first column."""
    return pd.read_csv(os.path.join(test_path, '../puf_stage3/puf_ratios.csv'), index_col=0)
| 24.293578 | 77 | 0.702795 | 0 | 0 | 0 | 0 | 2,387 | 0.901435 | 0 | 0 | 602 | 0.227341 |
fa9a41e91340bc016be9afc635d4430468d4b664 | 9,326 | py | Python | tests/snappi/pfcwd/files/pfcwd_runtime_traffic_helper.py | xwjiang2021/sonic-mgmt | 82c446b9fb016eb070af765aa9d9999e55b27342 | [
"Apache-2.0"
] | 2 | 2021-11-24T09:33:41.000Z | 2021-12-03T09:08:29.000Z | tests/snappi/pfcwd/files/pfcwd_runtime_traffic_helper.py | xwjiang2021/sonic-mgmt | 82c446b9fb016eb070af765aa9d9999e55b27342 | [
"Apache-2.0"
] | 1 | 2021-09-08T00:59:33.000Z | 2021-09-08T00:59:33.000Z | tests/snappi/pfcwd/files/pfcwd_runtime_traffic_helper.py | xwjiang2021/sonic-mgmt | 82c446b9fb016eb070af765aa9d9999e55b27342 | [
"Apache-2.0"
] | 1 | 2021-08-20T03:34:30.000Z | 2021-08-20T03:34:30.000Z | import time
import logging
from tests.common.helpers.assertions import pytest_assert
from tests.common.snappi.snappi_helpers import get_dut_port_id
from tests.common.snappi.common_helpers import start_pfcwd, stop_pfcwd
from tests.common.snappi.port import select_ports, select_tx_port
from tests.common.snappi.snappi_helpers import wait_for_arp
DATA_FLOW_NAME = "Data Flow"
DATA_PKT_SIZE = 1024  # bytes per data packet
DATA_FLOW_DURATION_SEC = 15  # total traffic run time
PFCWD_START_DELAY_SEC = 3  # PFC watchdog is enabled this long after traffic starts
SNAPPI_POLL_DELAY_SEC = 2  # settle time after flows report 'stopped'
TOLERANCE_THRESHOLD = 0.05  # max allowed relative deviation of rx packet count
logger = logging.getLogger(__name__)
def run_pfcwd_runtime_traffic_test(api,
                                   testbed_config,
                                   port_config_list,
                                   conn_data,
                                   fanout_data,
                                   duthost,
                                   dut_port,
                                   prio_list,
                                   prio_dscp_map):
    """
    Test PFC watchdog's impact on runtime traffic

    The watchdog is stopped before traffic starts and re-enabled mid-run
    (inside __run_traffic); __verify_results then checks no packets were lost.

    Args:
        api (obj): SNAPPI session
        testbed_config (obj): testbed L1/L2/L3 configuration
        port_config_list (list): list of port configuration
        conn_data (dict): the dictionary returned by conn_graph_fact.
        fanout_data (dict): the dictionary returned by fanout_graph_fact.
        duthost (Ansible host instance): device under test
        dut_port (str): DUT port to test
        prio_list (list): priorities of data flows
        prio_dscp_map (dict): Priority vs. DSCP map (key = priority).

    Returns:
        N/A
    """
    pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config')
    # make sure the watchdog is off while flows are configured; it is
    # (re)started during the run by __run_traffic
    stop_pfcwd(duthost)
    """ Get the ID of the port to test """
    port_id = get_dut_port_id(dut_hostname=duthost.hostname,
                              dut_port=dut_port,
                              conn_data=conn_data,
                              fanout_data=fanout_data)
    pytest_assert(port_id is not None,
                  'Fail to get ID for port {}'.format(dut_port))
    # one data flow per priority, all converging on the port under test
    __gen_traffic(testbed_config=testbed_config,
                  port_config_list=port_config_list,
                  port_id=port_id,
                  data_flow_name=DATA_FLOW_NAME,
                  data_flow_dur_sec=DATA_FLOW_DURATION_SEC,
                  data_pkt_size=DATA_PKT_SIZE,
                  prio_list=prio_list,
                  prio_dscp_map=prio_dscp_map)
    flows = testbed_config.flows
    all_flow_names = [flow.name for flow in flows]
    flow_stats = __run_traffic(api=api,
                               config=testbed_config,
                               duthost=duthost,
                               all_flow_names=all_flow_names,
                               pfcwd_start_delay_sec=PFCWD_START_DELAY_SEC,
                               exp_dur_sec=DATA_FLOW_DURATION_SEC)
    # speed string looks like 'speed_100_gbps'; take the numeric part
    speed_str = testbed_config.layer1[0].speed
    speed_gbps = int(speed_str.split('_')[1])
    __verify_results(rows=flow_stats,
                     speed_gbps=speed_gbps,
                     data_flow_dur_sec=DATA_FLOW_DURATION_SEC,
                     data_pkt_size=DATA_PKT_SIZE,
                     tolerance=TOLERANCE_THRESHOLD)
def __gen_traffic(testbed_config,
                  port_config_list,
                  port_id,
                  data_flow_name,
                  data_flow_dur_sec,
                  data_pkt_size,
                  prio_list,
                  prio_dscp_map):
    """
    Generate configurations of flows

    Builds one fixed-duration data flow per priority in prio_list, from a
    selected TX port to the port under test (many-to-one pattern), splitting
    line rate evenly across priorities. Mutates testbed_config in place.

    Args:
        testbed_config (obj): testbed L1/L2/L3 configuration
        port_config_list (list): list of port configuration
        port_id (int): ID of DUT port to test.
        data_flow_name (str): data flow name
        data_flow_dur_sec (int): duration of data flows in second
        data_pkt_size (int): size of data packets in byte
        prio_list (list): priorities of data flows
        prio_dscp_map (dict): Priority vs. DSCP map (key = priority).

    Returns:
        N/A
    """
    rx_port_id = port_id
    tx_port_id_list, rx_port_id_list = select_ports(port_config_list=port_config_list,
                                                    pattern="many to one",
                                                    rx_port_id=rx_port_id)
    pytest_assert(len(tx_port_id_list) > 0, "Cannot find any TX ports")
    tx_port_id = select_tx_port(tx_port_id_list=tx_port_id_list,
                                rx_port_id=rx_port_id)
    pytest_assert(tx_port_id is not None, "Cannot find a suitable TX port")
    tx_port_config = next((x for x in port_config_list if x.id == tx_port_id), None)
    rx_port_config = next((x for x in port_config_list if x.id == rx_port_id), None)
    # destination MAC: peer port when L2-adjacent, otherwise the gateway
    tx_mac = tx_port_config.mac
    if tx_port_config.gateway == rx_port_config.gateway and \
            tx_port_config.prefix_len == rx_port_config.prefix_len:
        """ If soruce and destination port are in the same subnet """
        rx_mac = rx_port_config.mac
    else:
        rx_mac = tx_port_config.gateway_mac
    tx_port_name = testbed_config.ports[tx_port_id].name
    rx_port_name = testbed_config.ports[rx_port_id].name
    # split total line rate evenly across the priorities
    data_flow_rate_percent = int(100 / len(prio_list))
    """ For each priority """
    for prio in prio_list:
        data_flow = testbed_config.flows.flow(
            name='{} Prio {}'.format(data_flow_name, prio))[-1]
        data_flow.tx_rx.port.tx_name = tx_port_name
        data_flow.tx_rx.port.rx_name = rx_port_name
        eth, ipv4 = data_flow.packet.ethernet().ipv4()
        eth.src.value = tx_mac
        eth.dst.value = rx_mac
        # map the flow onto the PFC queue matching its priority
        eth.pfc_queue.value = prio
        ipv4.src.value = tx_port_config.ip
        ipv4.dst.value = rx_port_config.ip
        ipv4.priority.choice = ipv4.priority.DSCP
        ipv4.priority.dscp.phb.values = prio_dscp_map[prio]
        ipv4.priority.dscp.ecn.value = (
            ipv4.priority.dscp.ecn.CAPABLE_TRANSPORT_1)
        data_flow.size.fixed = data_pkt_size
        data_flow.rate.percentage = data_flow_rate_percent
        data_flow.duration.fixed_seconds.seconds = data_flow_dur_sec
        # collect per-flow metrics including loss so results can be verified
        data_flow.metrics.enable = True
        data_flow.metrics.loss = True
def __run_traffic(api, config, duthost, all_flow_names, pfcwd_start_delay_sec, exp_dur_sec):
    """
    Start traffic at time 0 and enable PFC watchdog at pfcwd_start_delay_sec

    After the expected duration, polls flow metrics until every flow reports
    'stopped' (up to max_attempts polls), then returns a final metrics dump.

    Args:
        api (obj): SNAPPI session
        config (obj): experiment config (testbed config + flow config)
        duthost (Ansible host instance): device under test
        all_flow_names (list): list of names of all the flows
        pfcwd_start_delay_sec (int): PFC watchdog start delay in second
        exp_dur_sec (int): experiment duration in second

    Returns:
        per-flow statistics (list)
    """
    api.set_config(config)
    logger.info('Wait for Arp to Resolve ...')
    wait_for_arp(api, max_attempts=10, poll_interval_sec=2)
    logger.info('Starting transmit on all flows ...')
    ts = api.transmit_state()
    ts.state = ts.START
    api.set_transmit_state(ts)
    # traffic runs for exp_dur_sec total; the watchdog is switched on
    # pfcwd_start_delay_sec into the run
    time.sleep(pfcwd_start_delay_sec)
    start_pfcwd(duthost)
    time.sleep(exp_dur_sec - pfcwd_start_delay_sec)
    attempts = 0
    max_attempts = 20
    while attempts < max_attempts:
        request = api.metrics_request()
        request.flow.flow_names = all_flow_names
        rows = api.get_metrics(request).flow_metrics
        """ If all the flows have stopped """
        transmit_states = [row.transmit for row in rows]
        if len(rows) == len(all_flow_names) and\
           list(set(transmit_states)) == ['stopped']:
            # give the stats one extra poll interval to settle
            time.sleep(SNAPPI_POLL_DELAY_SEC)
            break
        else:
            time.sleep(1)
            attempts += 1
    # NOTE(review): the message says 'seconds' but max_attempts counts poll
    # attempts (~1s apart) — roughly equivalent, kept as-is
    pytest_assert(attempts < max_attempts,
                  "Flows do not stop in {} seconds".format(max_attempts))
    """ Dump per-flow statistics """
    request = api.metrics_request()
    request.flow.flow_names = all_flow_names
    rows = api.get_metrics(request).flow_metrics
    logger.info('Stop transmit on all flows ...')
    ts = api.transmit_state()
    ts.state = ts.STOP
    api.set_transmit_state(ts)
    return rows
def __verify_results(rows, speed_gbps, data_flow_dur_sec, data_pkt_size, tolerance):
    """
    Check the per-flow statistics: every flow must be lossless and must have
    delivered (within tolerance) its fair share of line rate for the whole
    run.

    Args:
        rows (list): per-flow statistics
        speed_gbps (int): link speed in Gbps
        data_flow_dur_sec (int): duration of data flows in second
        data_pkt_size (int): size of data packets in byte
        tolerance (float): maximum allowable deviation

    Returns:
        N/A
    """
    rate_percent = int(100 / len(rows))
    # each flow gets rate_percent of line rate, so over the run it should
    # deliver rate * speed * duration / (8 * packet_size) packets
    exp_rx_pkts = rate_percent / 100.0 * speed_gbps \
        * 1e9 * data_flow_dur_sec / 8.0 / data_pkt_size
    for row in rows:
        tx_frames = row.frames_tx
        rx_frames = row.frames_rx
        pytest_assert(tx_frames == rx_frames, "{} packets of {} are dropped".\
                      format(tx_frames-rx_frames, row.name))
        deviation = (rx_frames - exp_rx_pkts) / float(exp_rx_pkts)
        pytest_assert(abs(deviation) < tolerance,
                      "{} should receive {} packets (actual {})".\
                      format(row.name, exp_rx_pkts, rx_frames))
| 36.147287 | 92 | 0.623097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,588 | 0.277504 |
fa9a48ce8dbd9143921b6d9a6aaa68457aed7d58 | 3,353 | py | Python | sixx/plugins/utils/converters.py | TildeBeta/6X | 1814eb8f394b7c25b49decdd7d7249567c85f30f | [
"MIT"
] | 2 | 2018-03-06T20:39:49.000Z | 2018-03-17T04:28:57.000Z | sixx/plugins/utils/converters.py | TildeBeta/TwitterImages | 1814eb8f394b7c25b49decdd7d7249567c85f30f | [
"MIT"
] | 2 | 2018-03-06T20:39:46.000Z | 2018-03-15T17:03:03.000Z | sixx/plugins/utils/converters.py | TildeBeta/TwitterImages | 1814eb8f394b7c25b49decdd7d7249567c85f30f | [
"MIT"
] | 1 | 2018-04-25T22:24:40.000Z | 2018-04-25T22:24:40.000Z | from math import sqrt
import re
from curious.commands import Context
from curious.commands.exc import ConversionFailedError
from typing import Tuple
# optional '#'/'0x' prefix followed by 1-6 alphanumeric chars; actual hex
# validity is checked later by int(..., base=16)
colour_pattern = re.compile(r'(#|0x)?([A-Za-z0-9]{1,6})')
# an (r, g, b) triple of 0-255 ints
RGB = Tuple[int, int, int]
class Colour:
    """
    A 24-bit RGB colour backed by a single integer value (0xRRGGBB).
    """
    def __init__(self, value: int):
        self.value = value
    def _get_part(self, part) -> int:
        """Extract one 8-bit channel (0=red, 1=green, 2=blue) from the value."""
        string = f'{self.value:06x}'
        piece = slice(part * 2, part * 2 + 2)
        return int(string[piece], base=16)
    @property
    def red(self) -> int:
        """Red channel, 0-255."""
        return self._get_part(0)
    r = red
    @property
    def green(self) -> int:
        """Green channel, 0-255."""
        return self._get_part(1)
    g = green
    @property
    def blue(self) -> int:
        """Blue channel, 0-255."""
        return self._get_part(2)
    b = blue
    @property
    def rgb(self) -> Tuple[int, int, int]:
        """The colour as an (r, g, b) tuple."""
        return self.r, self.g, self.b
    def distance(self, other: 'Colour'):
        """Euclidean distance between the two colours in RGB space."""
        r1, g1, b1 = self.rgb
        r2, g2, b2 = other.rgb
        return sqrt((r2 - r1) ** 2 + (g2 - g1) ** 2 + (b2 - b1) ** 2)
    def luminance(self) -> float:
        """
        Calculate the relative luminance of the colour.
        Based on information from https://www.w3.org/TR/WCAG20-TECHS/G18.html
        """
        def convert(value):
            value /= 255
            if value <= 0.03928:
                return value / 12.92
            else:
                return ((value + 0.055) / 1.055) ** 2.4
        r, g, b = map(convert, self.rgb)
        return r * 0.2126 + g * 0.7152 + b * 0.0722
    def contrast(self, other: 'Colour'):
        """
        Calculate the WCAG contrast ratio between two colours.

        Per https://www.w3.org/TR/WCAG20-TECHS/G18.html the ratio is
        (L1 + 0.05) / (L2 + 0.05) with L1 the *lighter* luminance, so the
        result is symmetric (x.contrast(y) == y.contrast(x)) and always >= 1.
        """
        lighter, darker = sorted((self.luminance(), other.luminance()), reverse=True)
        return (lighter + 0.05) / (darker + 0.05)
    def __repr__(self):
        return '{0.__class__.__name__}({0.value})'.format(self)
    def __str__(self):
        return f'#{self.value:06x}'
    def __eq__(self, other):
        # only compare against other Colour instances; previously any object
        # without a .value attribute raised AttributeError here
        if not isinstance(other, Colour):
            return NotImplemented
        return self.value == other.value
    def __hash__(self):
        return hash(self.value)
def convert_hex_colour(annotation, ctx: Context, arg: str) -> Colour:
    """
    Converts a string representation of a hex colour into an instance of
    Colour, stripping an optional '#' or '0x' prefix first.
    """
    stripped = colour_pattern.sub(r'\2', arg)
    try:
        value = int(stripped, base=16)
    except ValueError:
        raise ConversionFailedError(ctx, arg, annotation, 'Invalid value.')
    return annotation(value)
class RGBPart:
    """
    Annotation marker type: a value in the unsigned char range (0 - 255),
    i.e. one component of an RGB(A) colour.
    """
    pass
def valid_unsigned_char(annotation, ctx: Context, arg: str):
    """
    Checks if given input is a number in the domain [0, 255].
    255 = 0xFF, which is the largest value for any component of an RGB(A)
    number.
    """
    try:
        value = int(arg)
    except ValueError:
        raise ConversionFailedError(ctx, arg, annotation, 'Invalid number.')
    if not 0 <= value <= 255:
        raise ConversionFailedError(ctx, arg, annotation, 'Value must be within range (0 - 255)')
    return value
# register the custom converters with the command framework
Context.add_converter(Colour, convert_hex_colour)
Context.add_converter(RGBPart, valid_unsigned_char)
| 25.022388 | 101 | 0.593498 | 2,089 | 0.623024 | 0 | 0 | 280 | 0.083507 | 0 | 0 | 983 | 0.29317 |
fa9b854e0bf31a77452586413b7c4fd1b1f19d69 | 85 | py | Python | tests/plugins/tool/docformatter_tool_plugin/valid_package/wrong.py | kogut/statick | 6cbe43b5ac78275a12af3ac5aa325833368d0767 | [
"CC0-1.0"
] | 54 | 2018-08-27T19:12:41.000Z | 2022-03-31T04:16:40.000Z | tests/plugins/tool/docformatter_tool_plugin/valid_package/wrong.py | gregtkogut/statick | 11a8f950d50b52903a25f4f00c7cd52a90eff56c | [
"CC0-1.0"
] | 288 | 2018-08-28T13:17:44.000Z | 2022-03-21T20:05:19.000Z | tests/plugins/tool/docformatter_tool_plugin/valid_package/wrong.py | gregtkogut/statick | 11a8f950d50b52903a25f4f00c7cd52a90eff56c | [
"CC0-1.0"
] | 12 | 2018-08-28T13:18:39.000Z | 2022-03-21T19:27:16.000Z | '''
Docstring with single quotes instead of double quotes.
'''
my_str = "not an int"
| 17 | 54 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.870588 |
fa9c176b0c5c5750593310639cb938dd5d20b975 | 1,789 | py | Python | src/tester.py | OompahLoompah/LinodeAPI-Client | 6842b5cb461676a5ae363f242c972d8b157ea67e | [
"MIT"
] | null | null | null | src/tester.py | OompahLoompah/LinodeAPI-Client | 6842b5cb461676a5ae363f242c972d8b157ea67e | [
"MIT"
] | null | null | null | src/tester.py | OompahLoompah/LinodeAPI-Client | 6842b5cb461676a5ae363f242c972d8b157ea67e | [
"MIT"
] | null | null | null | from client import linodeClient
import os
# Interactive smoke-test driver for the Linode API client (Python 2 era:
# raw_input). The branches are chained with elif because the 'destroy'
# branch reassigns userInput to a Linode ID, which could otherwise match a
# later command name by accident.
linode = linodeClient(os.getcwd() + '/../.config')
userInput = raw_input("What do you want to do?\n")
if userInput == 'create':
    print(linode.createLinode('3', '1'))
elif userInput == 'destroy':
    userInput = raw_input("What do you want to destroy?\n")
    # every disk must be deleted before the Linode itself can be destroyed
    response = linode.listDisks(userInput)
    for disk in response['DATA']:
        linode.deleteDisk(userInput, str(disk['DISKID']))
    print(linode.destroyLinode(userInput))
elif userInput == 'cfd':
    # create-from-distro: root disk from a distro image + a 512 MB swap disk
    linodeID = raw_input("LinodeID: ")
    distro = raw_input("Distro ID: ")
    label = raw_input("Label: ")
    size = raw_input("Size (MB): ")
    password = raw_input("Password: ")
    print(linode.createFromDistro(linodeID, distro, label, size, password))
    linode.createDisk(linodeID, 'swap', '512', 'swap')
    linode.createConfig(linodeID, label, [label,'swap'])
elif userInput == 'config':
    linodeID = raw_input("LinodeID: ")
    label = raw_input("Label: ")
    disks = []
    # collect disk IDs until an empty line is entered
    disk = raw_input("Enter disk ID: ")
    while disk != '':
        disks.append(disk)
        disk = raw_input("Enter disk ID: ")
    print(linode.createConfig(linodeID, label, disks))
elif userInput == 'boot':
    vps = raw_input("Which Linode? ")
    print(linode.boot(vps))
elif userInput == 'reboot':
    vps = raw_input("Which Linode? ")
    print(linode.reboot(vps))
elif userInput == 'shutdown':
    vps = raw_input("Which Linode? ")
    print(linode.shutdown(vps))
elif userInput == 'list disks':
    vps = raw_input("Which Linode? ")
    response = linode.listDisks(vps)
    print(response['ERRORARRAY'])
    for disk in response['DATA']:
        print(disk['DISKID'])
elif userInput == 'list IPs':
    vps = raw_input("Which Linode? ")
    response = linode.listIPs(vps)
    print(response)
| 28.854839 | 75 | 0.643935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 413 | 0.230855 |
fa9cd956e95a761dad517dc12fcf1239628e18a4 | 6,694 | py | Python | base/migrations/0002_auto_20210622_1947.py | francofgp/Syndeo | 888a1001f2cbb2ff8b7247e84a2899dcbd08af80 | [
"MIT"
] | 3 | 2022-01-04T17:38:04.000Z | 2022-01-05T12:45:22.000Z | base/migrations/0002_auto_20210622_1947.py | francofgp/Syndeo | 888a1001f2cbb2ff8b7247e84a2899dcbd08af80 | [
"MIT"
] | null | null | null | base/migrations/0002_auto_20210622_1947.py | francofgp/Syndeo | 888a1001f2cbb2ff8b7247e84a2899dcbd08af80 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.2 on 2021-06-22 22:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=30, null=True)),
],
),
migrations.RemoveField(
model_name='palabra',
name='name',
),
migrations.AddField(
model_name='account',
name='descripcion',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='account',
name='imagenPerfil',
field=models.ImageField(blank=True, default='images/default_user.png', null=True, upload_to='images/'),
),
migrations.AddField(
model_name='account',
name='imagenPortada',
field=models.ImageField(blank=True, default='images/default_portada.jpg', null=True, upload_to='images/'),
),
migrations.AddField(
model_name='account',
name='last_name',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AddField(
model_name='account',
name='metaDiaria',
field=models.BigIntegerField(null=True),
),
migrations.AddField(
model_name='palabra',
name='DiasAAgregarSiCorrecto',
field=models.PositiveIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='palabra',
name='cantidadDeRepasosHastaElProximoNivel',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='palabra',
name='cantidadRepasos',
field=models.PositiveIntegerField(blank=True, default=0, null=True),
),
migrations.AddField(
model_name='palabra',
name='fechaHastaDescenderNivel',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='palabra',
name='fechaLeidaPrimeraVez',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='palabra',
name='fechaSiguienteRepaso',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='palabra',
name='fechaUltimoRepaso',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='palabra',
name='idioma',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='base.idioma'),
),
migrations.AddField(
model_name='palabra',
name='palabra',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AddField(
model_name='palabra',
name='traduccion',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='palabra',
name='usuario',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='palabra',
name='dificultad',
field=models.PositiveIntegerField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='palabra',
name='fechaCreacion',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='palabra',
name='fechaModificacion',
field=models.DateField(auto_now=True, null=True),
),
migrations.CreateModel(
name='Texto',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fechaCreacion', models.DateField(auto_now_add=True, null=True)),
('fechaModificacion', models.DateField(auto_now=True, null=True)),
('cantidadPalabras', models.BigIntegerField(blank=True, null=True)),
('texto', models.TextField(blank=True, max_length=100000, null=True)),
('audio', models.FileField(blank=True, null=True, upload_to='')),
('youtubeURL', models.URLField(blank=True, null=True)),
('imagen', models.ImageField(blank=True, null=True, upload_to='')),
('completado', models.BooleanField(blank=True, default=False, null=True)),
('fechaUltimaLectura', models.DateField(blank=True, null=True)),
('fechaCompletado', models.DateField(blank=True, null=True)),
('categoria', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='base.categoria')),
('idioma', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='base.idioma')),
('usuario', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='textos', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Desafios',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=30, null=True)),
('cantidadPalabras', models.BigIntegerField(null=True)),
('cantidadPalabrasLeidas', models.BigIntegerField(null=True)),
('fechaFinalizacion', models.DateField(null=True)),
('fechaCreacion', models.DateField(auto_now_add=True, null=True)),
('imagen', models.ImageField(blank=True, null=True, upload_to='')),
('usuario', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='palabra',
name='texto',
field=models.ManyToManyField(to='base.Texto'),
),
]
| 42.100629 | 155 | 0.582313 | 6,535 | 0.976247 | 0 | 0 | 0 | 0 | 0 | 0 | 1,049 | 0.156707 |
fa9db8ff1e82424dabac6f76a1d2b7bc1e4f6ce5 | 327 | py | Python | selection_sort.py | Wajktor13/Sorting_algorithms | e7de2dff189f899b467b1b24f7352bb236c5c651 | [
"MIT"
] | null | null | null | selection_sort.py | Wajktor13/Sorting_algorithms | e7de2dff189f899b467b1b24f7352bb236c5c651 | [
"MIT"
] | null | null | null | selection_sort.py | Wajktor13/Sorting_algorithms | e7de2dff189f899b467b1b24f7352bb236c5c651 | [
"MIT"
] | null | null | null | def selection_sort(input_list):
for i in range(len(input_list)):
min_index = i
for k in range(i, len(input_list)):
if input_list[k] < input_list[min_index]:
min_index = k
input_list[i], input_list[min_index] = input_list[min_index], input_list[i]
return input_list
| 29.727273 | 83 | 0.623853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
fa9ea7ecedd54d91de0696590347b0edb53b720e | 163 | py | Python | prefs.py | synap5e/pandora-station-to-spotify | d0c0caace016a35b56c4eae6593a47dad2a53205 | [
"MIT"
] | 1 | 2019-03-24T07:01:55.000Z | 2019-03-24T07:01:55.000Z | prefs.py | synap5e/pandora-station-to-spotify | d0c0caace016a35b56c4eae6593a47dad2a53205 | [
"MIT"
] | null | null | null | prefs.py | synap5e/pandora-station-to-spotify | d0c0caace016a35b56c4eae6593a47dad2a53205 | [
"MIT"
] | null | null | null | username = 'user@example.com'
password = 'hunter2'
# larger = less change of delays if you skip a lot
# smaller = more responsive to ups/downs
queue_size = 2 | 27.166667 | 51 | 0.711656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.730061 |
fa9fe5acc3c1efc2638d84976321625ca2613512 | 3,299 | py | Python | src/scs_core/data/histogram.py | seoss/scs_core | 0d4323c5697a39eb44a887f179ba5dca3716c1d2 | [
"MIT"
] | null | null | null | src/scs_core/data/histogram.py | seoss/scs_core | 0d4323c5697a39eb44a887f179ba5dca3716c1d2 | [
"MIT"
] | null | null | null | src/scs_core/data/histogram.py | seoss/scs_core | 0d4323c5697a39eb44a887f179ba5dca3716c1d2 | [
"MIT"
] | null | null | null | """
Created on 9 Aug 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import _csv
import sys
# --------------------------------------------------------------------------------------------------------------------
class Histogram(object):
    """
    A fixed-range histogram: each appended datum is counted into one of
    bin_count equal-width bins spanning [minimum, maximum] (both ends
    inclusive). The path field labels the dataset and prefixes the CSV
    column headers written by to_csv().
    """

    __HEADER_BIN = ".bin"                   # CSV header suffix for the bin lower-bound column
    __HEADER_COUNT = ".count"               # CSV header suffix for the count column

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, minimum, maximum, bin_count, path):
        """
        Constructor

        :param minimum: lowest acceptable datum (inclusive)
        :param maximum: highest acceptable datum (inclusive)
        :param bin_count: number of equal-width bins
        :param path: label for this dataset, used to build CSV headers
        """
        self.__minimum = minimum
        self.__maximum = maximum
        self.__bin_count = bin_count
        self.__path = path

        self.__counts = [0] * bin_count
        self.__max_count = 0

        self.__delta = (maximum - minimum) / bin_count          # width of one bin

    def __len__(self):
        return self.__bin_count

    # ----------------------------------------------------------------------------------------------------------------

    def append(self, datum):
        """
        Count datum into its bin.

        :param datum: value in the range [minimum, maximum]
        :return: tuple of (bin index, updated count for that bin)
        :raises ValueError: if datum lies outside the range
        """
        # reject out-of-range
        if datum < self.__minimum or datum > self.__maximum:
            raise ValueError("datum out of range:%f" % datum)

        # compute index - clamped so that datum == maximum lands in the last
        # bin instead of indexing one past the end (off-by-one in original:
        # floor division yielded bin_count for datum == maximum -> IndexError)
        offset = datum - self.__minimum
        index = min(int(offset // self.__delta), self.__bin_count - 1)

        # update counts...
        self.__counts[index] += 1

        if self.__counts[index] > self.__max_count:
            self.__max_count = self.__counts[index]

        return index, self.__counts[index]

    def to_csv(self, filename=None):
        """
        Write the histogram as CSV: a header row, then one (bin lower bound,
        count) row per bin.

        :param filename: destination path; writes to stdout if None
        """
        if filename is None:
            self.__write_csv(sys.stdout)
        else:
            # newline='' is the documented open() mode for csv writers -
            # avoids doubled line endings on Windows; the context manager
            # guarantees the file is closed even if a write raises
            with open(filename, "w", newline="") as file:
                self.__write_csv(file)

    def __write_csv(self, file):
        # emit the header row, then one row per bin
        # (public csv module, not the private _csv accelerator)
        writer = csv.writer(file)
        writer.writerow((self.__path + Histogram.__HEADER_BIN, self.__path + Histogram.__HEADER_COUNT))

        for i in range(self.__bin_count):
            writer.writerow((format(self.__bin(i), '.6f'), self.__counts[i]))

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def bins(self):
        # lower bound of each bin
        return [self.__bin(i) for i in range(self.__bin_count)]

    @property
    def minimum(self):
        return self.__minimum

    @property
    def maximum(self):
        return self.__maximum

    @property
    def bin_count(self):
        return self.__bin_count

    @property
    def path(self):
        return self.__path

    @property
    def delta(self):
        return self.__delta

    @property
    def max_count(self):
        return self.__max_count

    @property
    def counts(self):
        return self.__counts

    # ----------------------------------------------------------------------------------------------------------------

    def __bin(self, index):
        # lower bound of the bin at the given index
        return self.__minimum + (index * self.__delta)

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "Histogram:{minimum:%0.6f, maximum:%0.6f, bin_count:%d, delta:%0.6f, max_count:%d, counts:%s, " \
               "path:%s}" % \
               (self.minimum, self.maximum, self.bin_count, self.delta, self.max_count, self.counts,
                self.path)
| 24.992424 | 118 | 0.461352 | 3,062 | 0.92816 | 0 | 0 | 523 | 0.158533 | 0 | 0 | 1,044 | 0.31646 |